-rw-r--r--Documentation/DocBook/device-drivers.tmpl5
-rw-r--r--Documentation/DocBook/kernel-api.tmpl6
-rw-r--r--Documentation/DocBook/media-entities.tmpl6
-rw-r--r--Documentation/DocBook/v4l/compat.xml24
-rw-r--r--Documentation/DocBook/v4l/controls.xml12
-rw-r--r--Documentation/DocBook/v4l/dev-rds.xml68
-rw-r--r--Documentation/DocBook/v4l/dev-teletext.xml29
-rw-r--r--Documentation/DocBook/v4l/pixfmt-packed-rgb.xml2
-rw-r--r--Documentation/DocBook/v4l/pixfmt-srggb10.xml90
-rw-r--r--Documentation/DocBook/v4l/pixfmt-srggb8.xml67
-rw-r--r--Documentation/DocBook/v4l/pixfmt-y10.xml79
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml32
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml10
-rw-r--r--Documentation/DocBook/v4l/videodev2.h.xml106
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-preset.xml3
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-timings.xml3
-rw-r--r--Documentation/DocBook/v4l/vidioc-query-dv-preset.xml2
-rw-r--r--Documentation/DocBook/v4l/vidioc-querycap.xml7
-rw-r--r--Documentation/DocBook/v4l/vidioc-queryctrl.xml18
-rw-r--r--Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml10
-rw-r--r--Documentation/accounting/getdelays.c38
-rw-r--r--Documentation/cgroups/cgroups.txt14
-rw-r--r--Documentation/devices.txt9
-rw-r--r--Documentation/dvb/get_dvb_firmware46
-rw-r--r--Documentation/dvb/lmedm04.txt58
-rw-r--r--Documentation/fb/viafb.txt48
-rw-r--r--Documentation/feature-removal-schedule.txt67
-rw-r--r--Documentation/filesystems/9p.txt4
-rw-r--r--Documentation/filesystems/Locking31
-rw-r--r--Documentation/filesystems/ext4.txt14
-rw-r--r--Documentation/filesystems/nfs/00-INDEX2
-rw-r--r--Documentation/filesystems/nfs/idmapper.txt28
-rw-r--r--Documentation/filesystems/nfs/pnfs.txt48
-rw-r--r--Documentation/filesystems/proc.txt25
-rw-r--r--Documentation/filesystems/sharedsubtree.txt4
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/misc-devices/apds990x.txt111
-rw-r--r--Documentation/misc-devices/bh1770glc.txt116
-rw-r--r--Documentation/networking/phy.txt18
-rw-r--r--Documentation/sysctl/vm.txt12
-rw-r--r--Documentation/sysrq.txt7
-rw-r--r--Documentation/timers/hpet_example.c27
-rw-r--r--Documentation/trace/postprocess/trace-vmscan-postprocess.pl39
-rw-r--r--Documentation/video4linux/CARDLIST.cx881
-rw-r--r--Documentation/video4linux/CARDLIST.em28xx3
-rw-r--r--Documentation/video4linux/CARDLIST.saa71342
-rw-r--r--Documentation/video4linux/bttv/MAKEDEV1
-rw-r--r--Documentation/video4linux/gspca.txt2
-rw-r--r--Documentation/video4linux/v4l2-framework.txt35
-rw-r--r--Documentation/vm/highmem.txt162
-rw-r--r--Kbuild2
-rw-r--r--MAINTAINERS102
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/include/asm/core_mcpcia.h2
-rw-r--r--arch/alpha/include/asm/core_t2.h54
-rw-r--r--arch/alpha/include/asm/pgtable.h2
-rw-r--r--arch/alpha/kernel/core_t2.c11
-rw-r--r--arch/alpha/kernel/machvec_impl.h3
-rw-r--r--arch/alpha/kernel/pci_iommu.c4
-rw-r--r--arch/alpha/kernel/ptrace.c7
-rw-r--r--arch/arm/configs/pcontrol_g20_defconfig175
-rw-r--r--arch/arm/include/asm/highmem.h6
-rw-r--r--arch/arm/include/asm/pgtable.h14
-rw-r--r--arch/arm/kernel/ptrace.c28
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-at91/Makefile13
-rw-r--r--arch/arm/mach-at91/at91sam9260.c7
-rw-r--r--arch/arm/mach-at91/at91sam9261.c7
-rw-r--r--arch/arm/mach-at91/at91sam9263.c7
-rw-r--r--arch/arm/mach-at91/at91sam9_alt_reset.S48
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c165
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c7
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c322
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c24
-rw-r--r--arch/arm/mach-at91/generic.h3
-rw-r--r--arch/arm/mach-at91/pm.c15
-rw-r--r--arch/arm/mach-at91/pm.h5
-rw-r--r--arch/arm/mach-at91/pm_slowclock.S1
-rw-r--r--arch/arm/mach-ep93xx/clock.c3
-rw-r--r--arch/arm/mach-imx/include/mach/dma-v1.h8
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c2
-rw-r--r--arch/arm/mach-mx3/mx31moboard-marxbot.c1
-rw-r--r--arch/arm/mach-mx3/mx31moboard-smartbot.c1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c5
-rw-r--r--arch/arm/mach-pxa/em-x270.c1
-rw-r--r--arch/arm/mach-pxa/ezx.c2
-rw-r--r--arch/arm/mach-pxa/mioa701.c1
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c2
-rw-r--r--arch/arm/mach-tegra/timer.c1
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c13
-rw-r--r--arch/arm/mm/fault-armv.c4
-rw-r--r--arch/arm/mm/highmem.c24
-rw-r--r--arch/arm/mm/pgd.c4
-rw-r--r--arch/arm/plat-mxc/include/mach/dma.h67
-rw-r--r--arch/arm/plat-mxc/include/mach/sdma.h17
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h135
-rw-r--r--arch/arm/plat-omap/fb.c14
-rw-r--r--arch/arm/plat-omap/include/plat/display.h31
-rw-r--r--arch/arm/plat-omap/include/plat/vrfb.h16
-rw-r--r--arch/arm/plat-pxa/include/plat/sdhci.h32
-rw-r--r--arch/avr32/include/asm/pgtable.h2
-rw-r--r--arch/avr32/kernel/ptrace.c11
-rw-r--r--arch/blackfin/include/asm/entry.h8
-rw-r--r--arch/blackfin/kernel/ptrace.c16
-rw-r--r--arch/cris/arch-v10/kernel/ptrace.c20
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c16
-rw-r--r--arch/cris/include/asm/pgtable.h2
-rw-r--r--arch/frv/include/asm/highmem.h25
-rw-r--r--arch/frv/include/asm/pgtable.h9
-rw-r--r--arch/frv/kernel/ptrace.c32
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c4
-rw-r--r--arch/frv/mm/cache-page.c8
-rw-r--r--arch/frv/mm/highmem.c51
-rw-r--r--arch/h8300/kernel/ptrace.c33
-rw-r--r--arch/ia64/include/asm/cputime.h6
-rw-r--r--arch/ia64/include/asm/pgtable.h2
-rw-r--r--arch/ia64/include/asm/siginfo.h1
-rw-r--r--arch/ia64/kernel/ptrace.c3
-rw-r--r--arch/m32r/include/asm/pgtable.h2
-rw-r--r--arch/m32r/kernel/ptrace.c11
-rw-r--r--arch/m68k/include/asm/entry_mm.h8
-rw-r--r--arch/m68k/include/asm/entry_no.h10
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h2
-rw-r--r--arch/m68k/kernel/ptrace.c51
-rw-r--r--arch/m68knommu/kernel/ptrace.c57
-rw-r--r--arch/microblaze/include/asm/pgtable.h7
-rw-r--r--arch/microblaze/kernel/ptrace.c5
-rw-r--r--arch/mips/include/asm/highmem.h18
-rw-r--r--arch/mips/include/asm/pci/bridge.h2
-rw-r--r--arch/mips/include/asm/pgtable-32.h3
-rw-r--r--arch/mips/include/asm/pgtable-64.h3
-rw-r--r--arch/mips/kernel/ptrace.c25
-rw-r--r--arch/mips/mm/highmem.c51
-rw-r--r--arch/mn10300/Kconfig279
-rw-r--r--arch/mn10300/Makefile6
-rw-r--r--arch/mn10300/boot/compressed/head.S69
-rw-r--r--arch/mn10300/configs/asb2303_defconfig2
-rw-r--r--arch/mn10300/configs/asb2364_defconfig98
-rw-r--r--arch/mn10300/include/asm/atomic.h350
-rw-r--r--arch/mn10300/include/asm/bitops.h14
-rw-r--r--arch/mn10300/include/asm/cache.h14
-rw-r--r--arch/mn10300/include/asm/cacheflush.h164
-rw-r--r--arch/mn10300/include/asm/cpu-regs.h91
-rw-r--r--arch/mn10300/include/asm/dmactl-regs.h87
-rw-r--r--arch/mn10300/include/asm/elf.h12
-rw-r--r--arch/mn10300/include/asm/exceptions.h8
-rw-r--r--arch/mn10300/include/asm/fpu.h157
-rw-r--r--arch/mn10300/include/asm/frame.inc20
-rw-r--r--arch/mn10300/include/asm/gdb-stub.h2
-rw-r--r--arch/mn10300/include/asm/hardirq.h3
-rw-r--r--arch/mn10300/include/asm/highmem.h46
-rw-r--r--arch/mn10300/include/asm/intctl-regs.h37
-rw-r--r--arch/mn10300/include/asm/io.h13
-rw-r--r--arch/mn10300/include/asm/irq.h12
-rw-r--r--arch/mn10300/include/asm/irq_regs.h6
-rw-r--r--arch/mn10300/include/asm/irqflags.h111
-rw-r--r--arch/mn10300/include/asm/mmu_context.h73
-rw-r--r--arch/mn10300/include/asm/pgalloc.h1
-rw-r--r--arch/mn10300/include/asm/pgtable.h90
-rw-r--r--arch/mn10300/include/asm/processor.h66
-rw-r--r--arch/mn10300/include/asm/ptrace.h13
-rw-r--r--arch/mn10300/include/asm/reset-regs.h2
-rw-r--r--arch/mn10300/include/asm/rtc.h11
-rw-r--r--arch/mn10300/include/asm/rwlock.h125
-rw-r--r--arch/mn10300/include/asm/serial-regs.h51
-rw-r--r--arch/mn10300/include/asm/serial.h8
-rw-r--r--arch/mn10300/include/asm/smp.h91
-rw-r--r--arch/mn10300/include/asm/smsc911x.h1
-rw-r--r--arch/mn10300/include/asm/spinlock.h179
-rw-r--r--arch/mn10300/include/asm/spinlock_types.h20
-rw-r--r--arch/mn10300/include/asm/system.h73
-rw-r--r--arch/mn10300/include/asm/thread_info.h18
-rw-r--r--arch/mn10300/include/asm/timer-regs.h191
-rw-r--r--arch/mn10300/include/asm/timex.h20
-rw-r--r--arch/mn10300/include/asm/tlbflush.h174
-rw-r--r--arch/mn10300/include/asm/uaccess.h6
-rw-r--r--arch/mn10300/kernel/Makefile15
-rw-r--r--arch/mn10300/kernel/asm-offsets.c11
-rw-r--r--arch/mn10300/kernel/cevt-mn10300.c131
-rw-r--r--arch/mn10300/kernel/csrc-mn10300.c35
-rw-r--r--arch/mn10300/kernel/entry.S146
-rw-r--r--arch/mn10300/kernel/fpu-low.S265
-rw-r--r--arch/mn10300/kernel/fpu-nofpu-low.S39
-rw-r--r--arch/mn10300/kernel/fpu-nofpu.c30
-rw-r--r--arch/mn10300/kernel/fpu.c141
-rw-r--r--arch/mn10300/kernel/gdb-io-serial-low.S5
-rw-r--r--arch/mn10300/kernel/gdb-io-serial.c37
-rw-r--r--arch/mn10300/kernel/gdb-io-ttysm.c24
-rw-r--r--arch/mn10300/kernel/gdb-stub.c17
-rw-r--r--arch/mn10300/kernel/head.S202
-rw-r--r--arch/mn10300/kernel/internal.h25
-rw-r--r--arch/mn10300/kernel/irq.c276
-rw-r--r--arch/mn10300/kernel/kprobes.c4
-rw-r--r--arch/mn10300/kernel/mn10300-serial-low.S6
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c210
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog-low.S9
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog.c100
-rw-r--r--arch/mn10300/kernel/process.c61
-rw-r--r--arch/mn10300/kernel/profile.c2
-rw-r--r--arch/mn10300/kernel/ptrace.c20
-rw-r--r--arch/mn10300/kernel/rtc.c41
-rw-r--r--arch/mn10300/kernel/setup.c79
-rw-r--r--arch/mn10300/kernel/signal.c20
-rw-r--r--arch/mn10300/kernel/smp-low.S97
-rw-r--r--arch/mn10300/kernel/smp.c1152
-rw-r--r--arch/mn10300/kernel/switch_to.S7
-rw-r--r--arch/mn10300/kernel/time.c112
-rw-r--r--arch/mn10300/kernel/traps.c42
-rw-r--r--arch/mn10300/lib/bitops.c4
-rw-r--r--arch/mn10300/lib/delay.c8
-rw-r--r--arch/mn10300/lib/do_csum.S49
-rw-r--r--arch/mn10300/mm/Kconfig.cache101
-rw-r--r--arch/mn10300/mm/Makefile14
-rw-r--r--arch/mn10300/mm/cache-flush-by-reg.S308
-rw-r--r--arch/mn10300/mm/cache-flush-by-tag.S251
-rw-r--r--arch/mn10300/mm/cache-flush-icache.c155
-rw-r--r--arch/mn10300/mm/cache-flush-mn10300.S192
-rw-r--r--arch/mn10300/mm/cache-inv-by-reg.S356
-rw-r--r--arch/mn10300/mm/cache-inv-by-tag.S348
-rw-r--r--arch/mn10300/mm/cache-inv-icache.c129
-rw-r--r--arch/mn10300/mm/cache-mn10300.S289
-rw-r--r--arch/mn10300/mm/cache-smp-flush.c156
-rw-r--r--arch/mn10300/mm/cache-smp-inv.c153
-rw-r--r--arch/mn10300/mm/cache-smp.c105
-rw-r--r--arch/mn10300/mm/cache-smp.h69
-rw-r--r--arch/mn10300/mm/cache.c95
-rw-r--r--arch/mn10300/mm/fault.c17
-rw-r--r--arch/mn10300/mm/init.c26
-rw-r--r--arch/mn10300/mm/misalignment.c3
-rw-r--r--arch/mn10300/mm/mmu-context.c41
-rw-r--r--arch/mn10300/mm/pgtable.c2
-rw-r--r--arch/mn10300/mm/tlb-mn10300.S59
-rw-r--r--arch/mn10300/mm/tlb-smp.c214
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/cache.h9
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/clock.h2
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h102
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h29
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/proc.h2
-rw-r--r--arch/mn10300/proc-mn103e010/proc-init.c37
-rw-r--r--arch/mn10300/proc-mn2ws0050/Makefile5
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/cache.h48
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/clock.h20
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h103
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h29
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/irq.h49
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h120
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/proc.h18
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h51
-rw-r--r--arch/mn10300/proc-mn2ws0050/proc-init.c134
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/clock.h25
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/serial.h5
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/timex.h109
-rw-r--r--arch/mn10300/unit-asb2303/unit-init.c10
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/clock.h25
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/serial.h5
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/timex.h109
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c16
-rw-r--r--arch/mn10300/unit-asb2305/pci.c2
-rw-r--r--arch/mn10300/unit-asb2305/unit-init.c6
-rw-r--r--arch/mn10300/unit-asb2364/Makefile12
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/clock.h29
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/fpga-regs.h52
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/irq.h35
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/leds.h54
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/serial.h151
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/smsc911x.h171
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/timex.h159
-rw-r--r--arch/mn10300/unit-asb2364/irq-fpga.c96
-rw-r--r--arch/mn10300/unit-asb2364/leds.c98
-rw-r--r--arch/mn10300/unit-asb2364/smsc911x.c58
-rw-r--r--arch/mn10300/unit-asb2364/unit-init.c88
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/include/asm/cache.h2
-rw-r--r--arch/parisc/include/asm/cacheflush.h8
-rw-r--r--arch/parisc/include/asm/irq.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/irq.c41
-rw-r--r--arch/parisc/kernel/pdc_cons.c141
-rw-r--r--arch/parisc/kernel/ptrace.c13
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/kernel/unaligned.c3
-rw-r--r--arch/parisc/kernel/unwind.c5
-rw-r--r--arch/parisc/math-emu/Makefile2
-rw-r--r--arch/powerpc/include/asm/cputime.h12
-rw-r--r--arch/powerpc/include/asm/fsldma.h137
-rw-r--r--arch/powerpc/include/asm/highmem.h9
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h8
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h2
-rw-r--r--arch/powerpc/kernel/ptrace.c66
-rw-r--r--arch/powerpc/kernel/vio.c4
-rw-r--r--arch/powerpc/mm/highmem.c37
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c76
-rw-r--r--arch/s390/include/asm/cputime.h10
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/score/include/asm/pgtable.h3
-rw-r--r--arch/score/kernel/ptrace.c7
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c1
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c4
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c1
-rw-r--r--arch/sh/boards/mach-migor/setup.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c1
-rw-r--r--arch/sh/include/asm/pgtable_32.h3
-rw-r--r--arch/sh/include/asm/pgtable_64.h2
-rw-r--r--arch/sh/kernel/ptrace_32.c45
-rw-r--r--arch/sh/kernel/ptrace_64.c25
-rw-r--r--arch/sparc/include/asm/highmem.h4
-rw-r--r--arch/sparc/include/asm/io_32.h31
-rw-r--r--arch/sparc/include/asm/io_64.h31
-rw-r--r--arch/sparc/include/asm/pci_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h2
-rw-r--r--arch/sparc/kernel/ptrace_32.c57
-rw-r--r--arch/sparc/kernel/ptrace_64.c15
-rw-r--r--arch/sparc/mm/highmem.c48
-rw-r--r--arch/tile/Kconfig23
-rw-r--r--arch/tile/Makefile19
-rw-r--r--arch/tile/include/arch/sim.h619
-rw-r--r--arch/tile/include/arch/sim_def.h548
-rw-r--r--arch/tile/include/arch/spr_def.h85
-rw-r--r--arch/tile/include/arch/spr_def_32.h39
-rw-r--r--arch/tile/include/asm/backtrace.h5
-rw-r--r--arch/tile/include/asm/compat.h15
-rw-r--r--arch/tile/include/asm/highmem.h10
-rw-r--r--arch/tile/include/asm/irqflags.h64
-rw-r--r--arch/tile/include/asm/mman.h1
-rw-r--r--arch/tile/include/asm/page.h27
-rw-r--r--arch/tile/include/asm/pgtable.h5
-rw-r--r--arch/tile/include/asm/processor.h11
-rw-r--r--arch/tile/include/asm/ptrace.h4
-rw-r--r--arch/tile/include/asm/syscalls.h73
-rw-r--r--arch/tile/include/asm/system.h14
-rw-r--r--arch/tile/include/asm/traps.h4
-rw-r--r--arch/tile/include/hv/hypervisor.h30
-rw-r--r--arch/tile/kernel/backtrace.c4
-rw-r--r--arch/tile/kernel/compat.c10
-rw-r--r--arch/tile/kernel/compat_signal.c10
-rw-r--r--arch/tile/kernel/entry.S34
-rw-r--r--arch/tile/kernel/head_32.S5
-rw-r--r--arch/tile/kernel/intvec_32.S101
-rw-r--r--arch/tile/kernel/irq.c16
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/process.c50
-rw-r--r--arch/tile/kernel/ptrace.c87
-rw-r--r--arch/tile/kernel/regs_32.S2
-rw-r--r--arch/tile/kernel/setup.c36
-rw-r--r--arch/tile/kernel/signal.c6
-rw-r--r--arch/tile/kernel/single_step.c73
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/stack.c35
-rw-r--r--arch/tile/kernel/sys.c9
-rw-r--r--arch/tile/kernel/traps.c4
-rw-r--r--arch/tile/kvm/Kconfig38
-rw-r--r--arch/tile/lib/Makefile4
-rw-r--r--arch/tile/lib/atomic_32.c8
-rw-r--r--arch/tile/lib/exports.c3
-rw-r--r--arch/tile/lib/memcpy_32.S206
-rw-r--r--arch/tile/lib/memmove.c (renamed from arch/tile/lib/memmove_32.c)0
-rw-r--r--arch/tile/lib/memset_32.c1
-rw-r--r--arch/tile/lib/strlen_32.c2
-rw-r--r--arch/tile/mm/fault.c12
-rw-r--r--arch/tile/mm/highmem.c86
-rw-r--r--arch/tile/mm/homecache.c11
-rw-r--r--arch/tile/mm/init.c2
-rw-r--r--arch/um/Kconfig.um6
-rw-r--r--arch/um/defconfig1
-rw-r--r--arch/um/include/asm/dma-mapping.h112
-rw-r--r--arch/um/include/asm/pgtable.h2
-rw-r--r--arch/um/include/asm/system.h49
-rw-r--r--arch/um/kernel/dyn.lds.S14
-rw-r--r--arch/um/kernel/irq.c15
-rw-r--r--arch/um/kernel/ptrace.c23
-rw-r--r--arch/um/kernel/uml.lds.S19
-rw-r--r--arch/um/os-Linux/time.c2
-rw-r--r--arch/um/sys-i386/ptrace.c4
-rw-r--r--arch/um/sys-x86_64/ptrace.c11
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/highmem.h11
-rw-r--r--arch/x86/include/asm/iomap.h4
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/perf_event.h19
-rw-r--r--arch/x86/include/asm/pgtable_32.h14
-rw-r--r--arch/x86/include/asm/pgtable_64.h2
-rw-r--r--arch/x86/include/asm/smp.h9
-rw-r--r--arch/x86/include/asm/xen/hypercall.h17
-rw-r--r--arch/x86/include/asm/xen/page.h12
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c1
-rw-r--r--arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/longrun.c4
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event.c26
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c216
-rw-r--r--arch/x86/kernel/crash_dump_32.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c6
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/irq_32.c12
-rw-r--r--arch/x86/kernel/ptrace.c17
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smp.c15
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/mm/fault.c63
-rw-r--r--arch/x86/mm/highmem_32.c76
-rw-r--r--arch/x86/mm/init_64.c1
-rw-r--r--arch/x86/mm/iomap_32.c43
-rw-r--r--arch/x86/oprofile/nmi_int.c6
-rw-r--r--arch/x86/oprofile/op_model_amd.c146
-rw-r--r--arch/x86/pci/i386.c17
-rw-r--r--arch/x86/pci/irq.c11
-rw-r--r--arch/x86/pci/mmconfig-shared.c4
-rw-r--r--arch/x86/xen/Kconfig11
-rw-r--r--arch/x86/xen/enlighten.c19
-rw-r--r--arch/x86/xen/mmu.c501
-rw-r--r--arch/x86/xen/mmu.h1
-rw-r--r--arch/x86/xen/setup.c112
-rw-r--r--arch/x86/xen/smp.c6
-rw-r--r--arch/x86/xen/xen-ops.h3
-rw-r--r--arch/xtensa/include/asm/pgtable.h3
-rw-r--r--arch/xtensa/kernel/ptrace.c14
-rw-r--r--crypto/async_tx/Kconfig13
-rw-r--r--crypto/async_tx/async_memcpy.c2
-rw-r--r--crypto/blkcipher.c2
-rw-r--r--drivers/acpi/Kconfig13
-rw-r--r--drivers/acpi/ac.c14
-rw-r--r--drivers/acpi/acpica/Makefile5
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acevents.h5
-rw-r--r--drivers/acpi/acpica/acglobal.h9
-rw-r--r--drivers/acpi/acpica/achware.h7
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h12
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acutils.h56
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c19
-rw-r--r--drivers/acpi/acpica/evevent.c41
-rw-r--r--drivers/acpi/acpica/evgpeblk.c47
-rw-r--r--drivers/acpi/acpica/evgpeinit.c31
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c14
-rw-r--r--drivers/acpi/acpica/evxface.c19
-rw-r--r--drivers/acpi/acpica/evxfevnt.c61
-rw-r--r--drivers/acpi/acpica/evxfregn.c6
-rw-r--r--drivers/acpi/acpica/exfldio.c75
-rw-r--r--drivers/acpi/acpica/exmutex.c10
-rw-r--r--drivers/acpi/acpica/exprep.c45
-rw-r--r--drivers/acpi/acpica/exregion.c4
-rw-r--r--drivers/acpi/acpica/hwpci.c412
-rw-r--r--drivers/acpi/acpica/nsrepair2.c163
-rw-r--r--drivers/acpi/acpica/nsutils.c98
-rw-r--r--drivers/acpi/acpica/tbfadt.c4
-rw-r--r--drivers/acpi/acpica/utdebug.c7
-rw-r--r--drivers/acpi/acpica/uteval.c147
-rw-r--r--drivers/acpi/acpica/utglobal.c9
-rw-r--r--drivers/acpi/acpica/utids.c45
-rw-r--r--drivers/acpi/acpica/utinit.c4
-rw-r--r--drivers/acpi/acpica/utmath.c23
-rw-r--r--drivers/acpi/acpica/utmisc.c162
-rw-r--r--drivers/acpi/acpica/utmutex.c37
-rw-r--r--drivers/acpi/acpica/utosi.c380
-rw-r--r--drivers/acpi/acpica/utxface.c138
-rw-r--r--drivers/acpi/acpica/utxferror.c415
-rw-r--r--drivers/acpi/battery.c94
-rw-r--r--drivers/acpi/bus.c7
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/dock.c7
-rw-r--r--drivers/acpi/ec.c9
-rw-r--r--drivers/acpi/fan.c139
-rw-r--r--drivers/acpi/osl.c463
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/pci_link.c1
-rw-r--r--drivers/acpi/pci_root.c1
-rw-r--r--drivers/acpi/power.c167
-rw-r--r--drivers/acpi/processor_driver.c22
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/processor_thermal.c178
-rw-r--r--drivers/acpi/processor_throttling.c4
-rw-r--r--drivers/acpi/sbs.c25
-rw-r--r--drivers/acpi/scan.c46
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/thermal.c436
-rw-r--r--drivers/acpi/video.c771
-rw-r--r--drivers/atm/eni.c7
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/power/runtime.c2
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/aoe/aoedev.c4
-rw-r--r--drivers/block/cciss.c37
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/z2ram.c6
-rw-r--r--drivers/char/agp/Makefile1
-rw-r--r--drivers/char/agp/agp.h5
-rw-r--r--drivers/char/agp/amd-k7-agp.c6
-rw-r--r--drivers/char/agp/backend.c22
-rw-r--r--drivers/char/agp/generic.c8
-rw-r--r--drivers/char/agp/intel-agp.c201
-rw-r--r--drivers/char/agp/intel-agp.h43
-rw-r--r--drivers/char/agp/intel-gtt.c1612
-rw-r--r--drivers/char/agp/parisc-agp.c4
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/hpet.c40
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/hvc_tile.c5
-rw-r--r--drivers/char/hvc_xen.c3
-rw-r--r--drivers/char/ip2/Makefile2
-rw-r--r--drivers/char/ipmi/Makefile2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c44
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/mmtimer.c60
-rw-r--r--drivers/char/mwave/Makefile4
-rw-r--r--drivers/char/mxser.c4
-rw-r--r--drivers/char/pcmcia/ipwireless/Makefile2
-rw-r--r--drivers/char/ppdev.c1
-rw-r--r--drivers/char/ramoops.c30
-rw-r--r--drivers/char/rio/Makefile2
-rw-r--r--drivers/char/rocket.c5
-rw-r--r--drivers/char/synclink_gt.c142
-rw-r--r--drivers/char/vt_ioctl.c11
-rw-r--r--drivers/connector/cn_queue.c75
-rw-r--r--drivers/connector/connector.c9
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c42
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/dma/Kconfig31
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/amba-pl08x.c2167
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/fsldma.c328
-rw-r--r--drivers/dma/imx-dma.c424
-rw-r--r--drivers/dma/imx-sdma.c1392
-rw-r--r--drivers/dma/intel_mid_dma.c476
-rw-r--r--drivers/dma/intel_mid_dma_regs.h53
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/ste_dma40.c1023
-rw-r--r--drivers/dma/ste_dma40_ll.c180
-rw-r--r--drivers/dma/ste_dma40_ll.h86
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/edac_core.h11
-rw-r--r--drivers/edac/edac_mc.c12
-rw-r--r--drivers/edac/edac_mc_sysfs.c82
-rw-r--r--drivers/edac/i7core_edac.c432
-rw-r--r--drivers/firmware/dmi_scan.c32
-rw-r--r--drivers/gpio/74x164.c182
-rw-r--r--drivers/gpio/Kconfig28
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/adp5588-gpio.c277
-rw-r--r--drivers/gpio/basic_mmio_gpio.c297
-rw-r--r--drivers/gpio/langwell_gpio.c89
-rw-r--r--drivers/gpio/pca953x.c13
-rw-r--r--drivers/gpio/pch_gpio.c312
-rw-r--r--drivers/gpio/timbgpio.c21
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c40
-rw-r--r--drivers/gpu/drm/drm_context.c8
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_debugfs.c1
-rw-r--r--drivers/gpu/drm/drm_drawable.c198
-rw-r--r--drivers/gpu/drm/drm_drv.c10
-rw-r--r--drivers/gpu/drm/drm_edid.c93
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c32
-rw-r--r--drivers/gpu/drm/drm_gem.c14
-rw-r--r--drivers/gpu/drm/drm_info.c14
-rw-r--r--drivers/gpu/drm/drm_lock.c30
-rw-r--r--drivers/gpu/drm/drm_memory.c14
-rw-r--r--drivers/gpu/drm/drm_proc.c14
-rw-r--r--drivers/gpu/drm/drm_scatter.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c4
-rw-r--r--drivers/gpu/drm/drm_vm.c13
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c66
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c10
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c10
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c336
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c360
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c214
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h271
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2230
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c148
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c72
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c54
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c264
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h335
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c28
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c286
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c234
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h6
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c127
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2357
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c658
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h160
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c69
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c29
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c193
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c484
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c435
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c16
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c (renamed from drivers/gpu/drm/i915/i915_opregion.c)181
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c1001
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c109
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c457
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h81
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1076
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c165
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/Makefile6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c374
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c290
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h253
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c318
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c363
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c776
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c205
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c518
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h74
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c289
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.h55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c212
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c60
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c68
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c140
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c81
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c110
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c48
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c506
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c87
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c92
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c286
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c3305
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c418
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c131
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c4
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c95
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h1
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c406
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c564
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c774
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c348
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.h35
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h20
-rw-r--r--drivers/gpu/drm/radeon/r100.c103
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c15
-rw-r--r--drivers/gpu/drm/radeon/r420.c16
-rw-r--r--drivers/gpu/drm/radeon/r520.c11
-rw-r--r--drivers/gpu/drm/radeon/r600.c179
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c51
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c50
-rw-r--r--drivers/gpu/drm/radeon/r600d.h21
-rw-r--r--drivers/gpu/drm/radeon/radeon.h23
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h11
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c129
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c405
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h53
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c34
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen8
-rw-r--r--drivers/gpu/drm/radeon/rs400.c15
-rw-r--r--drivers/gpu/drm/radeon/rs600.c15
-rw-r--r--drivers/gpu/drm/radeon/rs690.c15
-rw-r--r--drivers/gpu/drm/radeon/rv515.c15
-rw-r--r--drivers/gpu/drm/radeon/rv770.c38
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c305
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c148
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c20
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c130
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c137
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c200
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c75
-rw-r--r--drivers/gpu/stub/Kconfig13
-rw-r--r--drivers/gpu/stub/Makefile1
-rw-r--r--drivers/gpu/stub/poulsbo.c64
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-i801.c10
-rw-r--r--drivers/i2c/busses/scx200_acb.c3
-rw-r--r--drivers/idle/intel_idle.c61
-rw-r--r--drivers/infiniband/core/agent.c29
-rw-r--r--drivers/infiniband/core/cma.c313
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/mad.c27
-rw-r--r--drivers/infiniband/core/multicast.c23
-rw-r--r--drivers/infiniband/core/sa_query.c30
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucma.c92
-rw-r--r--drivers/infiniband/core/ud_header.c140
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/verbs.c16
-rw-r--r--drivers/infiniband/hw/amso1100/Kbuild4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile6
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h16
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c24
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c25
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_user.h8
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c178
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c28
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c191
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h68
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c44
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c283
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c62
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h44
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c6
-rw-r--r--drivers/infiniband/hw/ipath/Makefile2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c1
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c163
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c32
-rw-r--r--drivers/infiniband/hw/mlx4/main.c553
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h32
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c195
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c16
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c236
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h21
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c2
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c6
-rw-r--r--drivers/macintosh/windfarm_pm121.c2
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/media/IR/Kconfig39
-rw-r--r--drivers/media/IR/Makefile2
-rw-r--r--drivers/media/IR/ene_ir.c1046
-rw-r--r--drivers/media/IR/ene_ir.h275
-rw-r--r--drivers/media/IR/imon.c690
-rw-r--r--drivers/media/IR/ir-core-priv.h22
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c5
-rw-r--r--drivers/media/IR/ir-keytable.c7
-rw-r--r--drivers/media/IR/ir-lirc-codec.c135
-rw-r--r--drivers/media/IR/ir-nec-decoder.c5
-rw-r--r--drivers/media/IR/ir-raw-event.c81
-rw-r--r--drivers/media/IR/ir-rc5-decoder.c5
-rw-r--r--drivers/media/IR/ir-rc5-sz-decoder.c154
-rw-r--r--drivers/media/IR/ir-rc6-decoder.c5
-rw-r--r--drivers/media/IR/ir-sony-decoder.c5
-rw-r--r--drivers/media/IR/ir-sysfs.c37
-rw-r--r--drivers/media/IR/keymaps/Makefile16
-rw-r--r--drivers/media/IR/keymaps/rc-alink-dtu-m.c68
-rw-r--r--drivers/media/IR/keymaps/rc-anysee.c93
-rw-r--r--drivers/media/IR/keymaps/rc-asus-pc39.c80
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-rm-ks.c79
-rw-r--r--drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c102
-rw-r--r--drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c98
-rw-r--r--drivers/media/IR/keymaps/rc-digittrade.c82
-rw-r--r--drivers/media/IR/keymaps/rc-leadtek-y04g0051.c99
-rw-r--r--drivers/media/IR/keymaps/rc-lme2510.c68
-rw-r--r--drivers/media/IR/keymaps/rc-msi-digivox-ii.c67
-rw-r--r--drivers/media/IR/keymaps/rc-msi-digivox-iii.c85
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-streamzap.c81
-rw-r--r--drivers/media/IR/keymaps/rc-rc6-mce.c88
-rw-r--r--drivers/media/IR/keymaps/rc-streamzap.c82
-rw-r--r--drivers/media/IR/keymaps/rc-terratec-slim.c79
-rw-r--r--drivers/media/IR/keymaps/rc-total-media-in-hand.c85
-rw-r--r--drivers/media/IR/keymaps/rc-trekstor.c80
-rw-r--r--drivers/media/IR/keymaps/rc-twinhan1027.c87
-rw-r--r--drivers/media/IR/lirc_dev.c134
-rw-r--r--drivers/media/IR/mceusb.c475
-rw-r--r--drivers/media/IR/nuvoton-cir.c1246
-rw-r--r--drivers/media/IR/nuvoton-cir.h408
-rw-r--r--drivers/media/IR/streamzap.c376
-rw-r--r--drivers/media/common/saa7146_fops.c2
-rw-r--r--drivers/media/common/saa7146_i2c.c1
-rw-r--r--drivers/media/common/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146_video.c2
-rw-r--r--drivers/media/common/tuners/Kconfig7
-rw-r--r--drivers/media/common/tuners/Makefile1
-rw-r--r--drivers/media/common/tuners/tda18218.c334
-rw-r--r--drivers/media/common/tuners/tda18218.h45
-rw-r--r--drivers/media/common/tuners/tda18218_priv.h106
-rw-r--r--drivers/media/common/tuners/tda18271-common.c61
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c16
-rw-r--r--drivers/media/common/tuners/tda18271.h5
-rw-r--r--drivers/media/common/tuners/xc5000.c2
-rw-r--r--drivers/media/common/tuners/xc5000.h4
-rw-r--r--drivers/media/dvb/b2c2/flexcop-i2c.c3
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig12
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile3
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c394
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h735
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c87
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-i2c.c1
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c4
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c9
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c1088
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.h173
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c61
-rw-r--r--drivers/media/dvb/firewire/firedtv-fe.c36
-rw-r--r--drivers/media/dvb/frontends/Kconfig24
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/af9013.c251
-rw-r--r--drivers/media/dvb/frontends/af9013.h1
-rw-r--r--drivers/media/dvb/frontends/af9013_priv.h60
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c27
-rw-r--r--drivers/media/dvb/frontends/cx22702.c123
-rw-r--r--drivers/media/dvb/frontends/cx24110.c2
-rw-r--r--drivers/media/dvb/frontends/cx24123.c1
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c1
-rw-r--r--drivers/media/dvb/frontends/drx397xD.c2
-rw-r--r--drivers/media/dvb/frontends/ix2505v.c323
-rw-r--r--drivers/media/dvb/frontends/ix2505v.h64
-rw-r--r--drivers/media/dvb/frontends/lgdt3304.c380
-rw-r--r--drivers/media/dvb/frontends/lgdt3304.h45
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb/frontends/mt352.c2
-rw-r--r--drivers/media/dvb/frontends/mt352.h2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c1
-rw-r--r--drivers/media/dvb/frontends/s5h1432.c415
-rw-r--r--drivers/media/dvb/frontends/s5h1432.h91
-rw-r--r--drivers/media/dvb/frontends/si21xx.c2
-rw-r--r--drivers/media/dvb/frontends/stb6100.c2
-rw-r--r--drivers/media/dvb/frontends/stb6100.h4
-rw-r--r--drivers/media/dvb/frontends/stv0288.c25
-rw-r--r--drivers/media/dvb/frontends/stv0299.c2
-rw-r--r--drivers/media/dvb/frontends/stv0299.h2
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c2
-rw-r--r--drivers/media/dvb/frontends/zl10353.c2
-rw-r--r--drivers/media/dvb/mantis/mantis_core.c5
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.c9
-rw-r--r--drivers/media/dvb/ngene/ngene-i2c.c1
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c1
-rw-r--r--drivers/media/dvb/pt1/pt1.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c3
-rw-r--r--drivers/media/dvb/siano/smsir.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110.c3
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c5
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c2
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c1
-rw-r--r--drivers/media/radio/radio-cadet.c3
-rw-r--r--drivers/media/radio/radio-mr800.c75
-rw-r--r--drivers/media/radio/radio-si4713.c12
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c29
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c17
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h2
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/tef6862.c1
-rw-r--r--drivers/media/video/Kconfig106
-rw-r--r--drivers/media/video/Makefile12
-rw-r--r--drivers/media/video/adv7170.c28
-rw-r--r--drivers/media/video/adv7175.c28
-rw-r--r--drivers/media/video/adv7180.c1
-rw-r--r--drivers/media/video/au0828/au0828-cards.c4
-rw-r--r--drivers/media/video/au0828/au0828-video.c4
-rw-r--r--drivers/media/video/bt819.c28
-rw-r--r--drivers/media/video/bt856.c28
-rw-r--r--drivers/media/video/bt866.c28
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c22
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c273
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c43
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c84
-rw-r--r--drivers/media/video/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/bt8xx/bttvp.h13
-rw-r--r--drivers/media/video/cafe_ccic.c180
-rw-r--r--drivers/media/video/cpia2/Kconfig2
-rw-r--r--drivers/media/video/cpia2/cpia2.h8
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c51
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c332
-rw-r--r--drivers/media/video/cpia2/cpia2dev.h4
-rw-r--r--drivers/media/video/cs5345.c27
-rw-r--r--drivers/media/video/cs53l32a.c27
-rw-r--r--drivers/media/video/cx18/cx18-driver.h19
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c23
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c1
-rw-r--r--drivers/media/video/cx231xx/Kconfig1
-rw-r--r--drivers/media/video/cx231xx/Makefile2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-417.c2194
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c256
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c687
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c427
-rw-r--r--drivers/media/video/cx231xx/cx231xx-conf-reg.h1
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c787
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dif.h3178
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dvb.c250
-rw-r--r--drivers/media/video/cx231xx/cx231xx-i2c.c11
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c222
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c109
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c595
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h260
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c3
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c7
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c11
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c1
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c54
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c44
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c1
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c117
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c16
-rw-r--r--drivers/media/video/cx88/cx88-cards.c44
-rw-r--r--drivers/media/video/cx88/cx88-core.c30
-rw-r--r--drivers/media/video/cx88/cx88-dsp.c11
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c181
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c31
-rw-r--r--drivers/media/video/cx88/cx88-input.c57
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c6
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c43
-rw-r--r--drivers/media/video/cx88/cx88-vbi.c2
-rw-r--r--drivers/media/video/cx88/cx88-video.c86
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88.h74
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c40
-rw-r--r--drivers/media/video/davinci/vpif_capture.c18
-rw-r--r--drivers/media/video/davinci/vpif_display.c16
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c75
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c57
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c97
-rw-r--r--drivers/media/video/em28xx/em28xx.h18
-rw-r--r--drivers/media/video/fsl-viu.c7
-rw-r--r--drivers/media/video/gspca/Kconfig18
-rw-r--r--drivers/media/video/gspca/Makefile4
-rw-r--r--drivers/media/video/gspca/benq.c23
-rw-r--r--drivers/media/video/gspca/conex.c14
-rw-r--r--drivers/media/video/gspca/cpia1.c133
-rw-r--r--drivers/media/video/gspca/etoms.c12
-rw-r--r--drivers/media/video/gspca/finepix.c15
-rw-r--r--drivers/media/video/gspca/gl860/gl860-mi2020.c6
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c6
-rw-r--r--drivers/media/video/gspca/gspca.c161
-rw-r--r--drivers/media/video/gspca/gspca.h12
-rw-r--r--drivers/media/video/gspca/jeilinj.c15
-rw-r--r--drivers/media/video/gspca/konica.c646
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c8
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.c48
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h14
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.c70
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.h9
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c102
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h12
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.c136
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h13
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c28
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h14
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h12
-rw-r--r--drivers/media/video/gspca/mars.c327
-rw-r--r--drivers/media/video/gspca/mr97310a.c56
-rw-r--r--drivers/media/video/gspca/ov519.c389
-rw-r--r--drivers/media/video/gspca/ov534.c19
-rw-r--r--drivers/media/video/gspca/ov534_9.c19
-rw-r--r--drivers/media/video/gspca/pac207.c26
-rw-r--r--drivers/media/video/gspca/pac7302.c32
-rw-r--r--drivers/media/video/gspca/pac7311.c32
-rw-r--r--drivers/media/video/gspca/sn9c2028.c19
-rw-r--r--drivers/media/video/gspca/sn9c20x.c65
-rw-r--r--drivers/media/video/gspca/sonixb.c21
-rw-r--r--drivers/media/video/gspca/sonixj.c926
-rw-r--r--drivers/media/video/gspca/spca1528.c15
-rw-r--r--drivers/media/video/gspca/spca500.c14
-rw-r--r--drivers/media/video/gspca/spca501.c16
-rw-r--r--drivers/media/video/gspca/spca505.c18
-rw-r--r--drivers/media/video/gspca/spca508.c16
-rw-r--r--drivers/media/video/gspca/spca561.c16
-rw-r--r--drivers/media/video/gspca/sq905.c21
-rw-r--r--drivers/media/video/gspca/sq905c.c15
-rw-r--r--drivers/media/video/gspca/sq930x.c23
-rw-r--r--drivers/media/video/gspca/stk014.c174
-rw-r--r--drivers/media/video/gspca/stv0680.c17
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c14
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c19
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h4
-rw-r--r--drivers/media/video/gspca/sunplus.c27
-rw-r--r--drivers/media/video/gspca/t613.c10
-rw-r--r--drivers/media/video/gspca/tv8532.c8
-rw-r--r--drivers/media/video/gspca/vc032x.c19
-rw-r--r--drivers/media/video/gspca/w996Xcf.c10
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c3253
-rw-r--r--drivers/media/video/gspca/zc3xx.c37
-rw-r--r--drivers/media/video/hdpvr/hdpvr-control.c5
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c36
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c1
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c5
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h7
-rw-r--r--drivers/media/video/hexium_gemini.c1
-rw-r--r--drivers/media/video/hexium_orion.c1
-rw-r--r--drivers/media/video/imx074.c508
-rw-r--r--drivers/media/video/indycam.c27
-rw-r--r--drivers/media/video/ir-kbd-i2c.c62
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h14
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c42
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c1
-rw-r--r--drivers/media/video/ks0127.c27
-rw-r--r--drivers/media/video/m52790.c28
-rw-r--r--drivers/media/video/mem2mem_testdev.c2
-rw-r--r--drivers/media/video/msp3400-driver.c38
-rw-r--r--drivers/media/video/mt9m001.c26
-rw-r--r--drivers/media/video/mt9m111.c38
-rw-r--r--drivers/media/video/mt9t031.c24
-rw-r--r--drivers/media/video/mt9t112.c14
-rw-r--r--drivers/media/video/mt9v011.c29
-rw-r--r--drivers/media/video/mt9v022.c26
-rw-r--r--drivers/media/video/mx1_camera.c12
-rw-r--r--drivers/media/video/mx2_camera.c44
-rw-r--r--drivers/media/video/mx3_camera.c11
-rw-r--r--drivers/media/video/mxb.c17
-rw-r--r--drivers/media/video/omap/omap_vout.c2
-rw-r--r--drivers/media/video/omap1_camera.c1702
-rw-r--r--drivers/media/video/omap24xxcam.c4
-rw-r--r--drivers/media/video/ov6650.c1225
-rw-r--r--drivers/media/video/ov7670.c268
-rw-r--r--drivers/media/video/ov7670.h20
-rw-r--r--drivers/media/video/ov772x.c18
-rw-r--r--drivers/media/video/ov9640.c12
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c11
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c20
-rw-r--r--drivers/media/video/pwc/pwc-if.c35
-rw-r--r--drivers/media/video/pwc/pwc-misc.c4
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.c2
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c322
-rw-r--r--drivers/media/video/pwc/pwc.h6
-rw-r--r--drivers/media/video/pxa_camera.c12
-rw-r--r--drivers/media/video/rj54n1cb0c.c26
-rw-r--r--drivers/media/video/s2255drv.c4
-rw-r--r--drivers/media/video/s5p-fimc/Makefile2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c819
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c952
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h377
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c321
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h64
-rw-r--r--drivers/media/video/saa5246a.c1123
-rw-r--r--drivers/media/video/saa5249.c650
-rw-r--r--drivers/media/video/saa6588.c29
-rw-r--r--drivers/media/video/saa7110.c27
-rw-r--r--drivers/media/video/saa7115.c33
-rw-r--r--drivers/media/video/saa7127.c27
-rw-r--r--drivers/media/video/saa7134/Kconfig11
-rw-r--r--drivers/media/video/saa7134/Makefile7
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c27
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c15
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c16
-rw-r--r--drivers/media/video/saa7134/saa7134.h16
-rw-r--r--drivers/media/video/saa7164/Makefile2
-rw-r--r--drivers/media/video/saa7164/saa7164-api.c973
-rw-r--r--drivers/media/video/saa7164/saa7164-buffer.c194
-rw-r--r--drivers/media/video/saa7164/saa7164-bus.c131
-rw-r--r--drivers/media/video/saa7164/saa7164-cards.c33
-rw-r--r--drivers/media/video/saa7164/saa7164-cmd.c35
-rw-r--r--drivers/media/video/saa7164/saa7164-core.c890
-rw-r--r--drivers/media/video/saa7164/saa7164-dvb.c109
-rw-r--r--drivers/media/video/saa7164/saa7164-encoder.c1503
-rw-r--r--drivers/media/video/saa7164/saa7164-fw.c11
-rw-r--r--drivers/media/video/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/video/saa7164/saa7164-reg.h59
-rw-r--r--drivers/media/video/saa7164/saa7164-types.h255
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c1375
-rw-r--r--drivers/media/video/saa7164/saa7164.h295
-rw-r--r--drivers/media/video/saa717x.c27
-rw-r--r--drivers/media/video/saa7185.c28
-rw-r--r--drivers/media/video/saa7191.c27
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c30
-rw-r--r--drivers/media/video/sh_vou.c7
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h4
-rw-r--r--drivers/media/video/soc_camera.c200
-rw-r--r--drivers/media/video/sr030pc30.c894
-rw-r--r--drivers/media/video/tda7432.c27
-rw-r--r--drivers/media/video/tda9840.c27
-rw-r--r--drivers/media/video/tda9875.c27
-rw-r--r--drivers/media/video/tea6415c.c27
-rw-r--r--drivers/media/video/tea6420.c27
-rw-r--r--drivers/media/video/tlg2300/pd-video.c4
-rw-r--r--drivers/media/video/tlv320aic23b.c28
-rw-r--r--drivers/media/video/tuner-core.c40
-rw-r--r--drivers/media/video/tvaudio.c40
-rw-r--r--drivers/media/video/tvp514x.c67
-rw-r--r--drivers/media/video/tvp5150.c31
-rw-r--r--drivers/media/video/tvp7002.c126
-rw-r--r--drivers/media/video/tw9910.c20
-rw-r--r--drivers/media/video/upd64031a.c27
-rw-r--r--drivers/media/video/upd64083.c27
-rw-r--r--drivers/media/video/usbvideo/Kconfig10
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c15
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c8
-rw-r--r--drivers/media/video/usbvision/usbvision.h1
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c712
-rw-r--r--drivers/media/video/uvc/uvc_driver.c19
-rw-r--r--drivers/media/video/uvc/uvc_isight.c2
-rw-r--r--drivers/media/video/uvc/uvc_queue.c11
-rw-r--r--drivers/media/video/uvc/uvc_status.c4
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c56
-rw-r--r--drivers/media/video/uvc/uvc_video.c52
-rw-r--r--drivers/media/video/uvc/uvcvideo.h40
-rw-r--r--drivers/media/video/v4l1-compat.c13
-rw-r--r--drivers/media/video/v4l2-common.c27
-rw-r--r--drivers/media/video/v4l2-ctrls.c4
-rw-r--r--drivers/media/video/v4l2-dev.c115
-rw-r--r--drivers/media/video/v4l2-event.c9
-rw-r--r--drivers/media/video/v4l2-mem2mem.c8
-rw-r--r--drivers/media/video/via-camera.c1474
-rw-r--r--drivers/media/video/via-camera.h93
-rw-r--r--drivers/media/video/videobuf-core.c115
-rw-r--r--drivers/media/video/videobuf-dma-contig.c15
-rw-r--r--drivers/media/video/videobuf-dma-sg.c13
-rw-r--r--drivers/media/video/videobuf-dvb.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c9
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vivi.c17
-rw-r--r--drivers/media/video/vp27smpx.c28
-rw-r--r--drivers/media/video/vpx3220.c27
-rw-r--r--drivers/media/video/wm8739.c27
-rw-r--r--drivers/media/video/wm8775.c132
-rw-r--r--drivers/media/video/zoran/zoran.h2
-rw-r--r--drivers/media/video/zoran/zoran_card.c23
-rw-r--r--drivers/media/video/zoran/zoran_device.c12
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/media/video/zr364xx.c4
-rw-r--r--drivers/misc/Kconfig45
-rw-r--r--drivers/misc/Makefile4
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c2
-rw-r--r--drivers/misc/ad525x_dpot-spi.c6
-rw-r--r--drivers/misc/ad525x_dpot.c117
-rw-r--r--drivers/misc/ad525x_dpot.h37
-rw-r--r--drivers/misc/apds9802als.c347
-rw-r--r--drivers/misc/apds990x.c1295
-rw-r--r--drivers/misc/bh1770glc.c1413
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c1
-rw-r--r--drivers/misc/isl29020.c248
-rw-r--r--drivers/misc/lkdtm.c128
-rw-r--r--drivers/misc/phantom.c8
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c17
-rw-r--r--drivers/mmc/Makefile4
-rw-r--r--drivers/mmc/card/Kconfig17
-rw-r--r--drivers/mmc/card/Makefile4
-rw-r--r--drivers/mmc/card/block.c61
-rw-r--r--drivers/mmc/card/mmc_test.c469
-rw-r--r--drivers/mmc/card/queue.c14
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/bus.c58
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/core.c179
-rw-r--r--drivers/mmc/core/core.h7
-rw-r--r--drivers/mmc/core/debugfs.c35
-rw-r--r--drivers/mmc/core/host.c3
-rw-r--r--drivers/mmc/core/mmc.c58
-rw-r--r--drivers/mmc/core/sd.c10
-rw-r--r--drivers/mmc/core/sdio.c54
-rw-r--r--drivers/mmc/core/sdio_bus.c85
-rw-r--r--drivers/mmc/host/Kconfig39
-rw-r--r--drivers/mmc/host/Makefile7
-rw-r--r--drivers/mmc/host/at91_mci.c11
-rw-r--r--drivers/mmc/host/atmel-mci.c5
-rw-r--r--drivers/mmc/host/au1xmmc.c4
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/cb710-mmc.c54
-rw-r--r--drivers/mmc/host/davinci_mmc.c8
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c3
-rw-r--r--drivers/mmc/host/mmc_spi.c24
-rw-r--r--drivers/mmc/host/mmci.c31
-rw-r--r--drivers/mmc/host/msm_sdcc.c3
-rw-r--r--drivers/mmc/host/mvsdio.c3
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/omap.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c24
-rw-r--r--drivers/mmc/host/pxamci.c43
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c143
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h83
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c70
-rw-r--r--drivers/mmc/host/sdhci-pci.c89
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c44
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h10
-rw-r--r--drivers/mmc/host/sdhci-pxa.c253
-rw-r--r--drivers/mmc/host/sdhci.c86
-rw-r--r--drivers/mmc/host/sdhci.h150
-rw-r--r--drivers/mmc/host/sh_mmcif.c3
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/ushc.c566
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mmc/host/wbsd.c3
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/atl1c/atl1c.h2
-rw-r--r--drivers/net/atl1c/atl1c_main.c6
-rw-r--r--drivers/net/atlx/atl1.c12
-rw-r--r--drivers/net/atlx/atl1.h9
-rw-r--r--drivers/net/atlx/atlx.c4
-rw-r--r--drivers/net/benet/be_cmds.c36
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_main.c49
-rw-r--r--drivers/net/bnx2x/bnx2x.h5
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h55
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h34
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c137
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h15
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c55
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/caif/Kconfig7
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c129
-rw-r--r--drivers/net/caif/caif_shmcore.c744
-rw-r--r--drivers/net/can/Kconfig8
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c95
-rw-r--r--drivers/net/can/flexcan.c3
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/pch_can.c1463
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/tscan1.c216
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c33
-rw-r--r--drivers/net/cxgb4/sge.c23
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c42
-rw-r--r--drivers/net/gianfar.c6
-rw-r--r--drivers/net/jme.c45
-rw-r--r--drivers/net/macb.c27
-rw-r--r--drivers/net/mlx4/en_main.c15
-rw-r--r--drivers/net/mlx4/en_netdev.c10
-rw-r--r--drivers/net/mlx4/en_port.c4
-rw-r--r--drivers/net/mlx4/en_port.h3
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/icm.c28
-rw-r--r--drivers/net/mlx4/icm.h2
-rw-r--r--drivers/net/mlx4/intf.c21
-rw-r--r--drivers/net/mlx4/main.c4
-rw-r--r--drivers/net/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/mlx4/port.c30
-rw-r--r--drivers/net/phy/phy.c13
-rw-r--r--drivers/net/phy/phy_device.c19
-rw-r--r--drivers/net/qlcnic/qlcnic.h7
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c23
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c19
-rw-r--r--drivers/net/qlge/qlge.h12
-rw-r--r--drivers/net/qlge/qlge_main.c24
-rw-r--r--drivers/net/qlge/qlge_mpi.c6
-rw-r--r--drivers/net/sb1000.c6
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/slhc.c15
-rw-r--r--drivers/net/smsc911x.c3
-rw-r--r--drivers/net/smsc911x.h11
-rw-r--r--drivers/net/tg3.c10
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/typhoon.c92
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c22
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h19
-rw-r--r--drivers/net/vxge/vxge-config.c332
-rw-r--r--drivers/net/vxge/vxge-config.h227
-rw-r--r--drivers/net/vxge/vxge-ethtool.c2
-rw-r--r--drivers/net/vxge/vxge-main.c64
-rw-r--r--drivers/net/vxge/vxge-main.h59
-rw-r--r--drivers/net/vxge/vxge-traffic.c101
-rw-r--r--drivers/net/vxge/vxge-traffic.h134
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h191
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h51
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c25
-rw-r--r--drivers/net/wireless/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c3
-rw-r--r--drivers/net/wireless/wl1251/Makefile8
-rw-r--r--drivers/oprofile/oprofilefs.c1
-rw-r--r--drivers/parisc/dino.c29
-rw-r--r--drivers/parisc/eisa.c29
-rw-r--r--drivers/parisc/gsc.c36
-rw-r--r--drivers/parisc/iosapic.c56
-rw-r--r--drivers/parisc/led.c6
-rw-r--r--drivers/parisc/superio.c25
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/bus.c53
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c4
-rw-r--r--drivers/pci/msi.h4
-rw-r--r--drivers/pci/pci.c79
-rw-r--r--drivers/pci/pci.h3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h3
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c34
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c2
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/quirks.c31
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c1
-rw-r--r--drivers/pnp/base.h5
-rw-r--r--drivers/pnp/core.c8
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c31
-rw-r--r--drivers/pnp/resource.c10
-rw-r--r--drivers/power/Kconfig24
-rw-r--r--drivers/power/Makefile19
-rw-r--r--drivers/power/bq20z75.c493
-rw-r--r--drivers/power/bq27x00_battery.c1
-rw-r--r--drivers/power/ds2760_battery.c1
-rw-r--r--drivers/power/ds2782_battery.c12
-rw-r--r--drivers/power/isp1704_charger.c369
-rw-r--r--drivers/power/jz4740-battery.c1
-rw-r--r--drivers/power/olpc_battery.c8
-rw-r--r--drivers/power/pcf50633-charger.c1
-rw-r--r--drivers/power/power_supply_sysfs.c4
-rw-r--r--drivers/power/twl4030_charger.c565
-rw-r--r--drivers/power/wm831x_power.c2
-rw-r--r--drivers/rapidio/rio-driver.c2
-rw-r--r--drivers/rapidio/rio-scan.c187
-rw-r--r--drivers/rapidio/rio-sysfs.c26
-rw-r--r--drivers/rapidio/rio.c330
-rw-r--r--drivers/rapidio/rio.h5
-rw-r--r--drivers/rapidio/switches/Kconfig7
-rw-r--r--drivers/rapidio/switches/Makefile1
-rw-r--r--drivers/rapidio/switches/idt_gen2.c447
-rw-r--r--drivers/rapidio/switches/idtcps.c10
-rw-r--r--drivers/rapidio/switches/tsi568.c15
-rw-r--r--drivers/rapidio/switches/tsi57x.c7
-rw-r--r--drivers/rtc/Kconfig24
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c4
-rw-r--r--drivers/rtc/rtc-bfin.c43
-rw-r--r--drivers/rtc/rtc-ds3232.c181
-rw-r--r--drivers/rtc/rtc-jz4740.c45
-rw-r--r--drivers/rtc/rtc-lpc32xx.c414
-rw-r--r--drivers/rtc/rtc-omap.c12
-rw-r--r--drivers/rtc/rtc-s3c.c92
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/cpia/Kconfig39
-rw-r--r--drivers/staging/cpia/Makefile5
-rw-r--r--drivers/staging/cpia/TODO8
-rw-r--r--drivers/staging/cpia/cpia.c (renamed from drivers/media/video/cpia.c)0
-rw-r--r--drivers/staging/cpia/cpia.h (renamed from drivers/media/video/cpia.h)0
-rw-r--r--drivers/staging/cpia/cpia_pp.c (renamed from drivers/media/video/cpia_pp.c)0
-rw-r--r--drivers/staging/cpia/cpia_usb.c (renamed from drivers/media/video/cpia_usb.c)0
-rw-r--r--drivers/staging/cx25821/Kconfig2
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c13
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.h4
-rw-r--r--drivers/staging/cx25821/cx25821-audio.h10
-rw-r--r--drivers/staging/cx25821/cx25821-core.c64
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-reg.h10
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c8
-rw-r--r--drivers/staging/cx25821/cx25821-reg.h4
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c135
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.h14
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c28
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.h10
-rw-r--r--drivers/staging/cx25821/cx25821-video.c6
-rw-r--r--drivers/staging/cx25821/cx25821.h51
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c8
-rw-r--r--drivers/staging/go7007/Kconfig2
-rw-r--r--drivers/staging/go7007/go7007-driver.c55
-rw-r--r--drivers/staging/go7007/go7007-usb.c2
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c19
-rw-r--r--drivers/staging/go7007/s2250-board.c34
-rw-r--r--drivers/staging/go7007/wis-ov7640.c1
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/go7007/wis-uda1342.c1
-rw-r--r--drivers/staging/lirc/Kconfig2
-rw-r--r--drivers/staging/lirc/lirc_igorplugusb.c190
-rw-r--r--drivers/staging/lirc/lirc_it87.c23
-rw-r--r--drivers/staging/lirc/lirc_ite8709.c6
-rw-r--r--drivers/staging/lirc/lirc_parallel.c61
-rw-r--r--drivers/staging/lirc/lirc_serial.c24
-rw-r--r--drivers/staging/lirc/lirc_sir.c24
-rw-r--r--drivers/staging/lirc/lirc_zilog.c3
-rw-r--r--drivers/staging/pohmelfs/inode.c6
-rw-r--r--drivers/staging/stradis/Kconfig7
-rw-r--r--drivers/staging/stradis/Makefile3
-rw-r--r--drivers/staging/stradis/TODO6
-rw-r--r--drivers/staging/stradis/stradis.c (renamed from drivers/media/video/stradis.c)0
-rw-r--r--drivers/staging/tm6000/TODO6
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c102
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c41
-rw-r--r--drivers/staging/tm6000/tm6000-core.c157
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c32
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c43
-rw-r--r--drivers/staging/tm6000/tm6000-input.c34
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h32
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c350
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h32
-rw-r--r--drivers/staging/tm6000/tm6000-video.c442
-rw-r--r--drivers/staging/tm6000/tm6000.h54
-rw-r--r--drivers/usb/core/inode.c1
-rw-r--r--drivers/usb/gadget/f_fs.c1
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/au1200fb.c2
-rw-r--r--drivers/video/fbmem.c60
-rw-r--r--drivers/video/gbefb.c6
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c5
-rw-r--r--drivers/video/matrox/matroxfb_maven.c2
-rw-r--r--drivers/video/omap/blizzard.c2
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c2
-rw-r--r--drivers/video/omap2/displays/Kconfig2
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c6
-rw-r--r--drivers/video/omap2/displays/panel-generic.c6
-rw-r--r--drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c6
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/video/omap2/displays/panel-toppoly-tdo35s.c6
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c6
-rw-r--r--drivers/video/omap2/dss/Makefile2
-rw-r--r--drivers/video/omap2/dss/core.c3
-rw-r--r--drivers/video/omap2/dss/dispc.c270
-rw-r--r--drivers/video/omap2/dss/dsi.c1
-rw-r--r--drivers/video/omap2/dss/dss_features.c191
-rw-r--r--drivers/video/omap2/dss/dss_features.h50
-rw-r--r--drivers/video/omap2/dss/manager.c33
-rw-r--r--drivers/video/omap2/dss/overlay.c24
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c26
-rw-r--r--drivers/video/savage/savagefb-i2c.c9
-rw-r--r--drivers/video/via/Makefile2
-rw-r--r--drivers/video/via/accel.c55
-rw-r--r--drivers/video/via/accel.h3
-rw-r--r--drivers/video/via/chip.h3
-rw-r--r--drivers/video/via/dvi.c189
-rw-r--r--drivers/video/via/dvi.h4
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c648
-rw-r--r--drivers/video/via/hw.h53
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/video/via/lcd.c90
-rw-r--r--drivers/video/via/lcd.h6
-rw-r--r--drivers/video/via/lcdtbl.h591
-rw-r--r--drivers/video/via/tbl1636.c71
-rw-r--r--drivers/video/via/tbl1636.h34
-rw-r--r--drivers/video/via/via-core.c32
-rw-r--r--drivers/video/via/via_i2c.c31
-rw-r--r--drivers/video/via/viafbdev.c294
-rw-r--r--drivers/video/via/viafbdev.h7
-rw-r--r--drivers/video/via/vt1636.c121
-rw-r--r--drivers/w1/w1.c8
-rw-r--r--drivers/xen/events.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c4
-rw-r--r--drivers/xen/xenfs/Makefile3
-rw-r--r--drivers/xen/xenfs/privcmd.c404
-rw-r--r--drivers/xen/xenfs/super.c95
-rw-r--r--drivers/xen/xenfs/xenfs.h3
-rw-r--r--drivers/xen/xenfs/xenstored.c68
-rw-r--r--firmware/ihex2fw.c17
-rw-r--r--fs/9p/Kconfig13
-rw-r--r--fs/9p/Makefile1
-rw-r--r--fs/9p/acl.c392
-rw-r--r--fs/9p/acl.h49
-rw-r--r--fs/9p/fid.c1
-rw-r--r--fs/9p/v9fs.c22
-rw-r--r--fs/9p/v9fs.h10
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_addr.c30
-rw-r--r--fs/9p/vfs_dir.c4
-rw-r--r--fs/9p/vfs_file.c265
-rw-r--r--fs/9p/vfs_inode.c258
-rw-r--r--fs/9p/vfs_super.c14
-rw-r--r--fs/9p/xattr.c52
-rw-r--r--fs/9p/xattr.h6
-rw-r--r--fs/Kconfig7
-rw-r--r--fs/Kconfig.binfmt4
-rw-r--r--fs/Makefile5
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/write.c19
-rw-r--r--fs/aio.c14
-rw-r--r--fs/anon_inodes.c6
-rw-r--r--fs/autofs4/inode.c1
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/binfmt_misc.c1
-rw-r--r--fs/block_dev.c34
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/buffer.c29
-rw-r--r--fs/ceph/addr.c9
-rw-r--r--fs/cifs/file.c10
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/compat.c2
-rw-r--r--fs/configfs/inode.c1
-rw-r--r--fs/dcache.c277
-rw-r--r--fs/debugfs/inode.c1
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/eventpoll.c35
-rw-r--r--fs/exec.c173
-rw-r--r--fs/exofs/file.c6
-rw-r--r--fs/exofs/namei.c2
-rw-r--r--fs/exportfs/expfs.c17
-rw-r--r--fs/ext2/balloc.c3
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/inode.c11
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/super.c4
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext3/balloc.c17
-rw-r--r--fs/ext3/ialloc.c11
-rw-r--r--fs/ext3/inode.c24
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext3/resize.c13
-rw-r--r--fs/ext3/super.c41
-rw-r--r--fs/ext4/Makefile2
-rw-r--r--fs/ext4/balloc.c5
-rw-r--r--fs/ext4/block_validity.c7
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h110
-rw-r--r--fs/ext4/ext4_extents.h65
-rw-r--r--fs/ext4/extents.c368
-rw-r--r--fs/ext4/file.c44
-rw-r--r--fs/ext4/fsync.c83
-rw-r--r--fs/ext4/ialloc.c135
-rw-r--r--fs/ext4/inode.c596
-rw-r--r--fs/ext4/mballoc.c555
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c22
-rw-r--r--fs/ext4/namei.c65
-rw-r--r--fs/ext4/page-io.c430
-rw-r--r--fs/ext4/resize.c52
-rw-r--r--fs/ext4/super.c531
-rw-r--r--fs/ext4/xattr.c4
-rw-r--r--fs/ext4/xattr.h10
-rw-r--r--fs/fcntl.c62
-rw-r--r--fs/file_table.c17
-rw-r--r--fs/freevxfs/vxfs_inode.c1
-rw-r--r--fs/fs-writeback.c88
-rw-r--r--fs/fuse/control.c1
-rw-r--r--fs/fuse/dev.c19
-rw-r--r--fs/gfs2/aops.c3
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/ops_inode.c8
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/hfs/hfs_fs.h13
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfs/mdb.c4
-rw-r--r--fs/hfs/super.c1
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hfsplus/ioctl.c2
-rw-r--r--fs/hostfs/hostfs.h3
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/hostfs/hostfs_user.c14
-rw-r--r--fs/hugetlbfs/inode.c16
-rw-r--r--fs/inode.c527
-rw-r--r--fs/internal.h7
-rw-r--r--fs/ioctl.c39
-rw-r--r--fs/isofs/inode.c57
-rw-r--r--fs/jbd/checkpoint.c4
-rw-r--r--fs/jbd/commit.c8
-rw-r--r--fs/jbd/journal.c44
-rw-r--r--fs/jbd/recovery.c2
-rw-r--r--fs/jbd/transaction.c6
-rw-r--r--fs/jbd2/checkpoint.c10
-rw-r--r--fs/jbd2/commit.c12
-rw-r--r--fs/jbd2/journal.c6
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/jffs2/dir.c4
-rw-r--r--fs/jfs/jfs_imap.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/libfs.c8
-rw-r--r--fs/lockd/host.c1
-rw-r--r--fs/lockd/mon.c1
-rw-r--r--fs/lockd/svc.c13
-rw-r--r--fs/lockd/svc4proc.c2
-rw-r--r--fs/lockd/svclock.c37
-rw-r--r--fs/lockd/svcproc.c2
-rw-r--r--fs/lockd/svcsubs.c9
-rw-r--r--fs/locks.c76
-rw-r--r--fs/logfs/dir.c2
-rw-r--r--fs/minix/namei.c2
-rw-r--r--fs/namei.c16
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/Kconfig9
-rw-r--r--fs/nfs/Makefile4
-rw-r--r--fs/nfs/callback.c4
-rw-r--r--fs/nfs/callback_proc.c8
-rw-r--r--fs/nfs/client.c11
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/dns_resolve.c6
-rw-r--r--fs/nfs/file.c5
-rw-r--r--fs/nfs/getroot.c3
-rw-r--r--fs/nfs/inode.c3
-rw-r--r--fs/nfs/mount_clnt.c2
-rw-r--r--fs/nfs/nfs4filelayout.c280
-rw-r--r--fs/nfs/nfs4filelayout.h94
-rw-r--r--fs/nfs/nfs4filelayoutdev.c448
-rw-r--r--fs/nfs/nfs4proc.c218
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/nfs4xdr.c360
-rw-r--r--fs/nfs/nfsroot.c2
-rw-r--r--fs/nfs/pnfs.c783
-rw-r--r--fs/nfs/pnfs.h189
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nfsd/Kconfig13
-rw-r--r--fs/nfsd/export.c73
-rw-r--r--fs/nfsd/nfs4callback.c245
-rw-r--r--fs/nfsd/nfs4idmap.c105
-rw-r--r--fs/nfsd/nfs4proc.c7
-rw-r--r--fs/nfsd/nfs4state.c519
-rw-r--r--fs/nfsd/nfs4xdr.c18
-rw-r--r--fs/nfsd/nfsctl.c26
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfssvc.c5
-rw-r--r--fs/nfsd/state.h52
-rw-r--r--fs/nfsd/vfs.c16
-rw-r--r--fs/nilfs2/namei.c2
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/notify/fsnotify.c33
-rw-r--r--fs/notify/inode_mark.c2
-rw-r--r--fs/ntfs/super.c19
-rw-r--r--fs/ocfs2/aops.c19
-rw-r--r--fs/ocfs2/aops.h3
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/file.c9
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/Kconfig4
-rw-r--r--fs/proc/base.c111
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/softirqs.c4
-rw-r--r--fs/proc/stat.c14
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/quota/Kconfig4
-rw-r--r--fs/quota/dquot.c30
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/read_write.c28
-rw-r--r--fs/reiserfs/inode.c26
-rw-r--r--fs/reiserfs/ioctl.c6
-rw-r--r--fs/reiserfs/namei.c2
-rw-r--r--fs/reiserfs/xattr.c7
-rw-r--r--fs/select.c6
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/signalfd.c10
-rw-r--r--fs/smbfs/dir.c16
-rw-r--r--fs/smbfs/inode.c1
-rw-r--r--fs/smbfs/proc.c10
-rw-r--r--fs/super.c8
-rw-r--r--fs/sysv/namei.c2
-rw-r--r--fs/ubifs/dir.c2
-rw-r--r--fs/udf/namei.c2
-rw-r--r--fs/ufs/namei.c2
-rw-r--r--fs/xfs/Kconfig1
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c2
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--include/acpi/acpi_bus.h14
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpiosxf.h14
-rw-r--r--include/acpi/acpixf.h11
-rw-r--r--include/acpi/actypes.h30
-rw-r--r--include/acpi/platform/acenv.h6
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h7
-rw-r--r--include/asm-generic/cputime.h6
-rw-r--r--include/asm-generic/gpio.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h3
-rw-r--r--include/drm/drmP.h45
-rw-r--r--include/drm/drm_crtc.h4
-rw-r--r--include/drm/drm_crtc_helper.h8
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/i915_drm.h6
-rw-r--r--include/drm/intel-gtt.h18
-rw-r--r--include/drm/ttm/ttm_bo_api.h3
-rw-r--r--include/drm/ttm/ttm_bo_driver.h27
-rw-r--r--include/drm/vmwgfx_drm.h1
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/amba/pl08x.h222
-rw-r--r--include/linux/backing-dev.h3
-rw-r--r--include/linux/basic_mmio_gpio.h20
-rw-r--r--include/linux/blkdev.h8
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/completion.h10
-rw-r--r--include/linux/connector.h8
-rw-r--r--include/linux/dmaengine.h60
-rw-r--r--include/linux/fb.h6
-rw-r--r--include/linux/fs.h68
-rw-r--r--include/linux/gfp.h105
-rw-r--r--include/linux/highmem.h74
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/i2c/adp5588.h21
-rw-r--r--include/linux/i2c/apds990x.h79
-rw-r--r--include/linux/i2c/bh1770glc.h53
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/intel_mid_dma.h16
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io-mapping.h14
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h36
-rw-r--r--include/linux/kernel_stat.h18
-rw-r--r--include/linux/kfifo.h28
-rw-r--r--include/linux/list.h6
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/math64.h12
-rw-r--r--include/linux/memory_hotplug.h4
-rw-r--r--include/linux/migrate.h16
-rw-r--r--include/linux/mlx4/cmd.h2
-rw-r--r--include/linux/mlx4/device.h35
-rw-r--r--include/linux/mlx4/driver.h9
-rw-r--r--include/linux/mlx4/qp.h9
-rw-r--r--include/linux/mm.h29
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/mmc/core.h2
-rw-r--r--include/linux/mmc/host.h48
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdhci-pltfm.h (renamed from include/linux/sdhci-pltfm.h)2
-rw-r--r--include/linux/mmc/sdhci.h144
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h10
-rw-r--r--include/linux/moduleparam.h4
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h18
-rw-r--r--include/linux/nfs4.h65
-rw-r--r--include/linux/nfs_fs.h5
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h50
-rw-r--r--include/linux/pageblock-flags.h5
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h28
-rw-r--r--include/linux/pci_regs.h6
-rw-r--r--include/linux/percpu-defs.h12
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/ptrace.h12
-rw-r--r--include/linux/ramoops.h15
-rw-r--r--include/linux/ratelimit.h2
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/ring_buffer.h12
-rw-r--r--include/linux/rio.h17
-rw-r--r--include/linux/rio_ids.h2
-rw-r--r--include/linux/rio_regs.h18
-rw-r--r--include/linux/rmap.h30
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sfi.h24
-rw-r--r--include/linux/signalfd.h3
-rw-r--r--include/linux/smp.h19
-rw-r--r--include/linux/spi/74x164.h11
-rw-r--r--include/linux/sunrpc/auth.h4
-rw-r--r--include/linux/sunrpc/cache.h37
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/gss_spkm3.h55
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc_xprt.h32
-rw-r--r--include/linux/sunrpc/svcauth.h17
-rw-r--r--include/linux/sunrpc/xdr.h7
-rw-r--r--include/linux/sunrpc/xprt.h4
-rw-r--r--include/linux/swap.h10
-rw-r--r--include/linux/synclink.h5
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/types.h20
-rw-r--r--include/linux/via-core.h4
-rw-r--r--include/linux/videodev2.h12
-rw-r--r--include/linux/videotext.h125
-rw-r--r--include/linux/virtio_9p.h1
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/linux/workqueue.h6
-rw-r--r--include/linux/writeback.h6
-rw-r--r--include/media/ir-core.h41
-rw-r--r--include/media/ir-kbd-i2c.h10
-rw-r--r--include/media/lirc_dev.h6
-rw-r--r--include/media/omap1_camera.h35
-rw-r--r--include/media/rc-map.h19
-rw-r--r--include/media/s3c_fimc.h60
-rw-r--r--include/media/sh_vou.h1
-rw-r--r--include/media/soc_camera.h9
-rw-r--r--include/media/sr030pc30.h21
-rw-r--r--include/media/v4l2-chip-ident.h8
-rw-r--r--include/media/v4l2-common.h10
-rw-r--r--include/media/v4l2-dev.h8
-rw-r--r--include/media/v4l2-device.h57
-rw-r--r--include/media/v4l2-i2c-drv.h80
-rw-r--r--include/media/v4l2-mediabus.h8
-rw-r--r--include/media/v4l2-subdev.h24
-rw-r--r--include/media/videobuf-core.h19
-rw-r--r--include/media/videobuf-dma-contig.h3
-rw-r--r--include/media/videobuf-dma-sg.h3
-rw-r--r--include/media/videobuf-vmalloc.h3
-rw-r--r--include/media/wm8775.h3
-rw-r--r--include/net/9p/9p.h54
-rw-r--r--include/net/9p/client.h4
-rw-r--r--include/net/caif/caif_shm.h26
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/fib_rules.h2
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/inetpeer.h2
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ipip.h6
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/rdma/ib_addr.h134
-rw-r--r--include/rdma/ib_pack.h39
-rw-r--r--include/rdma/ib_user_verbs.h3
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--include/scsi/srp.h38
-rw-r--r--include/trace/events/ext4.h385
-rw-r--r--include/trace/events/irq.h54
-rw-r--r--include/trace/events/jbd2.h78
-rw-r--r--include/trace/events/vmscan.h44
-rw-r--r--include/trace/events/writeback.h37
-rw-r--r--include/xen/Kbuild1
-rw-r--r--include/xen/interface/memory.h29
-rw-r--r--include/xen/privcmd.h80
-rw-r--r--include/xen/xen-ops.h5
-rw-r--r--init/Kconfig117
-rw-r--r--init/do_mounts.c4
-rw-r--r--init/do_mounts_md.c2
-rw-r--r--init/do_mounts_rd.c4
-rw-r--r--init/initramfs.c5
-rw-r--r--init/noinitramfs.c6
-rw-r--r--ipc/compat.c6
-rw-r--r--ipc/compat_mq.c5
-rw-r--r--ipc/mqueue.c3
-rw-r--r--ipc/shm.c63
-rw-r--r--kernel/cgroup.c130
-rw-r--r--kernel/cgroup_freezer.c72
-rw-r--r--kernel/cred.c4
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c17
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/irq/irqdesc.c15
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kprobes.c7
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/ns_cgroup.c8
-rw-r--r--kernel/perf_event.c94
-rw-r--r--kernel/power/snapshot.c18
-rw-r--r--kernel/power/swap.c6
-rw-r--r--kernel/printk.c5
-rw-r--r--kernel/ptrace.c36
-rw-r--r--kernel/resource.c153
-rw-r--r--kernel/signal.c5
-rw-r--r--kernel/smp.c8
-rw-r--r--kernel/softirq.c16
-rw-r--r--kernel/stop_machine.c6
-rw-r--r--kernel/sysctl.c14
-rw-r--r--kernel/taskstats.c172
-rw-r--r--kernel/trace/ring_buffer.c335
-rw-r--r--kernel/trace/trace.c8
-rw-r--r--kernel/trace/trace_kprobe.c1
-rw-r--r--kernel/tsacct.c10
-rw-r--r--kernel/user.c1
-rw-r--r--kernel/wait.c6
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig.debug32
-rw-r--r--lib/bitmap.c3
-rw-r--r--lib/div64.c52
-rw-r--r--lib/idr.c65
-rw-r--r--lib/list_sort.c172
-rw-r--r--lib/parser.c7
-rw-r--r--lib/percpu_counter.c55
-rw-r--r--lib/vsprintf.c19
-rw-r--r--mm/backing-dev.c74
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/filemap.c42
-rw-r--r--mm/highmem.c66
-rw-r--r--mm/hugetlb.c238
-rw-r--r--mm/internal.h2
-rw-r--r--mm/maccess.c2
-rw-r--r--mm/memcontrol.c406
-rw-r--r--mm/memory-failure.c176
-rw-r--r--mm/memory.c35
-rw-r--r--mm/memory_hotplug.c48
-rw-r--r--mm/mempolicy.c17
-rw-r--r--mm/migrate.c249
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/nommu.c49
-rw-r--r--mm/oom_kill.c33
-rw-r--r--mm/page-writeback.c31
-rw-r--r--mm/page_alloc.c99
-rw-r--r--mm/page_isolation.c3
-rw-r--r--mm/rmap.c33
-rw-r--r--mm/shmem.c7
-rw-r--r--mm/slab.c2
-rw-r--r--mm/swap.c1
-rw-r--r--mm/swapfile.c49
-rw-r--r--mm/vmalloc.c56
-rw-r--r--mm/vmscan.c218
-rw-r--r--mm/vmstat.c44
-rw-r--r--net/802/garp.c18
-rw-r--r--net/802/stp.c4
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/9p/client.c178
-rw-r--r--net/9p/protocol.c5
-rw-r--r--net/9p/trans_virtio.c76
-rw-r--r--net/core/dev.c38
-rw-r--r--net/core/fib_rules.c21
-rw-r--r--net/core/filter.c4
-rw-r--r--net/core/net-sysfs.c20
-rw-r--r--net/core/net_namespace.c4
-rw-r--r--net/core/pktgen.c30
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/sysctl_net_core.c3
-rw-r--r--net/ipv4/fib_hash.c36
-rw-r--r--net/ipv4/gre.c5
-rw-r--r--net/ipv4/inetpeer.c138
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_sockglue.c10
-rw-r--r--net/ipv4/ipip.c1
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/route.c75
-rw-r--r--net/ipv4/tunnel4.c29
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/addrconf.c16
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c5
-rw-r--r--net/ipv6/protocol.c8
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/tunnel6.c24
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/l2tp/l2tp_core.c53
-rw-r--r--net/l2tp/l2tp_core.h33
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/mac80211/ibss.c1
-rw-r--r--net/mac80211/main.c8
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/xt_TPROXY.c10
-rw-r--r--net/netfilter/xt_socket.c12
-rw-r--r--net/netlink/af_netlink.c65
-rw-r--r--net/socket.c6
-rw-r--r--net/sunrpc/Kconfig19
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_generic.c2
-rw-r--r--net/sunrpc/auth_gss/Makefile5
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c247
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c186
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c267
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c127
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c51
-rw-r--r--net/sunrpc/cache.c288
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/netns.h19
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/stats.c43
-rw-r--r--net/sunrpc/sunrpc_syms.c58
-rw-r--r--net/sunrpc/svc.c3
-rw-r--r--net/sunrpc/svc_xprt.c59
-rw-r--r--net/sunrpc/svcauth_unix.c194
-rw-r--r--net/sunrpc/svcsock.c27
-rw-r--r--net/sunrpc/xprt.c39
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c11
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c19
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c82
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c49
-rw-r--r--net/sunrpc/xprtrdma/transport.c25
-rw-r--r--net/sunrpc/xprtsock.c358
-rw-r--r--net/unix/af_unix.c14
-rw-r--r--net/wireless/reg.c2
-rwxr-xr-xscripts/checkpatch.pl150
-rwxr-xr-xscripts/get_maintainer.pl1162
-rw-r--r--security/apparmor/path.c2
-rw-r--r--security/inode.c1
-rw-r--r--security/integrity/ima/ima.h18
-rw-r--r--security/integrity/ima/ima_api.c2
-rw-r--r--security/integrity/ima/ima_iint.c158
-rw-r--r--security/integrity/ima/ima_main.c184
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/selinuxfs.c1
-rw-r--r--security/tomoyo/realpath.c2
-rw-r--r--sound/oss/sb_ess.c1
-rw-r--r--sound/pci/hda/patch_sigmatel.c78
-rw-r--r--sound/soc/codecs/ad73311.c2
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/wm9090.c2
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c3
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/proc.c5
-rw-r--r--sound/usb/urb.c170
-rw-r--r--tools/perf/Documentation/perf-list.txt17
-rw-r--r--tools/perf/Documentation/perf-probe.txt18
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/builtin-probe.c78
-rw-r--r--tools/perf/builtin-record.c8
-rw-r--r--tools/perf/builtin-trace.c17
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-report2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-report2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-report2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-report2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-report2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py58
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-report2
-rw-r--r--tools/perf/scripts/python/bin/futex-contention-record2
-rw-r--r--tools/perf/scripts/python/bin/futex-contention-report4
-rw-r--r--tools/perf/scripts/python/bin/netdev-times-report2
-rw-r--r--tools/perf/scripts/python/bin/sched-migration-report2
-rw-r--r--tools/perf/scripts/python/bin/sctop-report2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-report2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-report2
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py21
-rw-r--r--tools/perf/scripts/python/futex-contention.py50
-rw-r--r--tools/perf/scripts/python/sctop.py9
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py21
-rw-r--r--tools/perf/scripts/python/syscall-counts.py5
-rw-r--r--tools/perf/util/debug.c4
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/map.h10
-rw-r--r--tools/perf/util/probe-event.c189
-rw-r--r--tools/perf/util/probe-event.h16
-rw-r--r--tools/perf/util/probe-finder.c645
-rw-r--r--tools/perf/util/probe-finder.h31
-rw-r--r--tools/perf/util/ui/browser.c1
2143 files changed, 117900 insertions, 44954 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index feca0758391e..22edcbb9ddaf 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -51,8 +51,13 @@
<sect1><title>Delaying, scheduling, and timer routines</title>
!Iinclude/linux/sched.h
!Ekernel/sched.c
+!Iinclude/linux/completion.h
!Ekernel/timer.c
</sect1>
+ <sect1><title>Wait queues and Wake events</title>
+!Iinclude/linux/wait.h
+!Ekernel/wait.c
+ </sect1>
<sect1><title>High-resolution timers</title>
!Iinclude/linux/ktime.h
!Iinclude/linux/hrtimer.h
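
As an illustrative aside (not part of the patch): the hunk above pulls the completion and wait-queue kerneldoc into the device-drivers book. Below is a minimal sketch of the API being documented, a driver waiting for an interrupt via a completion; the my_dev structure and handler names are hypothetical.

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct completion done;
};

/* The interrupt handler signals the waiter below. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	complete(&dev->done);
	return IRQ_HANDLED;
}

static int my_dev_do_op(struct my_dev *dev)
{
	init_completion(&dev->done);
	/* ... kick off the hardware operation here ... */
	if (!wait_for_completion_timeout(&dev->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;	/* interrupt never arrived */
	return 0;
}
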
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 6b4e07f28b69..7160652a8736 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -93,6 +93,12 @@ X!Ilib/string.c
!Elib/crc32.c
!Elib/crc-ccitt.c
</sect1>
+
+ <sect1 id="idr"><title>idr/ida Functions</title>
+!Pinclude/linux/idr.h idr sync
+!Plib/idr.c IDA description
+!Elib/idr.c
+ </sect1>
</chapter>
<chapter id="mm">
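
As an illustrative aside (not part of the patch): the hunk above adds the idr/ida kerneldoc to kernel-api. A minimal sketch of the idr_pre_get()/idr_get_new() allocation loop that this documentation covers; my_idr and the stored object are hypothetical.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);

/* Store obj in the idr and return the id assigned to it. */
static int my_store(void *obj)
{
	int id, ret;

	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;		/* preallocation failed */
		ret = idr_get_new(&my_idr, obj, &id);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}
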
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index 6ae97157b1c7..be34dcbe0d90 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -250,6 +250,9 @@
<!ENTITY sub-yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
+<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
+<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml">
<!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml">
<!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
@@ -347,6 +350,9 @@
<!ENTITY yuv422p SYSTEM "v4l/pixfmt-yuv422p.xml">
<!ENTITY yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
<!ENTITY yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
+<!ENTITY srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
+<!ENTITY y10 SYSTEM "v4l/pixfmt-y10.xml">
<!ENTITY cropcap SYSTEM "v4l/vidioc-cropcap.xml">
<!ENTITY dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
<!ENTITY encoder-cmd SYSTEM "v4l/vidioc-encoder-cmd.xml">
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index 54447f0d0784..c9ce61d981f5 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -21,11 +21,15 @@ API.</para>
<title>Opening and Closing Devices</title>
<para>For compatibility reasons the character device file names
-recommended for V4L2 video capture, overlay, radio, teletext and raw
+recommended for V4L2 video capture, overlay, radio and raw
vbi capture devices did not change from those used by V4L. They are
listed in <xref linkend="devices" /> and below in <xref
linkend="v4l-dev" />.</para>
+ <para>The teletext devices (minor range 192-223) have been removed in
+V4L2 and no longer exist. There is no hardware available anymore for handling
+pure teletext. Instead raw or sliced VBI is used.</para>
+
<para>The V4L <filename>videodev</filename> module automatically
assigns minor numbers to drivers in load order, depending on the
registered device type. We recommend that V4L2 drivers by default
@@ -66,13 +70,6 @@ not compatible with V4L or V4L2.</para> </footnote>,
<entry>64-127</entry>
</row>
<row>
- <entry>Teletext decoder</entry>
- <entry><para><filename>/dev/vtx</filename>,
-<filename>/dev/vtx0</filename> to
-<filename>/dev/vtx31</filename></para></entry>
- <entry>192-223</entry>
- </row>
- <row>
<entry>Raw VBI capture</entry>
<entry><para><filename>/dev/vbi</filename>,
<filename>/dev/vbi0</filename> to
@@ -2345,6 +2342,17 @@ more information.</para>
</listitem>
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 2.6.37</title>
+ <orderedlist>
+ <listitem>
+ <para>Remove the vtx (videotext/teletext) API. This API was no longer
+used and no hardware exists to verify the API. Nor were any userspace applications found
+that used it. It was originally scheduled for removal in 2.6.35.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
diff --git a/Documentation/DocBook/v4l/controls.xml b/Documentation/DocBook/v4l/controls.xml
index 8408caaee276..2fae3e87ce73 100644
--- a/Documentation/DocBook/v4l/controls.xml
+++ b/Documentation/DocBook/v4l/controls.xml
@@ -312,10 +312,17 @@ minimum value disables backlight compensation.</entry>
information and bits 24-31 must be zero.</entry>
</row>
<row>
+ <entry><constant>V4L2_CID_ILLUMINATORS_1</constant>
+ <constant>V4L2_CID_ILLUMINATORS_2</constant></entry>
+ <entry>boolean</entry>
+ <entry>Switch illuminator 1 or 2 of the device (usually a
+ microscope) on or off.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CID_LASTP1</constant></entry>
<entry></entry>
<entry>End of the predefined control IDs (currently
-<constant>V4L2_CID_BG_COLOR</constant> + 1).</entry>
+<constant>V4L2_CID_ILLUMINATORS_2</constant> + 1).</entry>
</row>
<row>
<entry><constant>V4L2_CID_PRIVATE_BASE</constant></entry>
@@ -357,9 +364,6 @@ enumerate_menu (void)
querymenu.index++) {
if (0 == ioctl (fd, &VIDIOC-QUERYMENU;, &amp;querymenu)) {
printf (" %s\n", querymenu.name);
- } else {
- perror ("VIDIOC_QUERYMENU");
- exit (EXIT_FAILURE);
}
}
}
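
As an illustrative aside (not part of the patch): the new V4L2_CID_ILLUMINATORS_1/2 controls documented above are ordinary boolean controls, so userspace can toggle them with VIDIOC_S_CTRL. A short sketch; the device path and error handling are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_ILLUMINATORS_1,
		.value	= 1,			/* boolean control: 1 = on */
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		perror("VIDIOC_S_CTRL");
		return 1;
	}
	return 0;
}
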
diff --git a/Documentation/DocBook/v4l/dev-rds.xml b/Documentation/DocBook/v4l/dev-rds.xml
index 0869d701b1e5..360d2737e649 100644
--- a/Documentation/DocBook/v4l/dev-rds.xml
+++ b/Documentation/DocBook/v4l/dev-rds.xml
@@ -3,15 +3,16 @@
<para>The Radio Data System transmits supplementary
information in binary format, for example the station name or travel
information, on an inaudible audio subcarrier of a radio program. This
-interface is aimed at devices capable of receiving and decoding RDS
+interface is aimed at devices capable of receiving and/or transmitting RDS
information.</para>
<para>For more information see the core RDS standard <xref linkend="en50067" />
and the RBDS standard <xref linkend="nrsc4" />.</para>
<para>Note that the RBDS standard as is used in the USA is almost identical
-to the RDS standard. Any RDS decoder can also handle RBDS. Only some of the fields
-have slightly different meanings. See the RBDS standard for more information.</para>
+to the RDS standard. Any RDS decoder/encoder can also handle RBDS. Only some of the
+fields have slightly different meanings. See the RBDS standard for more
+information.</para>
<para>The RBDS standard also specifies support for MMBS (Modified Mobile Search).
This is a proprietary format which seems to be discontinued. The RDS interface does not
@@ -21,16 +22,25 @@ be needed, then please contact the linux-media mailing list: &v4l-ml;.</para>
<section>
<title>Querying Capabilities</title>
- <para>Devices supporting the RDS capturing API
-set the <constant>V4L2_CAP_RDS_CAPTURE</constant> flag in
+ <para>Devices supporting the RDS capturing API set
+the <constant>V4L2_CAP_RDS_CAPTURE</constant> flag in
the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl.
-Any tuner that supports RDS will set the
-<constant>V4L2_TUNER_CAP_RDS</constant> flag in the <structfield>capability</structfield>
-field of &v4l2-tuner;.
-Whether an RDS signal is present can be detected by looking at
-the <structfield>rxsubchans</structfield> field of &v4l2-tuner;: the
-<constant>V4L2_TUNER_SUB_RDS</constant> will be set if RDS data was detected.</para>
+returned by the &VIDIOC-QUERYCAP; ioctl. Any tuner that supports RDS
+will set the <constant>V4L2_TUNER_CAP_RDS</constant> flag in
+the <structfield>capability</structfield> field of &v4l2-tuner;. If
+the driver only passes RDS blocks without interpreting the data,
+the <constant>V4L2_TUNER_SUB_RDS_BLOCK_IO</constant> flag has to be
+set, see <link linkend="reading-rds-data">Reading RDS data</link>.
+For future use the
+flag <constant>V4L2_TUNER_SUB_RDS_CONTROLS</constant> has also been
+defined. However, a driver for a radio tuner with this capability does
+not yet exist, so if you are planning to write such a driver you
+should discuss this on the linux-media mailing list: &v4l-ml;.</para>
+
+ <para> Whether an RDS signal is present can be detected by looking
+at the <structfield>rxsubchans</structfield> field of &v4l2-tuner;:
+the <constant>V4L2_TUNER_SUB_RDS</constant> will be set if RDS data
+was detected.</para>
<para>Devices supporting the RDS output API
set the <constant>V4L2_CAP_RDS_OUTPUT</constant> flag in
@@ -40,16 +50,31 @@ Any modulator that supports RDS will set the
<constant>V4L2_TUNER_CAP_RDS</constant> flag in the <structfield>capability</structfield>
field of &v4l2-modulator;.
In order to enable the RDS transmission one must set the <constant>V4L2_TUNER_SUB_RDS</constant>
-bit in the <structfield>txsubchans</structfield> field of &v4l2-modulator;.</para>
-
+bit in the <structfield>txsubchans</structfield> field of &v4l2-modulator;.
+If the driver only passes RDS blocks without interpreting the data,
+the <constant>V4L2_TUNER_SUB_RDS_BLOCK_IO</constant> flag has to be set. If the
+tuner is capable of handling RDS entities like program identification codes and radio
+text, the flag <constant>V4L2_TUNER_SUB_RDS_CONTROLS</constant> should be set,
+see <link linkend="writing-rds-data">Writing RDS data</link> and
+<link linkend="fm-tx-controls">FM Transmitter Control Reference</link>.</para>
</section>
- <section>
+ <section id="reading-rds-data">
<title>Reading RDS data</title>
<para>RDS data can be read from the radio device
-with the &func-read; function. The data is packed in groups of three bytes,
+with the &func-read; function. The data is packed in groups of three bytes.</para>
+ </section>
+
+ <section id="writing-rds-data">
+ <title>Writing RDS data</title>
+
+ <para>RDS data can be written to the radio device
+with the &func-write; function. The data is packed in groups of three bytes,
as follows:</para>
+ </section>
+
+ <section>
<table frame="none" pgwide="1" id="v4l2-rds-data">
<title>struct
<structname>v4l2_rds_data</structname></title>
@@ -111,48 +136,57 @@ as follows:</para>
<tbody valign="top">
<row>
<entry>V4L2_RDS_BLOCK_MSK</entry>
+ <entry> </entry>
<entry>7</entry>
<entry>Mask for bits 0-2 to get the block ID.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_A</entry>
+ <entry> </entry>
<entry>0</entry>
<entry>Block A.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_B</entry>
+ <entry> </entry>
<entry>1</entry>
<entry>Block B.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_C</entry>
+ <entry> </entry>
<entry>2</entry>
<entry>Block C.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_D</entry>
+ <entry> </entry>
<entry>3</entry>
<entry>Block D.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_C_ALT</entry>
+ <entry> </entry>
<entry>4</entry>
<entry>Block C'.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_INVALID</entry>
+ <entry>read-only</entry>
<entry>7</entry>
<entry>An invalid block.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_CORRECTED</entry>
+ <entry>read-only</entry>
<entry>0x40</entry>
<entry>A bit error was detected but corrected.</entry>
</row>
<row>
<entry>V4L2_RDS_BLOCK_ERROR</entry>
+ <entry>read-only</entry>
<entry>0x80</entry>
- <entry>An incorrectable error occurred.</entry>
+ <entry>An uncorrectable error occurred.</entry>
</row>
</tbody>
</tgroup>
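
As an illustrative aside (not part of the patch): the section above states that RDS block I/O uses read() and write() on three-byte struct v4l2_rds_data records. A hedged userspace sketch of reading blocks and skipping uncorrectable ones; the radio device node and buffer size are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_rds_data blocks[16];
	int fd = open("/dev/radio0", O_RDONLY);
	ssize_t n, i;

	if (fd < 0)
		return 1;
	n = read(fd, blocks, sizeof(blocks));	/* always whole blocks */
	for (i = 0; i < n / (ssize_t)sizeof(blocks[0]); i++) {
		if (blocks[i].block & V4L2_RDS_BLOCK_ERROR)
			continue;		/* uncorrectable block */
		printf("block %d: msb=%02x lsb=%02x\n",
		       blocks[i].block & V4L2_RDS_BLOCK_MSK,
		       blocks[i].msb, blocks[i].lsb);
	}
	return 0;
}
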
diff --git a/Documentation/DocBook/v4l/dev-teletext.xml b/Documentation/DocBook/v4l/dev-teletext.xml
index 76184e8ed618..414b1cfff9f4 100644
--- a/Documentation/DocBook/v4l/dev-teletext.xml
+++ b/Documentation/DocBook/v4l/dev-teletext.xml
@@ -1,35 +1,32 @@
<title>Teletext Interface</title>
- <para>This interface aims at devices receiving and demodulating
+ <para>This interface was aimed at devices receiving and demodulating
Teletext data [<xref linkend="ets300706" />, <xref linkend="itu653" />], evaluating the
Teletext packages and storing formatted pages in cache memory. Such
devices are usually implemented as microcontrollers with serial
-interface (I<superscript>2</superscript>C) and can be found on older
+interface (I<superscript>2</superscript>C) and could be found on old
TV cards, dedicated Teletext decoding cards and home-brew devices
connected to the PC parallel port.</para>
- <para>The Teletext API was designed by Martin Buck. It is defined in
+ <para>The Teletext API was designed by Martin Buck. It was defined in
the kernel header file <filename>linux/videotext.h</filename>, the
specification is available from <ulink url="ftp://ftp.gwdg.de/pub/linux/misc/videotext/">
ftp://ftp.gwdg.de/pub/linux/misc/videotext/</ulink>. (Videotext is the name of
-the German public television Teletext service.) Conventional character
-device file names are <filename>/dev/vtx</filename> and
-<filename>/dev/vttuner</filename>, with device number 83, 0 and 83, 16
-respectively. A similar interface exists for the Philips SAA5249
-Teletext decoder [specification?] with character device file names
-<filename>/dev/tlkN</filename>, device number 102, N.</para>
+the German public television Teletext service.)</para>
<para>Eventually the Teletext API was integrated into the V4L API
with character device file names <filename>/dev/vtx0</filename> to
<filename>/dev/vtx31</filename>, device major number 81, minor numbers
-192 to 223. For reference the V4L Teletext API specification is
-reproduced here in full: "Teletext interfaces talk the existing VTX
-API." Teletext devices with major number 83 and 102 will be removed in
-Linux 2.6.</para>
+192 to 223.</para>
- <para>There are no plans to replace the Teletext API or to integrate
-it into V4L2. Please write to the linux-media mailing list: &v4l-ml;
-when the need arises.</para>
+ <para>However, teletext decoders were quickly replaced by more
+generic VBI demodulators and those dedicated teletext decoders no longer exist.
+For many years the vtx devices were still around, even though nobody used
+them. So the decision was made to finally remove support for the Teletext API in
+kernel 2.6.37.</para>
+
+ <para>Modern devices all use the <link linkend="raw-vbi">raw</link> or
+<link linkend="sliced">sliced</link> VBI API.</para>
<!--
Local Variables:
diff --git a/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml b/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
index 26e879231088..4db272b8a0d3 100644
--- a/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
+++ b/Documentation/DocBook/v4l/pixfmt-packed-rgb.xml
@@ -739,7 +739,7 @@ defined in error. Drivers may interpret them as in <xref
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
- <row id="V4L2-PIX-FMT-BGR666">
+ <row><!-- id="V4L2-PIX-FMT-BGR666" -->
<entry><constant>V4L2_PIX_FMT_BGR666</constant></entry>
<entry>'BGRH'</entry>
<entry></entry>
diff --git a/Documentation/DocBook/v4l/pixfmt-srggb10.xml b/Documentation/DocBook/v4l/pixfmt-srggb10.xml
new file mode 100644
index 000000000000..7b274092e60c
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-srggb10.xml
@@ -0,0 +1,90 @@
+ <refentry>
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_SRGGB10 ('RG10'),
+ V4L2_PIX_FMT_SGRBG10 ('BA10'),
+ V4L2_PIX_FMT_SGBRG10 ('GB10'),
+ V4L2_PIX_FMT_SBGGR10 ('BG10'),
+ </refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-SRGGB10"><constant>V4L2_PIX_FMT_SRGGB10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGRBG10"><constant>V4L2_PIX_FMT_SGRBG10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGBRG10"><constant>V4L2_PIX_FMT_SGBRG10</constant></refname>
+ <refname id="V4L2-PIX-FMT-SBGGR10"><constant>V4L2_PIX_FMT_SBGGR10</constant></refname>
+ <refpurpose>10-bit Bayer formats expanded to 16 bits</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>The following four pixel formats are raw sRGB / Bayer formats with
+10 bits per colour. Each colour component is stored in a 16-bit word, with 6
+unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
+and n/2 blue or red samples, with alternating red and blue rows. Bytes are
+stored in memory in little-endian order. They are conventionally described
+as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example of one of these
+formats.</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_SBGGR10</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte; the upper 6 bits of each high byte are zero.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>B<subscript>00low</subscript></entry>
+ <entry>B<subscript>00high</subscript></entry>
+ <entry>G<subscript>01low</subscript></entry>
+ <entry>G<subscript>01high</subscript></entry>
+ <entry>B<subscript>02low</subscript></entry>
+ <entry>B<subscript>02high</subscript></entry>
+ <entry>G<subscript>03low</subscript></entry>
+ <entry>G<subscript>03high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>G<subscript>10low</subscript></entry>
+ <entry>G<subscript>10high</subscript></entry>
+ <entry>R<subscript>11low</subscript></entry>
+ <entry>R<subscript>11high</subscript></entry>
+ <entry>G<subscript>12low</subscript></entry>
+ <entry>G<subscript>12high</subscript></entry>
+ <entry>R<subscript>13low</subscript></entry>
+ <entry>R<subscript>13high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;16:</entry>
+ <entry>B<subscript>20low</subscript></entry>
+ <entry>B<subscript>20high</subscript></entry>
+ <entry>G<subscript>21low</subscript></entry>
+ <entry>G<subscript>21high</subscript></entry>
+ <entry>B<subscript>22low</subscript></entry>
+ <entry>B<subscript>22high</subscript></entry>
+ <entry>G<subscript>23low</subscript></entry>
+ <entry>G<subscript>23high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;24:</entry>
+ <entry>G<subscript>30low</subscript></entry>
+ <entry>G<subscript>30high</subscript></entry>
+ <entry>R<subscript>31low</subscript></entry>
+ <entry>R<subscript>31high</subscript></entry>
+ <entry>G<subscript>32low</subscript></entry>
+ <entry>G<subscript>32high</subscript></entry>
+ <entry>R<subscript>33low</subscript></entry>
+ <entry>R<subscript>33high</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+</refentry>
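
As an illustrative aside (not part of the patch): a hedged sketch of unpacking one row of the 10-bit Bayer layout described above, 16-bit little-endian words with the upper 6 bits zero; the function and variable names are hypothetical.

#include <stdint.h>
#include <stddef.h>

static void unpack_bayer10_row(const uint8_t *row, uint16_t *out, size_t width)
{
	size_t x;

	for (x = 0; x < width; x++)
		/* low byte first (little endian), keep the 10 valid bits */
		out[x] = (uint16_t)((row[2 * x] | (row[2 * x + 1] << 8)) & 0x3ff);
}
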
diff --git a/Documentation/DocBook/v4l/pixfmt-srggb8.xml b/Documentation/DocBook/v4l/pixfmt-srggb8.xml
new file mode 100644
index 000000000000..2570e3be3cf1
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-srggb8.xml
@@ -0,0 +1,67 @@
+ <refentry id="V4L2-PIX-FMT-SRGGB8">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_SRGGB8 ('RGGB')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname><constant>V4L2_PIX_FMT_SRGGB8</constant></refname>
+ <refpurpose>Bayer RGB format</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is commonly the native format of digital cameras,
+reflecting the arrangement of sensors on the CCD device. Only one red,
+green or blue value is given for each pixel. Missing components must
+be interpolated from neighbouring pixels. From left to right the first
+row consists of a red and green value, the second row of a green and
+blue value. This scheme repeats to the right and down for every two
+columns and rows.</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_SRGGB8</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>R<subscript>00</subscript></entry>
+ <entry>G<subscript>01</subscript></entry>
+ <entry>R<subscript>02</subscript></entry>
+ <entry>G<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;4:</entry>
+ <entry>G<subscript>10</subscript></entry>
+ <entry>B<subscript>11</subscript></entry>
+ <entry>G<subscript>12</subscript></entry>
+ <entry>B<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>R<subscript>20</subscript></entry>
+ <entry>G<subscript>21</subscript></entry>
+ <entry>R<subscript>22</subscript></entry>
+ <entry>G<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;12:</entry>
+ <entry>G<subscript>30</subscript></entry>
+ <entry>B<subscript>31</subscript></entry>
+ <entry>G<subscript>32</subscript></entry>
+ <entry>B<subscript>33</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
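
As an illustrative aside (not part of the patch): the description above says the SRGGB8 pattern repeats every two rows and columns (row 0: RGRG..., row 1: GBGB...). A tiny sketch mapping a pixel coordinate to the colour component it carries; purely illustrative.

enum cfa_colour { CFA_RED, CFA_GREEN, CFA_BLUE };

static enum cfa_colour srggb8_component(unsigned int row, unsigned int col)
{
	if ((row & 1) == 0)
		return (col & 1) == 0 ? CFA_RED : CFA_GREEN;
	return (col & 1) == 0 ? CFA_GREEN : CFA_BLUE;
}
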
diff --git a/Documentation/DocBook/v4l/pixfmt-y10.xml b/Documentation/DocBook/v4l/pixfmt-y10.xml
new file mode 100644
index 000000000000..d065043db8d8
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-y10.xml
@@ -0,0 +1,79 @@
+<refentry id="V4L2-PIX-FMT-Y10">
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_Y10 ('Y10 ')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname><constant>V4L2_PIX_FMT_Y10</constant></refname>
+ <refpurpose>Grey-scale image</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is a grey-scale image with a depth of 10 bits per pixel. Pixels
+are stored in 16-bit words with unused high bits padded with 0. The least
+significant byte is stored at lower memory addresses (little-endian).</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_Y10</constant> 4 &times; 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="9" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start&nbsp;+&nbsp;0:</entry>
+ <entry>Y'<subscript>00low</subscript></entry>
+ <entry>Y'<subscript>00high</subscript></entry>
+ <entry>Y'<subscript>01low</subscript></entry>
+ <entry>Y'<subscript>01high</subscript></entry>
+ <entry>Y'<subscript>02low</subscript></entry>
+ <entry>Y'<subscript>02high</subscript></entry>
+ <entry>Y'<subscript>03low</subscript></entry>
+ <entry>Y'<subscript>03high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;8:</entry>
+ <entry>Y'<subscript>10low</subscript></entry>
+ <entry>Y'<subscript>10high</subscript></entry>
+ <entry>Y'<subscript>11low</subscript></entry>
+ <entry>Y'<subscript>11high</subscript></entry>
+ <entry>Y'<subscript>12low</subscript></entry>
+ <entry>Y'<subscript>12high</subscript></entry>
+ <entry>Y'<subscript>13low</subscript></entry>
+ <entry>Y'<subscript>13high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;16:</entry>
+ <entry>Y'<subscript>20low</subscript></entry>
+ <entry>Y'<subscript>20high</subscript></entry>
+ <entry>Y'<subscript>21low</subscript></entry>
+ <entry>Y'<subscript>21high</subscript></entry>
+ <entry>Y'<subscript>22low</subscript></entry>
+ <entry>Y'<subscript>22high</subscript></entry>
+ <entry>Y'<subscript>23low</subscript></entry>
+ <entry>Y'<subscript>23high</subscript></entry>
+ </row>
+ <row>
+ <entry>start&nbsp;+&nbsp;24:</entry>
+ <entry>Y'<subscript>30low</subscript></entry>
+ <entry>Y'<subscript>30high</subscript></entry>
+ <entry>Y'<subscript>31low</subscript></entry>
+ <entry>Y'<subscript>31high</subscript></entry>
+ <entry>Y'<subscript>32low</subscript></entry>
+ <entry>Y'<subscript>32high</subscript></entry>
+ <entry>Y'<subscript>33low</subscript></entry>
+ <entry>Y'<subscript>33high</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+</refentry>
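
The byte order above maps directly onto a trivial unpacking loop. A minimal
user-space sketch, assuming only the little-endian 16-bit-per-sample packing
described in this new page (the buffer pointer, width and height come from
elsewhere and are hypothetical):

    #include <stdint.h>
    #include <stddef.h>

    /* Unpack a V4L2_PIX_FMT_Y10 image: each pixel is a 16-bit little-endian
     * word whose 6 high bits are zero padding.  Samples end up as plain
     * uint16_t values in the range 0..1023. */
    static void y10_unpack(const uint8_t *buf, uint16_t *out,
                           size_t width, size_t height)
    {
        size_t i, n = width * height;

        for (i = 0; i < n; i++) {
            uint16_t low  = buf[2 * i];
            uint16_t high = buf[2 * i + 1];

            out[i] = (uint16_t)(((high << 8) | low) & 0x03ff);
        }
    }
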
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index c4ad0a8e42dc..d7c467187095 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -566,7 +566,9 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-sbggr8;
&sub-sgbrg8;
&sub-sgrbg8;
+ &sub-srggb8;
&sub-sbggr16;
+ &sub-srggb10;
</section>
<section id="yuv-formats">
@@ -589,6 +591,7 @@ information.</para>
&sub-packed-yuv;
&sub-grey;
+ &sub-y10;
&sub-y16;
&sub-yuyv;
&sub-uyvy;
@@ -685,6 +688,11 @@ http://www.ivtvdriver.org/</ulink></para><para>The format is documented in the
kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm12</filename>
</para></entry>
</row>
+ <row id="V4L2-PIX-FMT-CPIA1">
+ <entry><constant>V4L2_PIX_FMT_CPIA1</constant></entry>
+ <entry>'CPIA'</entry>
+ <entry>YUV format used by the gspca cpia1 driver.</entry>
+ </row>
<row id="V4L2-PIX-FMT-SPCA501">
<entry><constant>V4L2_PIX_FMT_SPCA501</constant></entry>
<entry>'S501'</entry>
@@ -705,11 +713,6 @@ kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm
<entry>'S561'</entry>
<entry>Compressed GBRG Bayer format used by the gspca driver.</entry>
</row>
- <row id="V4L2-PIX-FMT-SGRBG10">
- <entry><constant>V4L2_PIX_FMT_SGRBG10</constant></entry>
- <entry>'DA10'</entry>
- <entry>10 bit raw Bayer, expanded to 16 bits.</entry>
- </row>
<row id="V4L2-PIX-FMT-SGRBG10DPCM8">
<entry><constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant></entry>
<entry>'DB10'</entry>
@@ -770,6 +773,11 @@ kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm
<entry>'S920'</entry>
<entry>YUV 4:2:0 format of the gspca sn9c20x driver.</entry>
</row>
+ <row id="V4L2-PIX-FMT-SN9C2028">
+ <entry><constant>V4L2_PIX_FMT_SN9C2028</constant></entry>
+ <entry>'SONX'</entry>
+ <entry>Compressed GBRG bayer format of the gspca sn9c2028 driver.</entry>
+ </row>
<row id="V4L2-PIX-FMT-STV0680">
<entry><constant>V4L2_PIX_FMT_STV0680</constant></entry>
<entry>'S680'</entry>
@@ -787,6 +795,20 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
<entry>'TM60'</entry>
<entry><para>Used by Trident tm6000</para></entry>
</row>
+ <row id="V4L2-PIX-FMT-CIT-YYVYUY">
+ <entry><constant>V4L2_PIX_FMT_CIT_YYVYUY</constant></entry>
+ <entry>'CITV'</entry>
+ <entry><para>Used by xirlink CIT, found at IBM webcams.</para>
+ <para>Uses one line of Y then 1 line of VYUY</para>
+ </entry>
+ </row>
+ <row id="V4L2-PIX-FMT-KONICA420">
+ <entry><constant>V4L2_PIX_FMT_KONICA420</constant></entry>
+ <entry>'KONI'</entry>
+ <entry><para>Used by Konica webcams.</para>
+ <para>YUV420 planar in blocks of 256 pixels.</para>
+ </entry>
+ </row>
<row id="V4L2-PIX-FMT-YYUV">
<entry><constant>V4L2_PIX_FMT_YYUV</constant></entry>
<entry>'YYUV'</entry>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 7c3c098d5d08..839e93e875ae 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -99,6 +99,7 @@ Remote Controller chapter.</contrib>
<year>2007</year>
<year>2008</year>
<year>2009</year>
+ <year>2010</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
</copyright>
@@ -110,10 +111,17 @@ Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
<!-- Put document revisions here, newest first. -->
<!-- API revisions (changes and additions of defines, enums,
structs, ioctls) must be noted in more detail in the history chapter
-(compat.sgml), along with the possible impact on existing drivers and
+(compat.xml), along with the possible impact on existing drivers and
applications. -->
<revision>
+ <revnumber>2.6.37</revnumber>
+ <date>2010-08-06</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Removed obsolete vtx (videotext) API.</revremark>
+ </revision>
+
+ <revision>
<revnumber>2.6.33</revnumber>
<date>2009-12-03</date>
<authorinitials>mk</authorinitials>
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 865b06d9e679..325b23b6964c 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -154,23 +154,13 @@ enum <link linkend="v4l2-buf-type">v4l2_buf_type</link> {
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
-#if 1 /*KEEP*/
+#if 1
/* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
#endif
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
-enum <link linkend="v4l2-ctrl-type">v4l2_ctrl_type</link> {
- V4L2_CTRL_TYPE_INTEGER = 1,
- V4L2_CTRL_TYPE_BOOLEAN = 2,
- V4L2_CTRL_TYPE_MENU = 3,
- V4L2_CTRL_TYPE_BUTTON = 4,
- V4L2_CTRL_TYPE_INTEGER64 = 5,
- V4L2_CTRL_TYPE_CTRL_CLASS = 6,
- V4L2_CTRL_TYPE_STRING = 7,
-};
-
enum <link linkend="v4l2-tuner-type">v4l2_tuner_type</link> {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -288,6 +278,7 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-RGB565">V4L2_PIX_FMT_RGB565</link> v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define <link linkend="V4L2-PIX-FMT-RGB555X">V4L2_PIX_FMT_RGB555X</link> v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define <link linkend="V4L2-PIX-FMT-RGB565X">V4L2_PIX_FMT_RGB565X</link> v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
+#define <link linkend="V4L2-PIX-FMT-BGR666">V4L2_PIX_FMT_BGR666</link> v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define <link linkend="V4L2-PIX-FMT-BGR24">V4L2_PIX_FMT_BGR24</link> v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define <link linkend="V4L2-PIX-FMT-RGB24">V4L2_PIX_FMT_RGB24</link> v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define <link linkend="V4L2-PIX-FMT-BGR32">V4L2_PIX_FMT_BGR32</link> v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
@@ -295,6 +286,9 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
/* Grey formats */
#define <link linkend="V4L2-PIX-FMT-GREY">V4L2_PIX_FMT_GREY</link> v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y4">V4L2_PIX_FMT_Y4</link> v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y6">V4L2_PIX_FMT_Y6</link> v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
+#define <link linkend="V4L2-PIX-FMT-Y10">V4L2_PIX_FMT_Y10</link> v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define <link linkend="V4L2-PIX-FMT-Y16">V4L2_PIX_FMT_Y16</link> v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
/* Palette formats */
@@ -330,7 +324,11 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-SBGGR8">V4L2_PIX_FMT_SBGGR8</link> v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define <link linkend="V4L2-PIX-FMT-SGBRG8">V4L2_PIX_FMT_SGBRG8</link> v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define <link linkend="V4L2-PIX-FMT-SGRBG8">V4L2_PIX_FMT_SGRBG8</link> v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
-#define <link linkend="V4L2-PIX-FMT-SGRBG10">V4L2_PIX_FMT_SGRBG10</link> v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+#define <link linkend="V4L2-PIX-FMT-SRGGB8">V4L2_PIX_FMT_SRGGB8</link> v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define <link linkend="V4L2-PIX-FMT-SBGGR10">V4L2_PIX_FMT_SBGGR10</link> v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define <link linkend="V4L2-PIX-FMT-SGBRG10">V4L2_PIX_FMT_SGBRG10</link> v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define <link linkend="V4L2-PIX-FMT-SGRBG10">V4L2_PIX_FMT_SGRBG10</link> v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define <link linkend="V4L2-PIX-FMT-SRGGB10">V4L2_PIX_FMT_SRGGB10</link> v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
#define <link linkend="V4L2-PIX-FMT-SGRBG10DPCM8">V4L2_PIX_FMT_SGRBG10DPCM8</link> v4l2_fourcc('B', 'D', '1', '0')
/*
@@ -346,6 +344,7 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-MPEG">V4L2_PIX_FMT_MPEG</link> v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */
/* Vendor-specific formats */
+#define <link linkend="V4L2-PIX-FMT-CPIA1">V4L2_PIX_FMT_CPIA1</link> v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
#define <link linkend="V4L2-PIX-FMT-WNVA">V4L2_PIX_FMT_WNVA</link> v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define <link linkend="V4L2-PIX-FMT-SN9C10X">V4L2_PIX_FMT_SN9C10X</link> v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define <link linkend="V4L2-PIX-FMT-SN9C20X-I420">V4L2_PIX_FMT_SN9C20X_I420</link> v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
@@ -358,12 +357,15 @@ struct <link linkend="v4l2-pix-format">v4l2_pix_format</link> {
#define <link linkend="V4L2-PIX-FMT-SPCA561">V4L2_PIX_FMT_SPCA561</link> v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
#define <link linkend="V4L2-PIX-FMT-PAC207">V4L2_PIX_FMT_PAC207</link> v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
#define <link linkend="V4L2-PIX-FMT-MR97310A">V4L2_PIX_FMT_MR97310A</link> v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
+#define <link linkend="V4L2-PIX-FMT-SN9C2028">V4L2_PIX_FMT_SN9C2028</link> v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */
#define <link linkend="V4L2-PIX-FMT-SQ905C">V4L2_PIX_FMT_SQ905C</link> v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define <link linkend="V4L2-PIX-FMT-PJPG">V4L2_PIX_FMT_PJPG</link> v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
#define <link linkend="V4L2-PIX-FMT-OV511">V4L2_PIX_FMT_OV511</link> v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define <link linkend="V4L2-PIX-FMT-OV518">V4L2_PIX_FMT_OV518</link> v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
-#define <link linkend="V4L2-PIX-FMT-TM6000">V4L2_PIX_FMT_TM6000</link> v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
#define <link linkend="V4L2-PIX-FMT-STV0680">V4L2_PIX_FMT_STV0680</link> v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
+#define <link linkend="V4L2-PIX-FMT-TM6000">V4L2_PIX_FMT_TM6000</link> v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
+#define <link linkend="V4L2-PIX-FMT-CIT-YYVYUY">V4L2_PIX_FMT_CIT_YYVYUY</link> v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */
+#define <link linkend="V4L2-PIX-FMT-KONICA420">V4L2_PIX_FMT_KONICA420</link> v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
/*
* F O R M A T E N U M E R A T I O N
@@ -380,7 +382,7 @@ struct <link linkend="v4l2-fmtdesc">v4l2_fmtdesc</link> {
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
-#if 1 /*KEEP*/
+#if 1
/* Experimental Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
@@ -544,6 +546,8 @@ struct <link linkend="v4l2-buffer">v4l2_buffer</link> {
#define V4L2_BUF_FLAG_KEYFRAME 0x0008 /* Image is a keyframe (I-frame) */
#define V4L2_BUF_FLAG_PFRAME 0x0010 /* Image is a P-frame */
#define V4L2_BUF_FLAG_BFRAME 0x0020 /* Image is a B-frame */
+/* Buffer is ready, but the data contained within is corrupted. */
+#define V4L2_BUF_FLAG_ERROR 0x0040
#define V4L2_BUF_FLAG_TIMECODE 0x0100 /* timecode field is valid */
#define V4L2_BUF_FLAG_INPUT 0x0200 /* input field is valid */
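
The new flag only tells the application that the dequeued payload cannot be
trusted; the buffer itself is still usable and should be requeued as usual. A
hedged sketch of checking it after VIDIOC_DQBUF (a capture device using mmap
streaming is assumed and not shown):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Dequeue one mmap'ed capture buffer and tell the caller whether its
     * payload can be trusted.  Returns the buffer index, or -1 on error. */
    static int dqbuf_checked(int fd, int *data_ok)
    {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;

        if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
            return -1;

        /* New in this patch: buffer is ready but its data is corrupted. */
        *data_ok = !(buf.flags & V4L2_BUF_FLAG_ERROR);
        return buf.index;
    }
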
@@ -934,6 +938,16 @@ struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link> {
#define V4L2_CTRL_ID2CLASS(id) ((id) &amp; 0x0fff0000UL)
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) &amp; 0xffff) &gt;= 0x1000)
+enum <link linkend="v4l2-ctrl-type">v4l2_ctrl_type</link> {
+ V4L2_CTRL_TYPE_INTEGER = 1,
+ V4L2_CTRL_TYPE_BOOLEAN = 2,
+ V4L2_CTRL_TYPE_MENU = 3,
+ V4L2_CTRL_TYPE_BUTTON = 4,
+ V4L2_CTRL_TYPE_INTEGER64 = 5,
+ V4L2_CTRL_TYPE_CTRL_CLASS = 6,
+ V4L2_CTRL_TYPE_STRING = 7,
+};
+
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct <link linkend="v4l2-queryctrl">v4l2_queryctrl</link> {
__u32 id;
@@ -1018,21 +1032,27 @@ enum <link linkend="v4l2-colorfx">v4l2_colorfx</link> {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
- V4L2_COLORFX_NEGATIVE = 3,
- V4L2_COLORFX_EMBOSS = 4,
- V4L2_COLORFX_SKETCH = 5,
- V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
V4L2_COLORFX_GRASS_GREEN = 7,
V4L2_COLORFX_SKIN_WHITEN = 8,
- V4L2_COLORFX_VIVID = 9.
+ V4L2_COLORFX_VIVID = 9,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
#define V4L2_CID_ROTATE (V4L2_CID_BASE+34)
#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35)
+
+#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
+
+#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37)
+#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38)
+
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+36)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+39)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1349,6 +1369,8 @@ struct <link linkend="v4l2-modulator">v4l2_modulator</link> {
#define V4L2_TUNER_CAP_SAP 0x0020
#define V4L2_TUNER_CAP_LANG1 0x0040
#define V4L2_TUNER_CAP_RDS 0x0080
+#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
+#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
/* Flags for the 'rxsubchans' field */
#define V4L2_TUNER_SUB_MONO 0x0001
@@ -1378,7 +1400,8 @@ struct <link linkend="v4l2-hw-freq-seek">v4l2_hw_freq_seek</link> {
enum <link linkend="v4l2-tuner-type">v4l2_tuner_type</link> type;
__u32 seek_upward;
__u32 wrap_around;
- __u32 reserved[8];
+ __u32 spacing;
+ __u32 reserved[7];
};
/*
@@ -1433,7 +1456,7 @@ struct <link linkend="v4l2-audioout">v4l2_audioout</link> {
*
* NOTE: EXPERIMENTAL API
*/
-#if 1 /*KEEP*/
+#if 1
#define V4L2_ENC_IDX_FRAME_I (0)
#define V4L2_ENC_IDX_FRAME_P (1)
#define V4L2_ENC_IDX_FRAME_B (2)
@@ -1626,6 +1649,38 @@ struct <link linkend="v4l2-streamparm">v4l2_streamparm</link> {
};
/*
+ * E V E N T S
+ */
+
+#define V4L2_EVENT_ALL 0
+#define V4L2_EVENT_VSYNC 1
+#define V4L2_EVENT_EOS 2
+#define V4L2_EVENT_PRIVATE_START 0x08000000
+
+/* Payload for V4L2_EVENT_VSYNC */
+struct <link linkend="v4l2-event-vsync">v4l2_event_vsync</link> {
+ /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
+ __u8 field;
+} __attribute__ ((packed));
+
+struct <link linkend="v4l2-event">v4l2_event</link> {
+ __u32 type;
+ union {
+ struct <link linkend="v4l2-event-vsync">v4l2_event_vsync</link> vsync;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct timespec timestamp;
+ __u32 reserved[9];
+};
+
+struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link> {
+ __u32 type;
+ __u32 reserved[7];
+};
+
+/*
* A D V A N C E D D E B U G G I N G
*
* NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
@@ -1720,7 +1775,7 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct <link linkend="v4l2-ext-controls">v4l2_ext_controls</link>)
-#if 1 /*KEEP*/
+#if 1
#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct <link linkend="v4l2-frmsizeenum">v4l2_frmsizeenum</link>)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct <link linkend="v4l2-frmivalenum">v4l2_frmivalenum</link>)
#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct <link linkend="v4l2-enc-idx">v4l2_enc_idx</link>)
@@ -1728,7 +1783,7 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct <link linkend="v4l2-encoder-cmd">v4l2_encoder_cmd</link>)
#endif
-#if 1 /*KEEP*/
+#if 1
/* Experimental, meant for debugging, testing and internal use.
Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
You must be root to use these ioctls. Never use these in applications! */
@@ -1747,6 +1802,9 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
+#define VIDIOC_DQEVENT _IOR('V', 89, struct <link linkend="v4l2-event">v4l2_event</link>)
+#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link>)
+#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct <link linkend="v4l2-event-subscription">v4l2_event_subscription</link>)
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
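
The new event ioctls are used in pairs: subscribe first, then dequeue events
as they arrive. A sketch under the assumption that the driver signals pending
events as an exceptional condition on the file descriptor (the usual V4L2
convention, not spelled out in this header):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/select.h>
    #include <linux/videodev2.h>

    /* Subscribe to end-of-stream events on fd and block until one arrives. */
    static int wait_for_eos(int fd)
    {
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;
        fd_set efds;

        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_EOS;
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
            return -1;

        FD_ZERO(&efds);
        FD_SET(fd, &efds);
        if (select(fd + 1, NULL, NULL, &efds, NULL) < 0)
            return -1;

        memset(&ev, 0, sizeof(ev));
        if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
            return -1;

        return ev.type == V4L2_EVENT_EOS ? 0 : -1;
    }
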
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
index 3c6784e132f3..d733721a7519 100644
--- a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
@@ -16,8 +16,7 @@
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-preset;
-*<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_preset *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
index ecc19576bb8f..d5ec6abf0ce2 100644
--- a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
@@ -16,8 +16,7 @@
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-timings;
-*<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_timings *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
index 402229ee06f6..d272f7ab91b8 100644
--- a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
+++ b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
@@ -16,7 +16,7 @@ input</refpurpose>
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>&v4l2-dv-preset; *<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_dv_preset *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
diff --git a/Documentation/DocBook/v4l/vidioc-querycap.xml b/Documentation/DocBook/v4l/vidioc-querycap.xml
index 6ab7e25b31b6..d499da93a450 100644
--- a/Documentation/DocBook/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/v4l/vidioc-querycap.xml
@@ -184,7 +184,7 @@ data.</entry>
<row>
<entry><constant>V4L2_CAP_RDS_CAPTURE</constant></entry>
<entry>0x00000100</entry>
- <entry>The device supports the <link linkend="rds">RDS</link> interface.</entry>
+ <entry>The device supports the <link linkend="rds">RDS</link> capture interface.</entry>
</row>
<row>
<entry><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant></entry>
@@ -206,6 +206,11 @@ driver capabilities.</para></footnote></entry>
hardware frequency seeking.</entry>
</row>
<row>
+ <entry><constant>V4L2_CAP_RDS_OUTPUT</constant></entry>
+ <entry>0x00000800</entry>
+ <entry>The device supports the <link linkend="rds">RDS</link> output interface.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CAP_TUNER</constant></entry>
<entry>0x00010000</entry>
<entry>The device has some sort of tuner to
diff --git a/Documentation/DocBook/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
index 8e0e055ac934..0d5e8283cf32 100644
--- a/Documentation/DocBook/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
@@ -103,8 +103,12 @@ structure. The driver fills the rest of the structure or returns an
<structfield>index</structfield> is invalid. Menu items are enumerated
by calling <constant>VIDIOC_QUERYMENU</constant> with successive
<structfield>index</structfield> values from &v4l2-queryctrl;
-<structfield>minimum</structfield> (0) to
-<structfield>maximum</structfield>, inclusive.</para>
+<structfield>minimum</structfield> to
+<structfield>maximum</structfield>, inclusive. Note that it is possible
+for <constant>VIDIOC_QUERYMENU</constant> to return an &EINVAL; for some
+indices between <structfield>minimum</structfield> and <structfield>maximum</structfield>.
+In that case that particular menu item is not supported by this driver. Also note that
+the <structfield>minimum</structfield> value is not necessarily 0.</para>
<para>See also the examples in <xref linkend="control" />.</para>
@@ -139,7 +143,7 @@ string. This information is intended for the user.</entry>
<entry><structfield>minimum</structfield></entry>
<entry>Minimum value, inclusive. This field gives a lower
bound for <constant>V4L2_CTRL_TYPE_INTEGER</constant> controls and the
-lowest valid index (always 0) for <constant>V4L2_CTRL_TYPE_MENU</constant> controls.
+lowest valid index for <constant>V4L2_CTRL_TYPE_MENU</constant> controls.
For <constant>V4L2_CTRL_TYPE_STRING</constant> controls the minimum value
gives the minimum length of the string. This length <emphasis>does not include the terminating
zero</emphasis>. It may not be valid for any other type of control, including
@@ -279,7 +283,7 @@ values which are actually different on the hardware.</entry>
</row>
<row>
<entry><constant>V4L2_CTRL_TYPE_MENU</constant></entry>
- <entry>0</entry>
+ <entry>&ge; 0</entry>
<entry>1</entry>
<entry>N-1</entry>
<entry>The control has a menu of N choices. The names of
@@ -405,8 +409,10 @@ writing a value will cause the device to carry out a given action
<term><errorcode>EINVAL</errorcode></term>
<listitem>
<para>The &v4l2-queryctrl; <structfield>id</structfield>
-is invalid. The &v4l2-querymenu; <structfield>id</structfield> or
-<structfield>index</structfield> is invalid.</para>
+is invalid. The &v4l2-querymenu; <structfield>id</structfield> is
+invalid or <structfield>index</structfield> is out of range (less than
+<structfield>minimum</structfield> or greater than <structfield>maximum</structfield>)
+or this particular menu item is not supported by the driver.</para>
</listitem>
</varlistentry>
<varlistentry>
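
The clarified enumeration rules translate into a loop like the sketch below:
start at minimum rather than 0, and treat EINVAL on an individual index as
"item not supported" rather than as a fatal error. The surrounding
VIDIOC_QUERYCTRL call that filled in qctrl is assumed.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Print all supported items of one menu control. */
    static void enumerate_menu(int fd, const struct v4l2_queryctrl *qctrl)
    {
        struct v4l2_querymenu qmenu;
        __s32 i;

        for (i = qctrl->minimum; i <= qctrl->maximum; i++) {
            memset(&qmenu, 0, sizeof(qmenu));
            qmenu.id = qctrl->id;
            qmenu.index = i;

            if (ioctl(fd, VIDIOC_QUERYMENU, &qmenu) < 0) {
                if (errno == EINVAL)
                    continue;        /* this item is not supported */
                break;               /* real error */
            }
            printf("  %d: %s\n", i, qmenu.name);
        }
    }
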
diff --git a/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml b/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
index 14b3ec7ed75b..c30dcc4232c0 100644
--- a/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
+++ b/Documentation/DocBook/v4l/vidioc-s-hw-freq-seek.xml
@@ -51,7 +51,8 @@
<para>Start a hardware frequency seek from the current frequency.
To do this applications initialize the <structfield>tuner</structfield>,
-<structfield>type</structfield>, <structfield>seek_upward</structfield> and
+<structfield>type</structfield>, <structfield>seek_upward</structfield>,
+<structfield>spacing</structfield> and
<structfield>wrap_around</structfield> fields, and zero out the
<structfield>reserved</structfield> array of a &v4l2-hw-freq-seek; and
call the <constant>VIDIOC_S_HW_FREQ_SEEK</constant> ioctl with a pointer
@@ -89,7 +90,12 @@ field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
+ <entry><structfield>spacing</structfield></entry>
+ <entry>If non-zero, defines the hardware seek resolution in Hz. The driver selects the nearest value that is supported by the device. If spacing is zero a reasonable default value is used.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[7]</entry>
<entry>Reserved for future extensions. Drivers and
applications must set the array to zero.</entry>
</row>
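
With the new field, a seek request might be filled in as in this sketch; the
50 kHz spacing is an arbitrary example value, and the driver is free to pick
the nearest step it actually supports (0 would request a default):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Start an upward, wrapping hardware seek on tuner 0 of a radio device. */
    static int seek_up(int fd)
    {
        struct v4l2_hw_freq_seek seek;

        memset(&seek, 0, sizeof(seek));   /* also zeroes the reserved[] array */
        seek.tuner = 0;
        seek.type = V4L2_TUNER_RADIO;
        seek.seek_upward = 1;
        seek.wrap_around = 1;
        seek.spacing = 50000;             /* requested resolution in Hz */

        return ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek);
    }
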
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 6e25c2659e0a..a2976a6de033 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -21,6 +21,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
+#include <sys/wait.h>
#include <signal.h>
#include <linux/genetlink.h>
@@ -266,11 +267,13 @@ int main(int argc, char *argv[])
int containerset = 0;
char containerpath[1024];
int cfd = 0;
+ int forking = 0;
+ sigset_t sigset;
struct msgtemplate msg;
- while (1) {
- c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:");
+ while (!forking) {
+ c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
if (c < 0)
break;
@@ -319,6 +322,28 @@ int main(int argc, char *argv[])
err(1, "Invalid pid\n");
cmd_type = TASKSTATS_CMD_ATTR_PID;
break;
+ case 'c':
+
+ /* Block SIGCHLD for sigwait() later */
+ if (sigemptyset(&sigset) == -1)
+ err(1, "Failed to empty sigset");
+ if (sigaddset(&sigset, SIGCHLD))
+ err(1, "Failed to set sigchld in sigset");
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+ /* fork/exec a child */
+ tid = fork();
+ if (tid < 0)
+ err(1, "Fork failed\n");
+ if (tid == 0)
+ if (execvp(argv[optind - 1],
+ &argv[optind - 1]) < 0)
+ exit(-1);
+
+ /* Set the command type and avoid further processing */
+ cmd_type = TASKSTATS_CMD_ATTR_PID;
+ forking = 1;
+ break;
case 'v':
printf("debug on\n");
dbg = 1;
@@ -370,6 +395,15 @@ int main(int argc, char *argv[])
goto err;
}
+ /*
+ * If we forked a child, wait for it to exit. Cannot use waitpid()
+ * as all the delicious data would be reaped as part of the wait
+ */
+ if (tid && forking) {
+ int sig_received;
+ sigwait(&sigset, &sig_received);
+ }
+
if (tid) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
cmd_type, &tid, sizeof(__u32));
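
The reason for sigwait() rather than waitpid() is spelled out in the comment
above: reaping the child would also throw away the statistics keyed by its
pid. A stripped-down sketch of that pattern on its own, with a hypothetical
"sleep 1" child and none of the netlink handling:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        sigset_t set;
        pid_t pid;
        int sig;

        /* Block SIGCHLD first so the signal is queued, not delivered. */
        sigemptyset(&set);
        sigaddset(&set, SIGCHLD);
        sigprocmask(SIG_BLOCK, &set, NULL);

        pid = fork();
        if (pid < 0)
            return 1;
        if (pid == 0) {
            execlp("sleep", "sleep", "1", (char *)NULL);   /* hypothetical child */
            _exit(127);
        }

        /* Wait for the child to die without reaping it, so per-pid data
         * (here: its taskstats) can still be fetched afterwards. */
        sigwait(&set, &sig);
        printf("child %d exited, statistics can now be requested\n", pid);
        return 0;
    }
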
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index b34823ff1646..190018b0c649 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -18,7 +18,8 @@ CONTENTS:
1.2 Why are cgroups needed ?
1.3 How are cgroups implemented ?
1.4 What does notify_on_release do ?
- 1.5 How do I use cgroups ?
+ 1.5 What does clone_children do ?
+ 1.6 How do I use cgroups ?
2. Usage Examples and Syntax
2.1 Basic Usage
2.2 Attaching processes
@@ -293,7 +294,16 @@ notify_on_release in the root cgroup at system boot is disabled
value of their parents notify_on_release setting. The default value of
a cgroup hierarchy's release_agent path is empty.
-1.5 How do I use cgroups ?
+1.5 What does clone_children do ?
+---------------------------------
+
+If the clone_children flag is enabled (1) in a cgroup, then all
+cgroups created beneath will call the post_clone callbacks for each
+subsystem of the newly created cgroup. Usually when this callback is
+implemented for a subsystem, it copies the values of the parent
+subsystem; this is the case for the cpuset.
+
+1.6 How do I use cgroups ?
--------------------------
To start a new job that is to be contained within a cgroup, using
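
For completeness, the flag can also be flipped from a program instead of the
shell. In the sketch below the hierarchy mount point /cgroup and the
attribute name cgroup.clone_children are assumptions about the local setup:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Enable clone_children on one cgroup so that cgroups created beneath it
     * get the post_clone treatment (e.g. cpuset values copied from the parent). */
    int main(void)
    {
        const char *path = "/cgroup/mygroup/cgroup.clone_children"; /* assumed */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror(path);
            return 1;
        }
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }
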
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index c58abf1ccc71..eccffe715229 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -1496,9 +1496,6 @@ Your cooperation is appreciated.
64 = /dev/radio0 Radio device
...
127 = /dev/radio63 Radio device
- 192 = /dev/vtx0 Teletext device
- ...
- 223 = /dev/vtx31 Teletext device
224 = /dev/vbi0 Vertical blank interrupt
...
255 = /dev/vbi31 Vertical blank interrupt
@@ -2520,6 +2517,12 @@ Your cooperation is appreciated.
8 = /dev/mmcblk1 Second SD/MMC card
...
+ The start of the next SD/MMC card can be configured with
+ CONFIG_MMC_BLOCK_MINORS, or overridden at boot/modprobe
+ time using the mmcblk.perdev_minors option. That would
+ bump the offset between each card to be the configured
+ value instead of the default 8.
+
179 char CCube DVXChip-based PCI products
0 = /dev/dvxirq0 First DVX device
1 = /dev/dvxirq1 Second DVX device
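
The resulting minor-number layout is simple arithmetic; a small sketch that
prints the expected device nodes for the first two cards, assuming the
default of 8 minors per card:

    #include <stdio.h>

    /* Each card gets 'perdev_minors' consecutive minors under major 179:
     * the first for the whole device, the rest for partitions. */
    int main(void)
    {
        int perdev_minors = 8;   /* CONFIG_MMC_BLOCK_MINORS or mmcblk.perdev_minors */
        int card, part;

        for (card = 0; card < 2; card++)
            for (part = 0; part < 3; part++) {
                int minor = card * perdev_minors + part;

                if (part == 0)
                    printf("/dev/mmcblk%d\tminor %d\n", card, minor);
                else
                    printf("/dev/mmcblk%dp%d\tminor %d\n", card, part, minor);
            }
        return 0;
    }
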
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 350959f4e41b..59690de8ebfe 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -26,7 +26,8 @@ use IO::Handle;
"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
"or51211", "or51132_qam", "or51132_vsb", "bluebird",
"opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718",
- "af9015", "ngene", "az6027");
+ "af9015", "ngene", "az6027", "lme2510_lg", "lme2510c_s7395",
+ "lme2510c_s7395_old");
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -584,6 +585,49 @@ sub az6027{
$firmware;
}
+
+sub lme2510_lg {
+ my $sourcefile = "LMEBDA_DVBS.sys";
+ my $hash = "fc6017ad01e79890a97ec53bea157ed2";
+ my $outfile = "dvb-usb-lme2510-lg.fw";
+ my $hasho = "caa065d5fdbd2c09ad57b399bbf55cad";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 4168, 3841, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
+sub lme2510c_s7395 {
+ my $sourcefile = "US2A0D.sys";
+ my $hash = "b0155a8083fb822a3bd47bc360e74601";
+ my $outfile = "dvb-usb-lme2510c-s7395.fw";
+ my $hasho = "3a3cf1aeebd17b6ddc04cebe131e94cf";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 37248, 3720, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
+sub lme2510c_s7395_old {
+ my $sourcefile = "LMEBDA_DVBS7395C.sys";
+ my $hash = "7572ae0eb9cdf91baabd7c0ba9e09b31";
+ my $outfile = "dvb-usb-lme2510c-s7395.fw";
+ my $hasho = "90430c5b435eb5c6f88fd44a9d950674";
+
+ checkstandard();
+
+ verify($sourcefile, $hash);
+ extract($sourcefile, 4208, 3881, $outfile);
+ verify($outfile, $hasho);
+ $outfile;
+}
+
# ---------------------------------------------------------------
# Utilities
diff --git a/Documentation/dvb/lmedm04.txt b/Documentation/dvb/lmedm04.txt
new file mode 100644
index 000000000000..e175784b89bf
--- /dev/null
+++ b/Documentation/dvb/lmedm04.txt
@@ -0,0 +1,58 @@
+To extract firmware for the DM04/QQBOX you need to copy the
+following file(s) to this directory.
+
+for DM04+/QQBOX LME2510C (Sharp 7395 Tuner)
+-------------------------------------------
+
+The Sharp 7395 driver can be found in windows/system32/driver
+
+US2A0D.sys (dated 17 Mar 2009)
+
+
+and run
+./get_dvb_firmware lme2510c_s7395
+
+ will produce
+ dvb-usb-lme2510c-s7395.fw
+
+An alternative but older firmware can be found on the driver
+disk DVB-S_EN_3.5A in BDADriver/driver
+
+LMEBDA_DVBS7395C.sys (dated 18 Jan 2008)
+
+and run
+./get_dvb_firmware lme2510c_s7395_old
+
+ will produce
+ dvb-usb-lme2510c-s7395.fw
+
+--------------------------------------------------------------------
+
+The LG firmware can be found on the driver
+disk DM04+_5.1A[LG] in BDADriver/driver
+
+for DM04 LME2510 (LG Tuner)
+---------------------------
+
+LMEBDA_DVBS.sys (dated 13 Nov 2007)
+
+and run
+./get_dvb_firmware lme2510_lg
+
+ will produce
+ dvb-usb-lme2510-lg.fw
+
+
+Other LG firmware can be extracted manually from US280D.sys
+only found in windows/system32/driver.
+
+dd if=US280D.sys ibs=1 skip=42616 count=3668 of=dvb-usb-lme2510-lg.fw
+
+for DM04 LME2510C (LG Tuner)
+---------------------------
+
+dd if=US280D.sys ibs=1 skip=35200 count=3850 of=dvb-usb-lme2510c-lg.fw
+
+---------------------------------------------------------------------
+
+Copy the firmware file(s) to /lib/firmware
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
index f3e046a6a987..1a2e8aa3fbb1 100644
--- a/Documentation/fb/viafb.txt
+++ b/Documentation/fb/viafb.txt
@@ -197,6 +197,54 @@ Notes:
example,
# fbset -depth 16
+
+[Configure viafb via /proc]
+---------------------------
+ The following files exist in /proc/viafb
+
+ supported_output_devices
+
+ This read-only file contains a full ',' separated list containing all
+ output devices that could be available on your platform. It is likely
+ that not all of those have a connector on your hardware but it should
+ provide a good starting point to figure out which of those names match
+ a real connector.
+ Example:
+ # cat /proc/viafb/supported_output_devices
+
+ iga1/output_devices
+ iga2/output_devices
+
+ These two files are readable and writable. iga1 and iga2 are the two
+ independent units that produce the screen image. Those images can be
+ forwarded to one or more output devices. Reading those files is a way
+ to query which output devices are currently used by an iga.
+ Example:
+ # cat /proc/viafb/iga1/output_devices
+ If no output devices are printed, the output of this iga is lost.
+ This can happen for example if only one (the other) iga is used.
+ Writing to these files allows adjusting the output devices during
+ runtime. One can add new devices, remove existing ones or switch
+ between igas. Essentially you can write a ',' separated list of device
+ names (or a single one) in the same format as the output to those
+ files. You can add a '+' or '-' as a prefix allowing simple addition
+ and removal of devices. So a prefix '+' adds the devices from your list
+ to the already existing ones, '-' removes the listed devices from the
+ existing ones and if no prefix is given it replaces all existing ones
+ with the listed ones. If you remove devices they are expected to turn
+ off. If you add devices that are already part of the other iga they are
+ removed there and added to the new one.
+ Examples:
+ Add CRT as output device to iga1
+ # echo +CRT > /proc/viafb/iga1/output_devices
+
+ Remove (turn off) DVP1 and LVDS1 as output devices of iga2
+ # echo -DVP1,LVDS1 > /proc/viafb/iga2/output_devices
+
+ Replace all iga1 output devices by CRT
+ # echo CRT > /proc/viafb/iga1/output_devices
+
+
[Bootup with viafb]:
--------------------
Add the following line to your grub.conf:
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index e833c8c81e69..d8f36f984faa 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -98,7 +98,7 @@ Who: Pavel Machek <pavel@ucw.cz>
---------------------------
What: Video4Linux API 1 ioctls and from Video devices.
-When: July 2009
+When: kernel 2.6.38
Files: include/linux/videodev.h
Check: include/linux/videodev.h
Why: V4L1 AP1 was replaced by V4L2 API during migration from 2.4 to 2.6
@@ -116,6 +116,21 @@ Who: Mauro Carvalho Chehab <mchehab@infradead.org>
---------------------------
+What: Video4Linux obsolete drivers using V4L1 API
+When: kernel 2.6.38
+Files: drivers/staging/cpia/* drivers/staging/stradis/*
+Check: drivers/staging/cpia/cpia.c drivers/staging/stradis/stradis.c
+Why: There are some drivers still using the V4L1 API, despite all the efforts
+ made to migrate them. Those drivers are for obsolete hardware that the old
+ maintainer didn't care about (or no longer has), and for which no other
+ developer could find any hardware to buy. They probably have no practical
+ use today, and people with such old hardware can probably keep using an
+ older version of the kernel. Those drivers will be moved to staging in
+ 2.6.37 and, if nobody cares enough to port and test them with the V4L2 API,
+ they'll be removed in 2.6.38.
+Who: Mauro Carvalho Chehab <mchehab@infradead.org>
+
+---------------------------
+
What: sys_sysctl
When: September 2010
Option: CONFIG_SYSCTL_SYSCALL
@@ -470,29 +485,6 @@ When: April 2011
Why: Superseded by xt_CT
Who: Netfilter developer team <netfilter-devel@vger.kernel.org>
----------------------------
-
-What: video4linux /dev/vtx teletext API support
-When: 2.6.35
-Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c
- include/linux/videotext.h
-Why: The vtx device nodes have been superseded by vbi device nodes
- for many years. No applications exist that use the vtx support.
- Of the two i2c drivers that actually support this API the saa5249
- has been impossible to use for a year now and no known hardware
- that supports this device exists. The saa5246a is theoretically
- supported by the old mxb boards, but it never actually worked.
-
- In summary: there is no hardware that can use this API and there
- are no applications actually implementing this API.
-
- The vtx support still reserves minors 192-223 and we would really
- like to reuse those for upcoming new functionality. In the unlikely
- event that new hardware appears that wants to use the functionality
- provided by the vtx API, then that functionality should be build
- around the sliced VBI API instead.
-Who: Hans Verkuil <hverkuil@xs4all.nl>
-
----------------------------
What: IRQF_DISABLED
@@ -526,6 +518,23 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
----------------------------
+What: namespace cgroup (ns_cgroup)
+When: 2.6.38
+Why: The ns_cgroup leads to some problems:
+ * cgroup creation is out-of-control
+ * cgroup name can conflict when pids are looping
+ * it is not possible to have a single process handling
+ a lot of namespaces without falling into exponential creation time
+ * we may want to create a namespace without creating a cgroup
+
+ The ns_cgroup is replaced by a compatibility flag 'clone_children',
+ where a newly created cgroup will copy the parent cgroup values.
+ The userspace has to manually create a cgroup and add a task to
+ the 'tasks' file.
+Who: Daniel Lezcano <daniel.lezcano@free.fr>
+
+----------------------------
+
What: iwlwifi disable_hw_scan module parameters
When: 2.6.40
Why: Hareware scan is the prefer method for iwlwifi devices for
@@ -535,3 +544,13 @@ Why: Hareware scan is the prefer method for iwlwifi devices for
Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
----------------------------
+
+What: access to nfsd auth cache through sys_nfsservctl or '.' files
+ in the 'nfsd' filesystem.
+When: 2.6.40
+Why: This is a legacy interface which has been replaced by a more
+ dynamic cache. Continuing to maintain this interface is an
+ unnecessary burden.
+Who: NeilBrown <neilb@suse.de>
+
+----------------------------
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index f9765e8cf086..b22abba78fed 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -111,7 +111,7 @@ OPTIONS
This can be used to share devices/named pipes/sockets between
hosts. This functionality will be expanded in later versions.
- access there are three access modes.
+ access there are four access modes.
user = if a user tries to access a file on v9fs
filesystem for the first time, v9fs sends an
attach command (Tattach) for that user.
@@ -120,6 +120,8 @@ OPTIONS
the files on the mounted filesystem
any = v9fs does single attach and performs all
operations as one user
+ client = ACL based access check on the 9p client
+ side for access validation
cachetag cache tag to use the specified persistent cache.
cache tags for existing cache sessions can be listed at
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2db4283efa8d..8a817f656f0a 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -349,21 +349,36 @@ call this method upon the IO completion.
--------------------------- block_device_operations -----------------------
prototypes:
- int (*open) (struct inode *, struct file *);
- int (*release) (struct inode *, struct file *);
- int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
+ int (*open) (struct block_device *, fmode_t);
+ int (*release) (struct gendisk *, fmode_t);
+ int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
int (*media_changed) (struct gendisk *);
+ void (*unlock_native_capacity) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
locking rules:
- BKL bd_sem
-open: yes yes
-release: yes yes
-ioctl: yes no
+ BKL bd_mutex
+open: no yes
+release: no yes
+ioctl: no no
+compat_ioctl: no no
+direct_access: no no
media_changed: no no
+unlock_native_capacity: no no
revalidate_disk: no no
+getgeo: no no
+swap_slot_free_notify: no no (see below)
+
+media_changed, unlock_native_capacity and revalidate_disk are called only from
+check_disk_change().
+
+swap_slot_free_notify is called with swap_lock and sometimes the page lock
+held.
-The last two are called only from check_disk_change().
--------------------------- file_operations -------------------------------
prototypes:
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index e1def1786e50..6ab9442d7eeb 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -353,6 +353,20 @@ noauto_da_alloc replacing existing files via patterns such as
system crashes before the delayed allocation
blocks are forced to disk.
+noinit_itable Do not initialize any uninitialized inode table
+ blocks in the background. This feature may be
+ used by installation CD's so that the install
+ process can complete as quickly as possible; the
+ inode table initialization process would then be
+ deferred until the next time the file system
+ is unmounted.
+
+init_itable=n The lazy itable init code will wait n times the
+ number of milliseconds it took to zero out the
+ previous block group's inode table. This
+ minimizes the impact on system performance
+ while the file system's inode table is being initialized.
+
discard Controls whether ext4 should issue discard/TRIM
nodiscard(*) commands to the underlying block device when
blocks are freed. This is useful for SSD devices
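
The new option can of course also be passed programmatically; a hedged sketch
using mount(2) with a placeholder device and mount point, requesting a 10x
wait multiplier (mount(8) with -o init_itable=10 would do the same thing):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* Wait 10x the time it took to zero the previous group's inode
         * table before doing the next one (placeholder device/mountpoint). */
        if (mount("/dev/sdb1", "/mnt/scratch", "ext4", 0, "init_itable=10") < 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }
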
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 3225a5662114..a57e12411d2a 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@ nfs-rdma.txt
- how to install and setup the Linux NFS/RDMA client and server software
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
+pnfs.txt
+ - short explanation of some of the internals of the pnfs client code
rpc-cache.txt
- introduction to the caching mechanisms in the sunrpc layer.
idmapper.txt
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index c3852041a21f..b9b4192ea8b5 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -6,7 +6,7 @@ Id mapper is used by NFS to translate user and group ids into names, and to
translate user and group names into ids. Part of this translation involves
performing an upcall to userspace to request the information. Id mapper will
user request-key to perform this upcall and cache the result. The program
-/usr/sbin/nfs.upcall should be called by request-key, and will perform the
+/usr/sbin/nfs.idmap should be called by request-key, and will perform the
translation and initialize a key with the resulting information.
NFS_USE_NEW_IDMAPPER must be selected when configuring the kernel to use this
@@ -20,12 +20,12 @@ direct the upcall. The following line should be added:
#OP TYPE DESCRIPTION CALLOUT INFO PROGRAM ARG1 ARG2 ARG3 ...
#====== ======= =============== =============== ===============================
-create id_resolver * * /usr/sbin/nfs.upcall %k %d 600
+create id_resolver * * /usr/sbin/nfs.idmap %k %d 600
-This will direct all id_resolver requests to the program /usr/sbin/nfs.upcall.
+This will direct all id_resolver requests to the program /usr/sbin/nfs.idmap.
The last parameter, 600, defines how many seconds into the future the key will
-expire. This parameter is optional for /usr/sbin/nfs.upcall. When the timeout
-is not specified, nfs.upcall will default to 600 seconds.
+expire. This parameter is optional for /usr/sbin/nfs.idmap. When the timeout
+is not specified, nfs.idmap will default to 600 seconds.
id mapper uses for key descriptions:
uid: Find the UID for the given user
@@ -39,29 +39,29 @@ would edit your request-key.conf so it look similar to this:
#OP TYPE DESCRIPTION CALLOUT INFO PROGRAM ARG1 ARG2 ARG3 ...
#====== ======= =============== =============== ===============================
-create id_resolver uid:* * /some/other/program %k %d 600
-create id_resolver * * /usr/sbin/nfs.upcall %k %d 600
+create id_resolver uid:* * /some/other/program %k %d 600
+create id_resolver * * /usr/sbin/nfs.idmap %k %d 600
Notice that the new line was added above the line for the generic program.
request-key will find the first matching line and corresponding program. In
this case, /some/other/program will handle all uid lookups and
-/usr/sbin/nfs.upcall will handle gid, user, and group lookups.
+/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
See <file:Documentation/keys-request-keys.txt> for more information about the
request-key function.
-==========
-nfs.upcall
-==========
-nfs.upcall is designed to be called by request-key, and should not be run "by
+=========
+nfs.idmap
+=========
+nfs.idmap is designed to be called by request-key, and should not be run "by
hand". This program takes two arguments, a serialized key and a key
description. The serialized key is first converted into a key_serial_t, and
then passed as an argument to keyctl_instantiate (both are part of keyutils.h).
-The actual lookups are performed by functions found in nfsidmap.h. nfs.upcall
+The actual lookups are performed by functions found in nfsidmap.h. nfs.idmap
determines the correct function to call by looking at the first part of the
description string. For example, a uid lookup description will appear as
"uid:user@domain".
-nfs.upcall will return 0 if the key was instantiated, and non-zero otherwise.
+nfs.idmap will return 0 if the key was instantiated, and non-zero otherwise.
diff --git a/Documentation/filesystems/nfs/pnfs.txt b/Documentation/filesystems/nfs/pnfs.txt
new file mode 100644
index 000000000000..bc0b9cfe095b
--- /dev/null
+++ b/Documentation/filesystems/nfs/pnfs.txt
@@ -0,0 +1,48 @@
+Reference counting in pnfs:
+==========================
+
+There are several inter-related caches. We have layouts which can
+reference multiple devices, each of which can reference multiple data servers.
+Each data server can be referenced by multiple devices. Each device
+can be referenced by multiple layouts. To keep all of this straight,
+we need to reference count.
+
+
+struct pnfs_layout_hdr
+----------------------
+The on-the-wire command LAYOUTGET corresponds to struct
+pnfs_layout_segment, usually referred to by the variable name lseg.
+Each nfs_inode may hold a pointer to a cache of of these layout
+segments in nfsi->layout, of type struct pnfs_layout_hdr.
+
+We reference the header for the inode pointing to it, across each
+outstanding RPC call that references it (LAYOUTGET, LAYOUTRETURN,
+LAYOUTCOMMIT), and for each lseg held within.
+
+Each header is also (when non-empty) put on a list associated with
+struct nfs_client (cl_layouts). Being put on this list does not bump
+the reference count, as the layout is kept around by the lseg that
+keeps it in the list.
+
+deviceid_cache
+--------------
+lsegs reference device ids, which are resolved per nfs_client and
+layout driver type. The device ids are held in a RCU cache (struct
+nfs4_deviceid_cache). The cache itself is referenced across each
+mount. The entries (struct nfs4_deviceid) themselves are held across
+the lifetime of each lseg referencing them.
+
+RCU is used because the deviceid is basically a write once, read many
+data structure. The hlist size of 32 buckets needs better
+justification, but seems reasonable given that we can have multiple
+deviceid's per filesystem, and multiple filesystems per nfs_client.
+
+The hash code is copied from the nfsd code base. A discussion of
+hashing and variations of this algorithm can be found at:
+http://groups.google.com/group/comp.lang.c/browse_thread/thread/9522965e2b8d3809
+
+data server cache
+-----------------
+file driver devices refer to data servers, which are kept in a module
+level cache. Its reference is held over the lifetime of the deviceid
+pointing to it.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a6aca8740883..e73df2722ff3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -136,6 +136,7 @@ Table 1-1: Process specific entries in /proc
statm Process memory status information
status Process status in human readable form
wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ pagemap Page table
stack Report full stack trace, enable via CONFIG_STACKTRACE
smaps a extension based on maps, showing the memory consumption of
each mapping
@@ -370,17 +371,24 @@ Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 892 kB
+Anonymous: 0 kB
Swap: 0 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
-The first of these lines shows the same information as is displayed for the
-mapping in /proc/PID/maps. The remaining lines show the size of the mapping,
-the amount of the mapping that is currently resident in RAM, the "proportional
-set size” (divide each shared page by the number of processes sharing it), the
-number of clean and dirty shared pages in the mapping, and the number of clean
-and dirty private pages in the mapping. The "Referenced" indicates the amount
-of memory currently marked as referenced or accessed.
+The first of these lines shows the same information as is displayed for the
+mapping in /proc/PID/maps. The remaining lines show the size of the mapping
+(size), the amount of the mapping that is currently resident in RAM (RSS), the
+process' proportional share of this mapping (PSS), the number of clean and
+dirty private pages in the mapping. Note that even a page which is part of a
+MAP_SHARED mapping, but has only a single pte mapped, i.e. is currently used
+by only one process, is accounted as private and not as shared. "Referenced"
+indicates the amount of memory currently marked as referenced or accessed.
+"Anonymous" shows the amount of memory that does not belong to any file. Even
+a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
+and a page is modified, the file page is replaced by a private anonymous copy.
+"Swap" shows how much would-be-anonymous memory is also used, but out on
+swap.
This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
@@ -397,6 +405,9 @@ To clear the bits for the file mapped pages associated with the process
> echo 3 > /proc/PID/clear_refs
Any other value written to /proc/PID/clear_refs will have no effect.
+The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
+using /proc/kpageflags and the number of times a page is mapped using
+/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
1.2 Kernel data
---------------
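
Chaining the three files together looks roughly like the sketch below. It
assumes the pagemap entry layout documented in Documentation/vm/pagemap.txt
(PFN in bits 0-54, page-present in bit 63); reading /proc/kpagecount normally
requires root.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Translate one virtual address of the current process into a PFN via
     * /proc/self/pagemap, then ask /proc/kpagecount how often it is mapped. */
    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        uintptr_t vaddr = (uintptr_t)&psize;      /* any mapped address */
        uint64_t entry = 0, count = 0, pfn;
        int pm, kc;

        pm = open("/proc/self/pagemap", O_RDONLY);
        if (pm < 0 || pread(pm, &entry, sizeof(entry),
                            (vaddr / psize) * sizeof(entry)) != sizeof(entry)) {
            perror("pagemap");
            return 1;
        }
        close(pm);

        if (!(entry & (1ULL << 63))) {            /* page not present */
            fprintf(stderr, "page not present\n");
            return 1;
        }
        pfn = entry & ((1ULL << 55) - 1);

        kc = open("/proc/kpagecount", O_RDONLY);  /* usually needs root */
        if (kc >= 0 && pread(kc, &count, sizeof(count),
                             pfn * sizeof(count)) == sizeof(count))
            printf("vaddr %#lx -> pfn %llu, mapped %llu time(s)\n",
                   (unsigned long)vaddr, (unsigned long long)pfn,
                   (unsigned long long)count);
        if (kc >= 0)
            close(kc);
        return 0;
    }
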
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index fc0e39af43c3..4ede421c9687 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -62,10 +62,10 @@ replicas continue to be exactly same.
# mount /dev/sd0 /tmp/a
#ls /tmp/a
- t1 t2 t2
+ t1 t2 t3
#ls /mnt/a
- t1 t2 t2
+ t1 t2 t3
Note that the mount has propagated to the mount at /mnt as well.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 33223ff121d8..10f5af8b73f7 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -278,7 +278,6 @@ Code Seq#(hex) Include File Comments
<mailto:oe@port.de>
'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
0x80 00-1F linux/fb.h
-0x81 00-1F linux/videotext.h
0x88 00-3F media/ovcamchip.h
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4bc2f3c3da5b..ed45e9802aa8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2175,6 +2175,11 @@ and is between 256 and 4096 characters. It is defined in the file
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
+ resource_alloc_from_bottom
+ Allocate new resources from the beginning of available
+ space, not the end. If you need to use this, please
+ report a bug.
+
resume= [SWSUSP]
Specify the partition device for software suspend
diff --git a/Documentation/misc-devices/apds990x.txt b/Documentation/misc-devices/apds990x.txt
new file mode 100644
index 000000000000..d5408cade32f
--- /dev/null
+++ b/Documentation/misc-devices/apds990x.txt
@@ -0,0 +1,111 @@
+Kernel driver apds990x
+======================
+
+Supported chips:
+Avago APDS990X
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+
+APDS990x is a combined ambient light and proximity sensor. The ALS and
+proximity functionality are highly connected: the ALS measurement path must be
+running while the proximity functionality is enabled.
+
+ALS produces raw measurement values for two channels: Clear channel
+(infrared + visible light) and IR only. However, threshold comparisons happen
+using the clear channel only. The lux value and the threshold level on the HW
+may vary considerably depending on the spectrum of the light source.
+
+The driver makes the necessary conversions in both directions so that the user
+handles only lux values. The lux value is calculated using information from
+both channels. The HW threshold level is calculated from the given lux value
+to match the current type of lighting. Sometimes inaccuracy of the estimations
+leads to a false interrupt, but that does no harm.
+
+The ALS contains 4 different gain steps. The driver automatically selects a
+suitable gain step. After each measurement, the reliability of the results
+is estimated and a new measurement is triggered if necessary.
+
+Platform data can provide tuned values to the conversion formulas if
+values are known. Otherwise plain sensor default values are used.
+
+The proximity side is a little simpler; there is no need for complex
+conversions. It produces directly usable values.
+
+Driver controls chip operational state using pm_runtime framework.
+Voltage regulators are controlled based on chip operational state.
+
+SYSFS
+-----
+
+
+chip_id
+ RO - shows detected chip type and version
+
+power_state
+ RW - enable / disable chip. Uses counting logic
+ 1 enables the chip
+ 0 disables the chip
+lux0_input
+ RO - measured lux value
+ sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+ RO - lux0_input max value. Never actually reached, since the sensor tends
+ to saturate well before that. The real max value varies depending
+ on the light spectrum etc.
+
+lux0_rate
+ RW - measurement rate in Hz
+
+lux0_rate_avail
+ RO - supported measurement rates
+
+lux0_calibscale
+ RW - calibration value. Set to neutral value by default.
+ Output results are multiplied with calibscale / calibscale_default
+ value.
+
+lux0_calibscale_default
+ RO - neutral calibration value
+
+lux0_thresh_above_value
+ RW - HI level threshold value. All results above the value
+ trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+ interrupt.
+
+lux0_thresh_below_value
+ RW - LO level threshold value. All results below the value
+ trigger an interrupt. 0 disables the below interrupt.
+
+prox0_raw
+ RO - measured proximity value
+ sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+ RO - prox0_raw max value (1023)
+
+prox0_raw_en
+ RW - enable / disable proximity - uses counting logic
+ 1 enables the proximity
+ 0 disables the proximity
+
+prox0_reporting_mode
+ RW - trigger / periodic. In "trigger" mode the driver reports two possible
+ values: 0 or the prox0_sensor_range value. 0 means no proximity,
+ 1023 means proximity. This causes a minimal number of interrupts.
+ In "periodic" mode the driver reports all values above
+ prox0_thresh_above. This causes more interrupts, but it can give a
+ _rough_ estimate of the distance.
+
+prox0_reporting_mode_avail
+ RO - accepted values to prox0_reporting_mode (trigger, periodic)
+
+prox0_thresh_above_value
+ RW - threshold level which triggers proximity events.
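
A small user-space sketch of using these attributes; the sysfs directory is
not named in this document, so the i2c path below is only an assumption made
to keep the example concrete:

    #include <stdio.h>

    /* Assumed location: the i2c client's sysfs directory (board dependent). */
    #define APDS_DIR "/sys/bus/i2c/devices/2-0039"

    int main(void)
    {
        char path[128];
        unsigned int lux;
        FILE *f;

        /* Read the current illuminance. */
        snprintf(path, sizeof(path), "%s/lux0_input", APDS_DIR);
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%u", &lux) == 1)
            printf("current illuminance: %u lux\n", lux);
        fclose(f);

        /* Arm the high threshold at 1000 lux; results above that value make
         * the driver call sysfs_notify() on lux0_input. */
        snprintf(path, sizeof(path), "%s/lux0_thresh_above_value", APDS_DIR);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "1000\n");
        fclose(f);
        return 0;
    }
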
diff --git a/Documentation/misc-devices/bh1770glc.txt b/Documentation/misc-devices/bh1770glc.txt
new file mode 100644
index 000000000000..7d64c014dc70
--- /dev/null
+++ b/Documentation/misc-devices/bh1770glc.txt
@@ -0,0 +1,116 @@
+Kernel driver bh1770glc
+=======================
+
+Supported chips:
+ROHM BH1770GLC
+OSRAM SFH7770
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+BH1770GLC and SFH7770 are combined ambient light and proximity sensors.
+The ALS and proximity parts operate independently, but they share a common
+I2C interface and interrupt logic. In principle they can run on their own, but
+the ALS results are used to estimate the reliability of the proximity sensor.
+
+The ALS produces 16-bit lux values. The chip contains interrupt logic to
+produce low and high threshold interrupts.
+
+The proximity part contains an IR-LED driver for up to 3 IR LEDs. The chip
+measures the amount of reflected IR light and produces a proximity result with
+8-bit resolution. The driver supports only one channel and uses the ALS
+results to estimate the reliability of the proximity results; thus the ALS is
+always kept running while proximity detection is needed.
+
+The driver uses threshold interrupts to avoid the need to poll the values.
+A proximity low interrupt doesn't exist in the chip, so it is simulated with
+a delayed work item. As long as 'proximity threshold above' interrupts keep
+arriving, the delayed work is pushed forward. When the proximity level drops
+below the threshold value there is no interrupt and the delayed work finally
+runs. This is handled as a 'no proximity' indication.
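+
+A minimal sketch of that pattern (the names below are illustrative, not the
+driver's actual symbols; INIT_DELAYED_WORK() at probe time is assumed):
+
+	/* Re-armed from the threshold interrupt; runs only when the
+	 * interrupts stop, i.e. when proximity is no longer detected. */
+	static void prox_timeout_work(struct work_struct *work)
+	{
+		struct chip_data *chip =
+			container_of(work, struct chip_data, prox_work.work);
+
+		report_proximity(chip, 0);	/* no proximity */
+	}
+
+	/* Threaded IRQ handler (the proximity result is read over I2C). */
+	static irqreturn_t prox_threshold_irq(int irq, void *data)
+	{
+		struct chip_data *chip = data;
+
+		report_proximity(chip, read_prox_result(chip));
+		/* Push the simulated "proximity low" event forward. */
+		cancel_delayed_work(&chip->prox_work);
+		schedule_delayed_work(&chip->prox_work,
+				      msecs_to_jiffies(PROX_TIMEOUT_MS));
+		return IRQ_HANDLED;
+	}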
+
+The chip state is controlled via the runtime PM framework when it is enabled
+in the kernel configuration.
+
+The calibscale factor is used to hide differences between individual chips.
+By default it is set to the neutral value, i.e. a factor of 1.00. To get
+proper values, a calibrated light source is needed as a reference. The
+calibscale factor is then set so that the measurement produces approximately
+the expected lux value. For example, if the neutral value were 1000, setting
+calibscale to 1100 would scale the reported results by a factor of 1.10.
+
+SYSFS
+-----
+
+chip_id
+ RO - shows detected chip type and version
+
+power_state
+ RW - enable / disable chip. Uses counting logic
+ 1 enables the chip
+ 0 disables the chip
+
+lux0_input
+ RO - measured lux value
+ sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+ RO - lux0_input max value
+
+lux0_rate
+ RW - measurement rate in Hz
+
+lux0_rate_avail
+ RO - supported measurement rates
+
+lux0_thresh_above_value
+	RW - HI level threshold value. All results above this value
+	trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+	interrupt.
+
+lux0_thresh_below_value
+	RW - LO level threshold value. All results below this value
+	trigger an interrupt. 0 disables the below interrupt.
+
+lux0_calibscale
+	RW - calibration value. Set to the neutral value by default.
+	Output results are multiplied by calibscale / calibscale_default.
+
+lux0_calibscale_default
+ RO - neutral calibration value
+
+prox0_raw
+ RO - measured proximity value
+ sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+ RO - prox0_raw max value
+
+prox0_raw_en
+ RW - enable / disable proximity - uses counting logic
+ 1 enables the proximity
+ 0 disables the proximity
+
+prox0_thresh_above_count
+ RW - number of proximity interrupts needed before triggering the event
+
+prox0_rate_above
+ RW - Measurement rate (in Hz) when the level is above threshold
+ i.e. when proximity on has been reported.
+
+prox0_rate_below
+ RW - Measurement rate (in Hz) when the level is below threshold
+ i.e. when proximity off has been reported.
+
+prox0_rate_avail
+ RO - Supported proximity measurement rates in Hz
+
+prox0_thresh_above0_value
+	RW - threshold level which triggers proximity events.
+	Filtered by the persistence filter (prox0_thresh_above_count)
+
+prox0_thresh_above1_value
+	RW - threshold level which triggers an event immediately
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 88bb71b46da4..9eb1ba52013d 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -177,18 +177,6 @@ Doing it all yourself
A convenience function to print out the PHY status neatly.
- int phy_clear_interrupt(struct phy_device *phydev);
- int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
-
- Clear the PHY's interrupt, and configure which ones are allowed,
- respectively. Currently only supports all on, or all off.
-
- int phy_enable_interrupts(struct phy_device *phydev);
- int phy_disable_interrupts(struct phy_device *phydev);
-
- Functions which enable/disable PHY interrupts, clearing them
- before and after, respectively.
-
int phy_start_interrupts(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@@ -213,12 +201,6 @@ Doing it all yourself
Fills the phydev structure with up-to-date information about the current
settings in the PHY.
- void phy_sanitize_settings(struct phy_device *phydev)
-
- Resolves differences between currently desired settings, and
- supported settings for the given PHY device. Does not make
- the changes in the hardware, though.
-
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index b606c2c4dd37..30289fab86eb 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -80,8 +80,10 @@ dirty_background_bytes
Contains the amount of dirty memory at which the pdflush background writeback
daemon will start writeback.
-If dirty_background_bytes is written, dirty_background_ratio becomes a function
-of its value (dirty_background_bytes / the amount of dirtyable system memory).
+Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
+one of them may be specified at a time. When one sysctl is written it is
+immediately taken into account to evaluate the dirty memory limits and the
+other appears as 0 when read.
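+
+A minimal illustration in C (the path is the standard procfs sysctl file;
+echoing a value from a shell works just as well):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* After this write, dirty_background_ratio reads back as 0. */
+		FILE *f = fopen("/proc/sys/vm/dirty_background_bytes", "w");
+
+		if (!f)
+			return 1;
+		fprintf(f, "%lu\n", 100UL << 20);	/* 100 MiB */
+		return fclose(f) ? 1 : 0;
+	}
+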
==============================================================
@@ -97,8 +99,10 @@ dirty_bytes
Contains the amount of dirty memory at which a process generating disk writes
will itself start writeback.
-If dirty_bytes is written, dirty_ratio becomes a function of its value
-(dirty_bytes / the amount of dirtyable system memory).
+Note: dirty_bytes is the counterpart of dirty_ratio. Only one of them may be
+specified at a time. When one sysctl is written it is immediately taken into
+account to evaluate the dirty memory limits and the other appears as 0 when
+read.
Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
value lower than this limit will be ignored and the old configuration will be
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 5c17196c8fe9..312e3754e8c5 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -75,7 +75,7 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'f' - Will call oom_kill to kill a memory hog process.
-'g' - Used by kgdb on ppc and sh platforms.
+'g' - Used by kgdb (kernel debugger)
'h' - Will display help (actually any other key than those listed
here will display help. but 'h' is easy to remember :-)
@@ -110,12 +110,15 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'u' - Will attempt to remount all mounted filesystems read-only.
-'v' - Dumps Voyager SMP processor info to your console.
+'v' - Forcefully restores framebuffer console
+'v' - Causes ETM buffer dump [ARM-specific]
'w' - Dumps tasks that are in uninterruptable (blocked) state.
'x' - Used by xmon interface on ppc/powerpc platforms.
+'y' - Show global CPU Registers [SPARC-64 specific]
+
'z' - Dump the ftrace buffer
'0'-'9' - Sets the console log level, controlling which kernel messages
diff --git a/Documentation/timers/hpet_example.c b/Documentation/timers/hpet_example.c
index 4bfafb7bc4c5..9a3e7012c190 100644
--- a/Documentation/timers/hpet_example.c
+++ b/Documentation/timers/hpet_example.c
@@ -97,6 +97,33 @@ hpet_open_close(int argc, const char **argv)
void
hpet_info(int argc, const char **argv)
{
+ struct hpet_info info;
+ int fd;
+
+ if (argc != 1) {
+ fprintf(stderr, "hpet_info: device-name\n");
+ return;
+ }
+
+ fd = open(argv[0], O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
+ return;
+ }
+
+ if (ioctl(fd, HPET_INFO, &info) < 0) {
+ fprintf(stderr, "hpet_info: failed to get info\n");
+ goto out;
+ }
+
+ fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
+ info.hi_ireqfreq, info.hi_flags);
+ fprintf(stderr, "hi_hpet %d hi_timer %d\n",
+ info.hi_hpet, info.hi_timer);
+
+out:
+ close(fd);
+ return;
}
void
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 1b55146d1c8d..b3e73ddb1567 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -46,7 +46,7 @@ use constant HIGH_KSWAPD_LATENCY => 20;
use constant HIGH_KSWAPD_REWAKEUP => 21;
use constant HIGH_NR_SCANNED => 22;
use constant HIGH_NR_TAKEN => 23;
-use constant HIGH_NR_RECLAIM => 24;
+use constant HIGH_NR_RECLAIMED => 24;
use constant HIGH_NR_CONTIG_DIRTY => 25;
my %perprocesspid;
@@ -58,11 +58,13 @@ my $opt_read_procstat;
my $total_wakeup_kswapd;
my ($total_direct_reclaim, $total_direct_nr_scanned);
my ($total_direct_latency, $total_kswapd_latency);
+my ($total_direct_nr_reclaimed);
my ($total_direct_writepage_file_sync, $total_direct_writepage_file_async);
my ($total_direct_writepage_anon_sync, $total_direct_writepage_anon_async);
my ($total_kswapd_nr_scanned, $total_kswapd_wake);
my ($total_kswapd_writepage_file_sync, $total_kswapd_writepage_file_async);
my ($total_kswapd_writepage_anon_sync, $total_kswapd_writepage_anon_async);
+my ($total_kswapd_nr_reclaimed);
# Catch sigint and exit on request
my $sigint_report = 0;
@@ -104,7 +106,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
-my $regex_lru_shrink_inactive_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -203,8 +205,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_inactive",
$regex_lru_shrink_inactive_default,
"nid", "zid",
- "lru",
- "nr_scanned", "nr_reclaimed", "priority");
+ "nr_scanned", "nr_reclaimed", "priority",
+ "flags");
$regex_lru_shrink_active = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_active",
$regex_lru_shrink_active_default,
@@ -375,6 +377,16 @@ EVENT_PROCESS:
my $nr_contig_dirty = $7;
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
+ } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
+ $details = $5;
+ if ($details !~ /$regex_lru_shrink_inactive/o) {
+ print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n";
+ print " $details\n";
+ print " $regex_lru_shrink_inactive/o\n";
+ next;
+ }
+ my $nr_reclaimed = $4;
+ $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed;
} elsif ($tracepoint eq "mm_vmscan_writepage") {
$details = $5;
if ($details !~ /$regex_writepage/o) {
@@ -464,8 +476,8 @@ sub dump_stats {
# Print out process activity
printf("\n");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Time");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Sync-IO", "ASync-IO", "Stalled");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "Process", "Direct", "Wokeup", "Pages", "Pages", "Pages", "Pages", "Time");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s\n", "details", "Rclms", "Kswapd", "Scanned", "Rclmed", "Sync-IO", "ASync-IO", "Stalled");
foreach $process_pid (keys %stats) {
if (!$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}) {
@@ -475,6 +487,7 @@ sub dump_stats {
$total_direct_reclaim += $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN};
$total_wakeup_kswapd += $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
$total_direct_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+ $total_direct_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
$total_direct_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$total_direct_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$total_direct_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
@@ -489,11 +502,12 @@ sub dump_stats {
$index++;
}
- printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8.3f",
+ printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8u %8u %8.3f",
$process_pid,
$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN},
$stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD},
$stats{$process_pid}->{HIGH_NR_SCANNED},
+ $stats{$process_pid}->{HIGH_NR_RECLAIMED},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC},
$this_reclaim_delay / 1000);
@@ -529,8 +543,8 @@ sub dump_stats {
# Print out kswapd activity
printf("\n");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages");
- printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Sync-IO", "ASync-IO");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Kswapd", "Kswapd", "Order", "Pages", "Pages", "Pages", "Pages");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Rclmed", "Sync-IO", "ASync-IO");
foreach $process_pid (keys %stats) {
if (!$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE}) {
@@ -539,16 +553,18 @@ sub dump_stats {
$total_kswapd_wake += $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE};
$total_kswapd_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+ $total_kswapd_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
$total_kswapd_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$total_kswapd_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$total_kswapd_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
$total_kswapd_writepage_anon_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC};
- printf("%-" . $max_strlen . "s %8d %10d %8u %8i %8u",
+ printf("%-" . $max_strlen . "s %8d %10d %8u %8u %8i %8u",
$process_pid,
$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE},
$stats{$process_pid}->{HIGH_KSWAPD_REWAKEUP},
$stats{$process_pid}->{HIGH_NR_SCANNED},
+ $stats{$process_pid}->{HIGH_NR_RECLAIMED},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC});
@@ -579,6 +595,7 @@ sub dump_stats {
print "\nSummary\n";
print "Direct reclaims: $total_direct_reclaim\n";
print "Direct reclaim pages scanned: $total_direct_nr_scanned\n";
+ print "Direct reclaim pages reclaimed: $total_direct_nr_reclaimed\n";
print "Direct reclaim write file sync I/O: $total_direct_writepage_file_sync\n";
print "Direct reclaim write anon sync I/O: $total_direct_writepage_anon_sync\n";
print "Direct reclaim write file async I/O: $total_direct_writepage_file_async\n";
@@ -588,6 +605,7 @@ sub dump_stats {
print "\n";
print "Kswapd wakeups: $total_kswapd_wake\n";
print "Kswapd pages scanned: $total_kswapd_nr_scanned\n";
+ print "Kswapd pages reclaimed: $total_kswapd_nr_reclaimed\n";
print "Kswapd reclaim write file sync I/O: $total_kswapd_writepage_file_sync\n";
print "Kswapd reclaim write anon sync I/O: $total_kswapd_writepage_anon_sync\n";
print "Kswapd reclaim write file async I/O: $total_kswapd_writepage_file_async\n";
@@ -612,6 +630,7 @@ sub aggregate_perprocesspid() {
$perprocess{$process}->{MM_VMSCAN_WAKEUP_KSWAPD} += $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
$perprocess{$process}->{HIGH_KSWAPD_REWAKEUP} += $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP};
$perprocess{$process}->{HIGH_NR_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_SCANNED};
+ $perprocess{$process}->{HIGH_NR_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index f2510541373b..42517d9121de 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -83,3 +83,4 @@
82 -> WinFast DTV2000 H rev. J [107d:6f2b]
83 -> Prof 7301 DVB-S/S2 [b034:3034]
84 -> Samsung SMT 7020 DVB-S [18ac:dc00,18ac:dccd]
+ 85 -> Twinhan VP-1027 DVB-S [1822:0023]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 5c568757c301..ac2616a62fc3 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -31,6 +31,7 @@
30 -> Videology 20K14XUSB USB2.0 (em2820/em2840)
31 -> Usbgear VD204v9 (em2821)
32 -> Supercomp USB 2.0 TV (em2821)
+ 33 -> Elgato Video Capture (em2860) [0fd9:0033]
34 -> Terratec Cinergy A Hybrid XS (em2860) [0ccd:004f]
35 -> Typhoon DVD Maker (em2860)
36 -> NetGMBH Cam (em2860)
@@ -45,7 +46,7 @@
45 -> Pinnacle PCTV DVB-T (em2870)
46 -> Compro, VideoMate U3 (em2870) [185b:2870]
47 -> KWorld DVB-T 305U (em2880) [eb1a:e305]
- 48 -> KWorld DVB-T 310U (em2880) [eb1a:e310]
+ 48 -> KWorld DVB-T 310U (em2880)
49 -> MSI DigiVox A/D (em2880) [eb1a:e310]
50 -> MSI DigiVox A/D II (em2880) [eb1a:e320]
51 -> Terratec Hybrid XS Secam (em2880) [0ccd:004c]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 4000c29fcfb6..8d9afc7d8014 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -126,7 +126,7 @@
125 -> Beholder BeholdTV 409 [0000:4090]
126 -> Beholder BeholdTV 505 FM [5ace:5050]
127 -> Beholder BeholdTV 507 FM / BeholdTV 509 FM [5ace:5070,5ace:5090]
-128 -> Beholder BeholdTV Columbus TVFM [0000:5201]
+128 -> Beholder BeholdTV Columbus TV/FM [0000:5201]
129 -> Beholder BeholdTV 607 FM [5ace:6070]
130 -> Beholder BeholdTV M6 [5ace:6190]
131 -> Twinhan Hybrid DTV-DVB 3056 PCI [1822:0022]
diff --git a/Documentation/video4linux/bttv/MAKEDEV b/Documentation/video4linux/bttv/MAKEDEV
index 9d112f7fd5f7..093c0cd18042 100644
--- a/Documentation/video4linux/bttv/MAKEDEV
+++ b/Documentation/video4linux/bttv/MAKEDEV
@@ -19,7 +19,6 @@ function makedev () {
echo "*** new device names ***"
makedev video 0
makedev radio 64
-makedev vtx 192
makedev vbi 224
#echo "*** old device names (for compatibility only) ***"
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 56ba7bba7168..6a562eeeb4cd 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -302,12 +302,14 @@ sonixj 0c45:60fb Surfer NoName
sonixj 0c45:60fc LG-LIC300
sonixj 0c45:60fe Microdia Audio
sonixj 0c45:6100 PC Camera (SN9C128)
+sonixj 0c45:6102 PC Camera (SN9C128)
sonixj 0c45:610a PC Camera (SN9C128)
sonixj 0c45:610b PC Camera (SN9C128)
sonixj 0c45:610c PC Camera (SN9C128)
sonixj 0c45:610e PC Camera (SN9C128)
sonixj 0c45:6128 Microdia/Sonix SNP325
sonixj 0c45:612a Avant Camera
+sonixj 0c45:612b Speed-Link REFLECT2
sonixj 0c45:612c Typhoon Rasy Cam 1.3MPix
sonixj 0c45:6130 Sonix Pccam
sonixj 0c45:6138 Sn9c120 Mo4000
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index e831aaca66f8..f22f35c271f3 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -44,8 +44,8 @@ All drivers have the following structure:
2) A way of initializing and commanding sub-devices (if any).
-3) Creating V4L2 device nodes (/dev/videoX, /dev/vbiX, /dev/radioX and
- /dev/vtxX) and keeping track of device-node specific data.
+3) Creating V4L2 device nodes (/dev/videoX, /dev/vbiX and /dev/radioX)
+ and keeping track of device-node specific data.
4) Filehandle-specific structs containing per-filehandle data;
@@ -192,6 +192,11 @@ You also need a way to go from the low-level struct to v4l2_subdev. For the
common i2c_client struct the i2c_set_clientdata() call is used to store a
v4l2_subdev pointer, for other busses you may have to use other methods.
+Bridges might also need to store per-subdev private data, such as a pointer
+to bridge-specific per-subdev state. The v4l2_subdev structure provides
+host private data for that purpose that can be accessed with
+v4l2_get_subdev_hostdata() and v4l2_set_subdev_hostdata().
+
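+A minimal sketch (the bridge state structure is illustrative):
+
+	struct my_subdev_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	v4l2_set_subdev_hostdata(sd, state);
+	/* ... later, e.g. when handling a notification from the subdev ... */
+	state = v4l2_get_subdev_hostdata(sd);
+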
From the bridge driver perspective you load the sub-device module and somehow
obtain the v4l2_subdev pointer. For i2c devices this is easy: you call
i2c_get_clientdata(). For other busses something similar needs to be done.
@@ -448,6 +453,10 @@ You should also set these fields:
- ioctl_ops: if you use the v4l2_ioctl_ops to simplify ioctl maintenance
(highly recommended to use this and it might become compulsory in the
future!), then set this to your v4l2_ioctl_ops struct.
+- lock: leave to NULL if you want to do all the locking in the driver.
+  Otherwise you give it a pointer to a struct mutex and before any
+  of the v4l2_file_operations is called this lock will be taken by the
+  core and released afterwards.
- parent: you only set this if v4l2_device was registered with NULL as
the parent device struct. This only happens in cases where one hardware
device has multiple PCI devices that all share the same v4l2_device core.
@@ -464,6 +473,22 @@ If you use v4l2_ioctl_ops, then you should set either .unlocked_ioctl or
The v4l2_file_operations struct is a subset of file_operations. The main
difference is that the inode argument is omitted since it is never used.
+v4l2_file_operations and locking
+--------------------------------
+
+You can set a pointer to a mutex in struct video_device. Usually this
+will be either a top-level mutex or a mutex per device node. If you want
+finer-grained locking then you have to set it to NULL and do your own locking.
+
+If a lock is specified then all file operations will be serialized on that
+lock. If you use videobuf then you must pass the same lock to the videobuf
+queue initialization function: if videobuf has to wait for a frame to arrive, then
+it will temporarily unlock the lock and relock it afterwards. If your driver
+also waits in the code, then you should do the same to allow other processes
+to access the device node while the first process is waiting for something.
+
+The implementation of a hotplug disconnect should also take the lock before
+calling v4l2_device_disconnect.
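+
+A minimal sketch of using one mutex per device node (the driver structure
+'mydev' and its embedded video_device are illustrative):
+
+	mutex_init(&mydev->lock);
+	mydev->vdev.lock = &mydev->lock;	/* serialize all file operations */
+
+	/* When using videobuf, hand the same mutex to the videobuf queue
+	 * initialization so that videobuf can drop it while waiting. */
+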
video_device registration
-------------------------
@@ -483,7 +508,6 @@ types exist:
VFL_TYPE_GRABBER: videoX for video input/output devices
VFL_TYPE_VBI: vbiX for vertical blank data (i.e. closed captions, teletext)
VFL_TYPE_RADIO: radioX for radio tuners
-VFL_TYPE_VTX: vtxX for teletext devices (deprecated, don't use)
The last argument gives you a certain amount of control over the device
device node number used (i.e. the X in videoX). Normally you will pass -1
@@ -547,9 +571,8 @@ from /dev).
After video_unregister_device() returns no new opens can be done. However,
in the case of USB devices some application might still have one of these
-device nodes open. So after the unregister all file operations will return
-an error as well, except for the ioctl and unlocked_ioctl file operations:
-those will still be passed on since some buffer ioctls may still be needed.
+device nodes open. So after the unregister all file operations (except
+release, of course) will return an error as well.
When the last user of the video device node exits, then the vdev->release()
callback is called and you can do the final cleanup there.
diff --git a/Documentation/vm/highmem.txt b/Documentation/vm/highmem.txt
new file mode 100644
index 000000000000..4324d24ffacd
--- /dev/null
+++ b/Documentation/vm/highmem.txt
@@ -0,0 +1,162 @@
+
+ ====================
+ HIGH MEMORY HANDLING
+ ====================
+
+By: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+Contents:
+
+ (*) What is high memory?
+
+ (*) Temporary virtual mappings.
+
+ (*) Using kmap_atomic.
+
+ (*) Cost of temporary mappings.
+
+ (*) i386 PAE.
+
+
+====================
+WHAT IS HIGH MEMORY?
+====================
+
+High memory (highmem) is used when the size of physical memory approaches or
+exceeds the maximum size of virtual memory. At that point it becomes
+impossible for the kernel to keep all of the available physical memory mapped
+at all times. This means the kernel needs to start using temporary mappings of
+the pieces of physical memory that it wants to access.
+
+The part of (physical) memory not covered by a permanent mapping is what we
+refer to as 'highmem'. There are various architecture dependent constraints on
+where exactly that border lies.
+
+In the i386 arch, for example, we choose to map the kernel into every process's
+VM space so that we don't have to pay the full TLB invalidation costs for
+kernel entry/exit. This means the available virtual memory space (4GiB on
+i386) has to be divided between user and kernel space.
+
+The traditional split for architectures using this approach is 3:1, 3GiB for
+userspace and the top 1GiB for kernel space:
+
+ +--------+ 0xffffffff
+ | Kernel |
+ +--------+ 0xc0000000
+ | |
+ | User |
+ | |
+ +--------+ 0x00000000
+
+This means that the kernel can at most map 1GiB of physical memory at any one
+time, but because we need virtual address space for other things - including
+temporary maps to access the rest of the physical memory - the actual direct
+map will typically be less (usually around ~896MiB).
+
+Other architectures that have mm context tagged TLBs can have separate kernel
+and user maps. Some hardware (like some ARMs), however, has limited virtual
+space when it uses mm context tags.
+
+
+==========================
+TEMPORARY VIRTUAL MAPPINGS
+==========================
+
+The kernel contains several ways of creating temporary mappings:
+
+ (*) vmap(). This can be used to make a long duration mapping of multiple
+ physical pages into a contiguous virtual space. It needs global
+ synchronization to unmap.
+
+ (*) kmap(). This permits a short duration mapping of a single page. It needs
+ global synchronization, but is amortized somewhat. It is also prone to
+     deadlocks when used in a nested fashion, and so it is not recommended for
+ new code.
+
+ (*) kmap_atomic(). This permits a very short duration mapping of a single
+ page. Since the mapping is restricted to the CPU that issued it, it
+ performs well, but the issuing task is therefore required to stay on that
+ CPU until it has finished, lest some other task displace its mappings.
+
+     kmap_atomic() may also be used by interrupt contexts, since it does not
+ sleep and the caller may not sleep until after kunmap_atomic() is called.
+
+ It may be assumed that k[un]map_atomic() won't fail.
+
+
+=================
+USING KMAP_ATOMIC
+=================
+
+When and where to use kmap_atomic() is straightforward. It is used when code
+wants to access the contents of a page that might be allocated from high memory
+(see __GFP_HIGHMEM), for example a page in the pagecache. The API has two
+functions, and they can be used in a manner similar to the following:
+
+ /* Find the page of interest. */
+ struct page *page = find_get_page(mapping, offset);
+
+ /* Gain access to the contents of that page. */
+ void *vaddr = kmap_atomic(page);
+
+ /* Do something to the contents of that page. */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* Unmap that page. */
+ kunmap_atomic(vaddr);
+
+Note that the kunmap_atomic() call takes the result of the kmap_atomic() call,
+not its argument.
+
+If you need to map two pages because you want to copy from one page to
+another you need to keep the kmap_atomic calls strictly nested, like:
+
+ vaddr1 = kmap_atomic(page1);
+ vaddr2 = kmap_atomic(page2);
+
+ memcpy(vaddr1, vaddr2, PAGE_SIZE);
+
+ kunmap_atomic(vaddr2);
+ kunmap_atomic(vaddr1);
+
+
+==========================
+COST OF TEMPORARY MAPPINGS
+==========================
+
+The cost of creating temporary mappings can be quite high. The arch has to
+manipulate the kernel's page tables, the data TLB and/or the MMU's registers.
+
+If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping
+simply with a bit of arithmetic that will convert the page struct address into
+a pointer to the page contents rather than juggling mappings about. In such a
+case, the unmap operation may be a null operation.
+
+If CONFIG_MMU is not set, then there can be no temporary mappings and no
+highmem. In such a case, the arithmetic approach will also be used.
+
+
+========
+i386 PAE
+========
+
+The i386 arch, under some circumstances, will permit you to stick up to 64GiB
+of RAM into your 32-bit machine. This has a number of consequences:
+
+ (*) Linux needs a page-frame structure for each page in the system and the
+ pageframes need to live in the permanent mapping, which means:
+
+ (*) you can have 896M/sizeof(struct page) page-frames at most; with struct
+     page being 32 bytes, that ends up being on the order of 112G worth of
+     pages; the kernel, however, needs to store more than just
+     page-frames in that memory...
+
+ (*) PAE makes your page tables larger - which slows the system down as more
+     data has to be accessed to traverse them in TLB fills and the like. One
+ advantage is that PAE has more PTE bits and can provide advanced features
+ like NX and PAT.
+
+The general recommendation is that you don't use more than 8GiB on a 32-bit
+machine - although more might work for you and your workload, you're pretty
+much on your own - don't expect kernel developers to really care much if things
+come apart.
diff --git a/Kbuild b/Kbuild
index 431f7ca2404c..b00037ad7e03 100644
--- a/Kbuild
+++ b/Kbuild
@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
# Default sed regexp - multiline due to syntax constraints
define sed-y
"/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
s:->::; p;}"
endef
diff --git a/MAINTAINERS b/MAINTAINERS
index 146b8a068a4e..1e6b6bdf6340 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -243,21 +243,6 @@ F: drivers/pnp/pnpacpi/
F: include/linux/acpi.h
F: include/acpi/
-ACPI BATTERY DRIVERS
-M: Alexey Starikovskiy <astarikovskiy@suse.de>
-L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Supported
-F: drivers/acpi/battery.c
-F: drivers/acpi/*sbs*
-
-ACPI EC DRIVER
-M: Alexey Starikovskiy <astarikovskiy@suse.de>
-L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Supported
-F: drivers/acpi/ec.c
-
ACPI FAN DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -657,7 +642,7 @@ ARM/FARADAY FA526 PORT
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git://git.berlios.de/gemini-board
+T: git git://git.berlios.de/gemini-board
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
@@ -672,7 +657,7 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git://git.pengutronix.de/git/imx/linux-2.6.git
+T: git git://git.pengutronix.de/git/imx/linux-2.6.git
F: arch/arm/mach-mx*/
F: arch/arm/plat-mxc/
@@ -710,8 +695,7 @@ ARM/INCOME PXA270 SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm/mach-pxa/income.c
-F: arch/arm/mach-pxa/include/mach-pxa/income.h
+F: arch/arm/mach-pxa/colibri-pxa270-income.c
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -758,13 +742,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ixp4xx/
-ARM/INTEL RESEARCH IMOTE 2 MACHINE SUPPORT
-M: Jonathan Cameron <jic23@cam.ac.uk>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-pxa/imote2.c
-
-ARM/INTEL RESEARCH STARGATE 2 MACHINE SUPPORT
+ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
M: Jonathan Cameron <jic23@cam.ac.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -929,40 +907,20 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2410/
-ARM/S3C2440 ARM ARCHITECTURE
+ARM/S3C244x ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2440/
-
-ARM/S3C2442 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2442/
-
-ARM/S3C2443 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
F: arch/arm/mach-s3c2443/
-ARM/S3C6400 ARM ARCHITECTURE
+ARM/S3C64xx ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
-F: arch/arm/mach-s3c6400/
-
-ARM/S3C6410 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c6410/
+F: arch/arm/mach-s3c64xx/
ARM/S5P ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -2102,6 +2060,15 @@ S: Maintained
F: drivers/gpu/drm/
F: include/drm/
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M: Chris Wilson <chris@chris-wilson.co.uk>
+L: intel-gfx@lists.freedesktop.org
+L: dri-devel@lists.freedesktop.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+S: Supported
+F: drivers/gpu/drm/i915
+F: include/drm/i915*
+
DSCC4 DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -3015,7 +2982,7 @@ M: Roland Dreier <rolandd@cisco.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
-W: http://www.openib.org/
+W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
S: Supported
@@ -3867,7 +3834,7 @@ F: drivers/net/wireless/mwl8k.c
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
-F: drivers/mmc/host/mvsdio.*
+F: drivers/mmc/host/mvsdio.*
MARVELL YUKON / SYSKONNECT DRIVER
M: Mirko Lindner <mlindner@syskonnect.de>
@@ -4481,7 +4448,7 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/panasonic-laptop.c
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
M: David Howells <dhowells@redhat.com>
M: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
L: linux-am33-list@redhat.com (moderated for non-subscribers)
@@ -4958,7 +4925,7 @@ RCUTORTURE MODULE
M: Josh Triplett <josh@freedesktop.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4983,7 +4950,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
@@ -5173,6 +5140,16 @@ W: http://www.kernel.dk
S: Maintained
F: drivers/scsi/sr*
+SCSI RDMA PROTOCOL (SRP) INITIATOR
+M: David Dillow <dillowda@ornl.gov>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: http://www.openfabrics.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
+F: drivers/infiniband/ulp/srp/
+F: include/scsi/srp.h
+
SCSI SG DRIVER
M: Doug Gilbert <dgilbert@interlog.com>
L: linux-scsi@vger.kernel.org
@@ -6141,13 +6118,6 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/option.c
-USB OV511 DRIVER
-M: Mark McClelland <mmcclell@bigfoot.com>
-L: linux-usb@vger.kernel.org
-W: http://alpha.dyndns.org/ov511/
-S: Maintained
-F: drivers/media/video/ov511.*
-
USB PEGASUS DRIVER
M: Petko Manolov <petkan@users.sourceforge.net>
L: linux-usb@vger.kernel.org
@@ -6308,16 +6278,6 @@ S: Supported
F: drivers/usb/host/xhci*
F: drivers/usb/host/pci-quirks*
-USB ZC0301 DRIVER
-M: Luca Risolia <luca.risolia@studio.unibo.it>
-L: linux-usb@vger.kernel.org
-L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W: http://www.linux-projects.org
-S: Maintained
-F: Documentation/video4linux/zc0301.txt
-F: drivers/media/video/zc0301/
-
USB ZD1201 DRIVER
L: linux-wireless@vger.kernel.org
W: http://linux-lc100020.sourceforge.net
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d04ccd73af45..28f93a6c0fde 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -55,6 +55,9 @@ config ZONE_DMA
bool
default y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
config NEED_DMA_MAP_STATE
def_bool y
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index 21ac53383b37..9f67a056b461 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
set_hae(hose & 0xffffffff); \
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 471c07292e0b..91b46801b290 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -1,6 +1,9 @@
#ifndef __ALPHA_T2__H__
#define __ALPHA_T2__H__
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
@@ -19,7 +22,7 @@
*
*/
-#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
+#ifndef T2_ONE_HAE_WINDOW
#define T2_HAE_ADDRESS T2_HAE_1
+#endif
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
*
*/
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
#define t2_set_hae { \
- msb = addr >> 27; \
+ unsigned long msb = addr >> 27; \
addr &= T2_MEM_R1_MASK; \
set_hae(msb); \
}
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long result, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long result;
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long r0, r1, work, msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long r0, r1, work;
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, w;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long w;
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, w;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long w;
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
- unsigned long msb, work;
- unsigned long flags;
- raw_spin_lock_irqsave(&t2_hae_lock, flags);
+ unsigned long work;
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
- raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 71a243294142..de98a732683d 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
}
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
extern pgd_t swapper_pg_dir[1024];
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index e6d90568b65d..2f770e994289 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,8 +74,6 @@
# define DBG(args)
#endif
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
@@ -406,6 +404,7 @@ void __init
t2_init_arch(void)
{
struct pci_controller *hose;
+ struct resource *hae_mem;
unsigned long temp;
unsigned int i;
@@ -433,7 +432,13 @@ t2_init_arch(void)
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
+ hae_mem = alloc_resource();
+ hae_mem->start = 0;
+ hae_mem->end = T2_MEM_R1_MASK;
+ hae_mem->name = pci_hae0_name;
+ if (request_resource(&iomem_resource, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM\n");
+ hose->mem_space = hae_mem;
hose->index = 0;
hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 512685f78097..7fa62488bd16 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -25,6 +25,9 @@
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
#endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
/* Only a few systems don't define IACK_SC, handling all interrupts through
the SRM console. But splitting out that one case from IO() below
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d1dbd9acd1df..022c2748fa41 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
*/
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
- dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+ dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
int ok = 1;
/* If this is not set, the machine doesn't support DAC at all. */
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
spin_lock_irqsave(&arena->lock, flags);
for (end = sg + nents; sg < end; ++sg) {
- dma64_addr_t addr;
+ dma_addr_t addr;
size_t size;
long npages, ofs;
dma_addr_t tend;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index baa903602f6a..e2af5eb59bb4 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
size_t copied;
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR:
force_successful_syscall_return();
ret = get_reg(child, addr);
- DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+ DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
break;
/* When I and D space are separate, this will have to be fixed. */
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_POKEUSR: /* write the specified register */
- DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+ DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
ret = put_reg(child, addr, data);
break;
default:
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
new file mode 100644
index 000000000000..b42ee62c4d77
--- /dev/null
+++ b/arch/arm/configs/pcontrol_g20_defconfig
@@ -0,0 +1,175 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="/opt/arm-2010q1/bin/arm-none-linux-gnueabi-"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91SAM9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,115200 mem=128M mtdparts=atmel_nand:128k(bootstrap)ro,256k(uboot)ro,128k(env1)ro,128k(env2)ro,2M(linux),-(root) root=/dev/mmcblk0p1 rootwait rw"
+CONFIG_VFP=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_ATMEL_TCLIB=y
+CONFIG_EEPROM_AT24=m
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_MACB=y
+CONFIG_SMSC911X=m
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_EVBUG=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_MATRIX=m
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_SERIO is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_MAX3100=m
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_R3964=m
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_GPIO=m
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=m
+CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
+CONFIG_W1=m
+CONFIG_W1_MASTER_GPIO=m
+CONFIG_W1_SLAVE_DS2431=m
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_LIBUSUAL=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_G_HID=m
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_ATMELMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_AUXDISPLAY=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV=y
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+CONFIG_IIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff58126602..1fc684e70ab6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a9672e8406a3..b155414192da 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
-#define pte_offset_map(dir,addr) (__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr) (__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte) __pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte) __pte_unmap(pte)
#ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km) pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km) do { } while (0)
+#define __pte_map(dir) pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte) do { } while (0)
#else
-#define __pte_map(dir,km) ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km) kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE))
#endif
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e0cb6370ed14..3e97483abcf0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -1075,13 +1075,15 @@ out:
}
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
@@ -1089,34 +1091,34 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (void __user *)data);
+ ret = ptrace_getfpregs(child, datap);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (void __user *)data);
+ ret = ptrace_setfpregs(child, datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
- ret = ptrace_getwmmxregs(child, (void __user *)data);
+ ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
- ret = ptrace_setwmmxregs(child, (void __user *)data);
+ ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ datap);
break;
case PTRACE_SET_SYSCALL:
@@ -1126,21 +1128,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
- ret = ptrace_getcrunchregs(child, (void __user *)data);
+ ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
- ret = ptrace_setcrunchregs(child, (void __user *)data);
+ ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
- ret = ptrace_getvfpregs(child, (void __user *)data);
+ ret = ptrace_getvfpregs(child, datap);
break;
case PTRACE_SETVFPREGS:
- ret = ptrace_setvfpregs(child, (void __user *)data);
+ ret = ptrace_setvfpregs(child, datap);
break;
#endif
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index abed4d15a7fd..c015b684b4fe 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -375,6 +375,12 @@ config MACH_STAMP9G20
evaluation board.
<http://www.taskit.de/en/>
+config MACH_PCONTROL_G20
+ bool "PControl G20 CPU module"
+ help
+ Select this if you are using taskit's Stamp9G20 CPU module on this
+ carrier board, being the decentralized unit of a building automation
+ system; featuring NVRAM, an Ethernet switch, isolated RS485, a display and I/O.
endif
if (ARCH_AT91SAM9260 || ARCH_AT91SAM9G20)
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 412b3a471a4b..821eb842795f 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -11,12 +11,12 @@ obj-$(CONFIG_AT91_PMC_UNIT) += clock.o
# CPU-specific support
obj-$(CONFIG_ARCH_AT91RM9200) += at91rm9200.o at91rm9200_time.o at91rm9200_devices.o
-obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
@@ -65,6 +65,7 @@ obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o
obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o
+obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o
# AT91SAM9260/AT91SAM9G20 board-specific support
obj-$(CONFIG_MACH_SNAPPER_9260) += board-snapper9260.o
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 0894f1077be7..195208b30024 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -279,11 +279,6 @@ static struct at91_gpio_bank at91sam9260_gpio[] = {
}
};
-static void at91sam9260_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9260_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -327,7 +322,7 @@ void __init at91sam9260_initialize(unsigned long main_clock)
else
iotable_init(at91sam9260_sram_desc, ARRAY_SIZE(at91sam9260_sram_desc));
- at91_arch_reset = at91sam9260_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9260_poweroff;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
| (1 << AT91SAM9260_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 4ecf37996c77..fcad88668504 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -257,11 +257,6 @@ static struct at91_gpio_bank at91sam9261_gpio[] = {
}
};
-static void at91sam9261_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9261_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -283,7 +278,7 @@ void __init at91sam9261_initialize(unsigned long main_clock)
iotable_init(at91sam9261_sram_desc, ARRAY_SIZE(at91sam9261_sram_desc));
- at91_arch_reset = at91sam9261_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9261_poweroff;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
| (1 << AT91SAM9261_ID_IRQ2);
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 942792d630d8..249f900954d8 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -269,11 +269,6 @@ static struct at91_gpio_bank at91sam9263_gpio[] = {
}
};
-static void at91sam9263_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9263_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -289,7 +284,7 @@ void __init at91sam9263_initialize(unsigned long main_clock)
/* Map peripherals */
iotable_init(at91sam9263_io_desc, ARRAY_SIZE(at91sam9263_io_desc));
- at91_arch_reset = at91sam9263_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9263_poweroff;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
diff --git a/arch/arm/mach-at91/at91sam9_alt_reset.S b/arch/arm/mach-at91/at91sam9_alt_reset.S
new file mode 100644
index 000000000000..e0256deb91fb
--- /dev/null
+++ b/arch/arm/mach-at91/at91sam9_alt_reset.S
@@ -0,0 +1,48 @@
+/*
+ * reset AT91SAM9G20 as per errata
+ *
+ * (C) BitBox Ltd 2010
+ *
+ * Unless the SDRAM is cleanly shut down before we hit the
+ * reset register it can be left driving the data bus and
+ * killing the chance of a subsequent boot from NAND.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <mach/hardware.h>
+#include <mach/at91sam9_sdramc.h>
+#include <mach/at91_rstc.h>
+
+ .arm
+
+ .globl at91sam9_alt_reset
+
+at91sam9_alt_reset: mrc p15, 0, r0, c1, c0, 0
+ orr r0, r0, #CR_I
+ mcr p15, 0, r0, c1, c0, 0 @ enable I-cache
+
+ ldr r0, .at91_va_base_sdramc @ preload constants
+ ldr r1, .at91_va_base_rstc_cr
+
+ mov r2, #1
+ mov r3, #AT91_SDRAMC_LPCB_POWER_DOWN
+ ldr r4, =AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST
+
+ .balign 32 @ align to cache line
+
+ str r2, [r0, #AT91_SDRAMC_TR] @ disable SDRAM access
+ str r3, [r0, #AT91_SDRAMC_LPR] @ power down SDRAM
+ str r4, [r1] @ reset processor
+
+ b .
+
+.at91_va_base_sdramc:
+ .word AT91_VA_BASE_SYS + AT91_SDRAMC0
+.at91_va_base_rstc_cr:
+ .word AT91_VA_BASE_SYS + AT91_RSTC_CR
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 1276babf84d5..1e8f275c17f6 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
+#include <linux/atmel-mci.h>
#include <linux/fb.h>
#include <video/atmel_lcdc.h>
@@ -25,6 +26,7 @@
#include <mach/at91sam9g45_matrix.h>
#include <mach/at91sam9_smc.h>
#include <mach/at_hdmac.h>
+#include <mach/atmel-mci.h>
#include "generic.h"
@@ -350,6 +352,169 @@ void __init at91_add_device_eth(struct at91_eth_data *data) {}
/* --------------------------------------------------------------------
+ * MMC / SD
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static u64 mmc_dmamask = DMA_BIT_MASK(32);
+static struct mci_platform_data mmc0_data, mmc1_data;
+
+static struct resource mmc0_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_MCI0,
+ .end = AT91SAM9G45_BASE_MCI0 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_MCI0,
+ .end = AT91SAM9G45_ID_MCI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_mmc0_device = {
+ .name = "atmel_mci",
+ .id = 0,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mmc0_data,
+ },
+ .resource = mmc0_resources,
+ .num_resources = ARRAY_SIZE(mmc0_resources),
+};
+
+static struct resource mmc1_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_MCI1,
+ .end = AT91SAM9G45_BASE_MCI1 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_MCI1,
+ .end = AT91SAM9G45_ID_MCI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_mmc1_device = {
+ .name = "atmel_mci",
+ .id = 1,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mmc1_data,
+ },
+ .resource = mmc1_resources,
+ .num_resources = ARRAY_SIZE(mmc1_resources),
+};
+
+/* Consider only one slot: slot 0 */
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
+{
+
+ if (!data)
+ return;
+
+ /* Must have at least one usable slot */
+ if (!data->slot[0].bus_width)
+ return;
+
+#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
+ {
+ struct at_dma_slave *atslave;
+ struct mci_dma_data *alt_atslave;
+
+ alt_atslave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+ atslave = &alt_atslave->sdata;
+
+ /* DMA slave channel configuration */
+ atslave->dma_dev = &at_hdmac_device.dev;
+ atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT;
+ atslave->cfg = ATC_FIFOCFG_HALFFIFO
+ | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
+ atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
+ if (mmc_id == 0) /* MCI0 */
+ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
+ | ATC_DST_PER(AT_DMA_ID_MCI0);
+
+ else /* MCI1 */
+ atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI1)
+ | ATC_DST_PER(AT_DMA_ID_MCI1);
+
+ data->dma_slave = alt_atslave;
+ }
+#endif
+
+
+ /* input/irq */
+ if (data->slot[0].detect_pin) {
+ at91_set_gpio_input(data->slot[0].detect_pin, 1);
+ at91_set_deglitch(data->slot[0].detect_pin, 1);
+ }
+ if (data->slot[0].wp_pin)
+ at91_set_gpio_input(data->slot[0].wp_pin, 1);
+
+ if (mmc_id == 0) { /* MCI0 */
+
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA0, 0);
+
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA1, 1);
+
+ /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */
+ at91_set_A_periph(AT91_PIN_PA2, 1);
+ if (data->slot[0].bus_width == 4) {
+ at91_set_A_periph(AT91_PIN_PA3, 1);
+ at91_set_A_periph(AT91_PIN_PA4, 1);
+ at91_set_A_periph(AT91_PIN_PA5, 1);
+ if (data->slot[0].bus_width == 8) {
+ at91_set_A_periph(AT91_PIN_PA6, 1);
+ at91_set_A_periph(AT91_PIN_PA7, 1);
+ at91_set_A_periph(AT91_PIN_PA8, 1);
+ at91_set_A_periph(AT91_PIN_PA9, 1);
+ }
+ }
+
+ mmc0_data = *data;
+ at91_clock_associate("mci0_clk", &at91sam9g45_mmc0_device.dev, "mci_clk");
+ platform_device_register(&at91sam9g45_mmc0_device);
+
+ } else { /* MCI1 */
+
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA31, 0);
+
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA22, 1);
+
+ /* DAT0, maybe DAT1..DAT3 and maybe DAT4..DAT7 */
+ at91_set_A_periph(AT91_PIN_PA23, 1);
+ if (data->slot[0].bus_width == 4) {
+ at91_set_A_periph(AT91_PIN_PA24, 1);
+ at91_set_A_periph(AT91_PIN_PA25, 1);
+ at91_set_A_periph(AT91_PIN_PA26, 1);
+ if (data->slot[0].bus_width == 8) {
+ at91_set_A_periph(AT91_PIN_PA27, 1);
+ at91_set_A_periph(AT91_PIN_PA28, 1);
+ at91_set_A_periph(AT91_PIN_PA29, 1);
+ at91_set_A_periph(AT91_PIN_PA30, 1);
+ }
+ }
+
+ mmc1_data = *data;
+ at91_clock_associate("mci1_clk", &at91sam9g45_mmc1_device.dev, "mci_clk");
+ platform_device_register(&at91sam9g45_mmc1_device);
+
+ }
+}
+#else
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
* NAND / SmartMedia
* -------------------------------------------------------------------- */
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 211c5c14a1e6..6a9d24e5ed8e 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -242,11 +242,6 @@ static struct at91_gpio_bank at91sam9rl_gpio[] = {
}
};
-static void at91sam9rl_reset(void)
-{
- at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
static void at91sam9rl_poweroff(void)
{
at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
@@ -281,7 +276,7 @@ void __init at91sam9rl_initialize(unsigned long main_clock)
/* Map SRAM */
iotable_init(at91sam9rl_sram_desc, ARRAY_SIZE(at91sam9rl_sram_desc));
- at91_arch_reset = at91sam9rl_reset;
+ at91_arch_reset = at91sam9_alt_reset;
pm_power_off = at91sam9rl_poweroff;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
new file mode 100644
index 000000000000..bba5a560e02b
--- /dev/null
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ * taskit GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * copied and adjusted from board-stamp9g20.c
+ * by Peter Gsellmann <pgsellmann@portner-elektronik.at>
+ */
+
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init pcontrol_g20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx, Tx) only TTL -> JTAG connector X7 17,19 */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
+ | ATMEL_UART_RTS);
+
+ /* USART1 on ttyS2. (Rx, Tx, CTS, RTS) isolated RS485 X5 */
+ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS
+ | ATMEL_UART_RTS);
+
+ /* USART2 on ttyS3. (Rx, Tx) 9bit-Bus Multidrop-mode X4 */
+ at91_register_uart(AT91SAM9260_ID_US4, 3, 0);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+
+static void __init init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB
+ */
+static struct atmel_nand_data __initdata nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC13,
+ .enable_pin = AT91_PIN_PC14,
+};
+
+/*
+ * Bus timings; unit = 7.57ns
+ */
+static struct sam9_smc_config __initdata nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
+ .tdf_cycles = 3,
+};
+
+static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
+ .ncs_read_setup = 16,
+ .nrd_setup = 18,
+ .ncs_write_setup = 16,
+ .nwe_setup = 18,
+
+ .ncs_read_pulse = 63,
+ .nrd_pulse = 55,
+ .ncs_write_pulse = 63,
+ .nwe_pulse = 55,
+
+ .read_cycle = 127,
+ .write_cycle = 127,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_SELECT
+ | AT91_SMC_DBW_8 | AT91_SMC_PS_4
+ | AT91_SMC_TDFMODE,
+ .tdf_cycles = 3,
+}, {
+ .ncs_read_setup = 0,
+ .nrd_setup = 0,
+ .ncs_write_setup = 0,
+ .nwe_setup = 1,
+
+ .ncs_read_pulse = 8,
+ .nrd_pulse = 8,
+ .ncs_write_pulse = 5,
+ .nwe_pulse = 4,
+
+ .read_cycle = 8,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
+ | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_SELECT
+ | AT91_SMC_DBW_16 | AT91_SMC_PS_8
+ | AT91_SMC_TDFMODE,
+ .tdf_cycles = 1,
+} };
+
+static void __init add_device_nand(void)
+{
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &nand_smc_config);
+ at91_add_device_nand(&nand_data);
+}
+
+
+static void __init add_device_pcontrol(void)
+{
+ /* configure chip-select 4 (IO compatible with 8051, X4) */
+ sam9_smc_configure(4, &pcontrol_smc_config[0]);
+ /* configure chip-select 7 (FerroRAM 256KiB x 16bit MR2A16A, D4) */
+ sam9_smc_configure(7, &pcontrol_smc_config[1]);
+}
+
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static struct mci_platform_data __initdata mmc_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ },
+};
+#else
+static struct at91_mmc_data __initdata mmc_data = {
+ .wire4 = 1,
+};
+#endif
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+ .ports = 2,
+};
+
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata pcontrol_g20_udc_data = {
+ .vbus_pin = AT91_PIN_PA22, /* Detect +5V bus voltage */
+ .pullup_pin = AT91_PIN_PA4, /* K-state, active low */
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+ .phy_irq_pin = AT91_PIN_PA28,
+ .is_rmii = 1,
+};
+
+
+/*
+ * I2C devices: eeprom and phy/switch
+ */
+static struct i2c_board_info __initdata pcontrol_g20_i2c_devices[] = {
+{ /* D7 address width=2, 8KiB */
+ I2C_BOARD_INFO("24c64", 0x50)
+}, { /* D8 address width=1, 1 byte has 32 bits! */
+ I2C_BOARD_INFO("lan9303", 0x0a)
+}, };
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led pcontrol_g20_leds[] = {
+ {
+ .name = "LED1", /* red H5 */
+ .gpio = AT91_PIN_PB18,
+ .active_low = 1,
+ .default_trigger = "none", /* supervisor */
+ }, {
+ .name = "LED2", /* yellow H7 */
+ .gpio = AT91_PIN_PB19,
+ .active_low = 1,
+ .default_trigger = "mmc0", /* SD-card activity */
+ }, {
+ .name = "LED3", /* green H2 */
+ .gpio = AT91_PIN_PB20,
+ .active_low = 1,
+ .default_trigger = "heartbeat", /* blinky */
+ }, {
+ .name = "LED4", /* red H3 */
+ .gpio = AT91_PIN_PC6,
+ .active_low = 1,
+ .default_trigger = "none", /* connection lost */
+ }, {
+ .name = "LED5", /* yellow H6 */
+ .gpio = AT91_PIN_PC7,
+ .active_low = 1,
+ .default_trigger = "none", /* unsent data */
+ }, {
+ .name = "LED6", /* green H1 */
+ .gpio = AT91_PIN_PC9,
+ .active_low = 1,
+ .default_trigger = "none", /* snafu */
+ }
+};
+
+
+/*
+ * SPI devices
+ */
+static struct spi_board_info pcontrol_g20_spi_devices[] = {
+ {
+ .modalias = "spidev", /* HMI port X4 */
+ .chip_select = 1,
+ .max_speed_hz = 50 * 1000 * 1000,
+ .bus_num = 0,
+ }, {
+ .modalias = "spidev", /* piggyback A2 */
+ .chip_select = 0,
+ .max_speed_hz = 50 * 1000 * 1000,
+ .bus_num = 1,
+ },
+};
+
+
+/*
+ * Dallas 1-Wire DS2431
+ */
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+ .pin = AT91_PIN_PA29,
+ .is_open_drain = 1,
+};
+
+static struct platform_device w1_device = {
+ .name = "w1-gpio",
+ .id = -1,
+ .dev.platform_data = &w1_gpio_pdata,
+};
+
+static void add_wire1(void)
+{
+ at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+ at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+ platform_device_register(&w1_device);
+}
+
+
+static void __init pcontrol_g20_board_init(void)
+{
+ at91_add_device_serial();
+ add_device_nand();
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+ at91_add_device_mci(0, &mmc_data);
+#else
+ at91_add_device_mmc(0, &mmc_data);
+#endif
+ at91_add_device_usbh(&usbh_data);
+ at91_add_device_eth(&macb_data);
+ at91_add_device_i2c(pcontrol_g20_i2c_devices,
+ ARRAY_SIZE(pcontrol_g20_i2c_devices));
+ add_wire1();
+ add_device_pcontrol();
+ at91_add_device_spi(pcontrol_g20_spi_devices,
+ ARRAY_SIZE(pcontrol_g20_spi_devices));
+ at91_add_device_udc(&pcontrol_g20_udc_data);
+ at91_gpio_leds(pcontrol_g20_leds,
+ ARRAY_SIZE(pcontrol_g20_leds));
+ /* piggyback A2 */
+ at91_set_gpio_output(AT91_PIN_PB31, 1);
+}
+
+
+MACHINE_START(PCONTROL_G20, "PControl G20")
+ /* Maintainer: pgsellmann@portner-elektronik.at */
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = pcontrol_g20_map_io,
+ .init_irq = init_irq,
+ .init_machine = pcontrol_g20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 7913984f6de9..86ff4b52db32 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -24,7 +24,9 @@
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/clk.h>
+#include <linux/atmel-mci.h>
+#include <mach/hardware.h>
#include <video/atmel_lcdc.h>
#include <asm/setup.h>
@@ -98,6 +100,25 @@ static struct spi_board_info ek_spi_devices[] = {
/*
+ * MCI (SD/MMC)
+ */
+static struct mci_platform_data __initdata mci0_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ .detect_pin = AT91_PIN_PD10,
+ },
+};
+
+static struct mci_platform_data __initdata mci1_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ .detect_pin = AT91_PIN_PD11,
+ .wp_pin = AT91_PIN_PD29,
+ },
+};
+
+
+/*
* MACB Ethernet device
*/
static struct at91_eth_data __initdata ek_macb_data = {
@@ -380,6 +401,9 @@ static void __init ek_board_init(void)
at91_add_device_usba(&ek_usba_udc_data);
/* SPI */
at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
+ /* MMC */
+ at91_add_device_mci(0, &mci0_data);
+ at91_add_device_mci(1, &mci1_data);
/* Ethernet */
at91_add_device_eth(&ek_macb_data);
/* NAND */
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 65c3dc5ba0d0..0c66deb2db39 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -46,6 +46,9 @@ extern void __init at91_clock_associate(const char *id, struct device *dev, cons
extern void at91_irq_suspend(void);
extern void at91_irq_resume(void);
+/* reset */
+extern void at91sam9_alt_reset(void);
+
/* GPIO */
#define AT91RM9200_PQFP 3 /* AT91RM9200 PQFP package has 3 banks */
#define AT91RM9200_BGA 4 /* AT91RM9200 BGA package has 4 banks */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 615668986480..dafbacc25eb1 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -258,16 +258,23 @@ static int at91_pm_enter(suspend_state_t state)
* NOTE: the Wait-for-Interrupt instruction needs to be
* in icache so no SDRAM accesses are needed until the
* wakeup IRQ occurs and self-refresh is terminated.
+ * For ARM926-based chips this requirement is weaker,
+ * as the at91sam9 can access RAM in self-refresh mode.
*/
- asm("b 1f; .align 5; 1:");
- asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */
+ asm volatile ( "mov r0, #0\n\t"
+ "b 1f\n\t"
+ ".align 5\n\t"
+ "1: mcr p15, 0, r0, c7, c10, 4\n\t"
+ : /* no output */
+ : /* no input */
+ : "r0");
saved_lpr = sdram_selfrefresh_enable();
- asm("mcr p15, 0, r0, c7, c0, 4"); /* wait for interrupt */
+ wait_for_interrupt_enable();
sdram_selfrefresh_disable(saved_lpr);
break;
case PM_SUSPEND_ON:
- asm("mcr p15, 0, r0, c7, c0, 4"); /* wait for interrupt */
+ cpu_do_idle();
break;
default:
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 8c87d0c1b8f8..ce9a20699111 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -21,6 +21,8 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_sys_write(AT91_SDRAMC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() asm volatile ("mcr p15, 0, %0, c7, c0, 4" \
+ : : "r" (0))
#elif defined(CONFIG_ARCH_AT91CAP9)
#include <mach/at91cap9_ddrsdr.h>
@@ -38,6 +40,7 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() cpu_do_idle()
#elif defined(CONFIG_ARCH_AT91SAM9G45)
#include <mach/at91sam9_ddrsdr.h>
@@ -74,6 +77,7 @@ static inline u32 sdram_selfrefresh_enable(void)
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); \
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); \
} while (0)
+#define wait_for_interrupt_enable() cpu_do_idle()
#else
#include <mach/at91sam9_sdramc.h>
@@ -98,5 +102,6 @@ static inline u32 sdram_selfrefresh_enable(void)
}
#define sdram_selfrefresh_disable(saved_lpr) at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr)
+#define wait_for_interrupt_enable() cpu_do_idle()
#endif
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index b6b00a1f6125..f7922a436172 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -124,6 +124,7 @@ ENTRY(at91_slow_clock)
ldr r5, .at91_va_base_ramc1
/* Drain write buffer */
+ mov r0, #0
mcr p15, 0, r0, c7, c10, 4
#ifdef CONFIG_ARCH_AT91RM9200
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 4566bd1c8660..ef06c66a6f16 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -358,8 +358,7 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
int i, found = 0, __div = 0, __pdiv = 0;
/* Don't exceed the maximum rate */
- max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
- clk_xtali.rate / 4);
+ max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
rate = min(rate, max_rate);
/*
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
index 287431cc13e5..ac6fd713828a 100644
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -27,6 +27,8 @@
#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
+#include <mach/dma.h>
+
#define IMX_DMA_CHANNELS 16
#define DMA_MODE_READ 0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);
void imx_dma_free(int channel);
-enum imx_dma_prio {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-};
-
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
#endif /* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index 86e86c1300d5..2ff3f661a48e 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -311,7 +311,6 @@ static struct soc_camera_link iclink_mt9v022 = {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm037_i2c_camera[1],
.i2c_adapter_id = 2,
- .module_name = "mt9v022",
};
static struct soc_camera_link iclink_mt9t031 = {
@@ -319,7 +318,6 @@ static struct soc_camera_link iclink_mt9t031 = {
.power = pcm037_camera_power,
.board_info = &pcm037_i2c_camera[0],
.i2c_adapter_id = 2,
- .module_name = "mt9t031",
};
static struct i2c_board_info pcm037_i2c_devices[] = {
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index 0551eb39d97e..18069cb7d068 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -179,7 +179,6 @@ static struct soc_camera_link base_iclink = {
.reset = marxbot_basecam_reset,
.board_info = &marxbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device marxbot_camera[] = {
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 417757e78c65..04760a53005a 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -88,7 +88,6 @@ static struct soc_camera_link base_iclink = {
.reset = smartbot_cam_reset,
.board_info = &smartbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device smartbot_camera[] = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 41285297eafc..3fec4d62a91a 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -107,6 +107,10 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
},
};
+static struct platform_device rx51_charger_device = {
+ .name = "isp1704_charger",
+};
+
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#define RX51_GPIO_CAMERA_LENS_COVER 110
@@ -919,5 +923,6 @@ void __init rx51_peripherals_init(void)
spi_register_board_info(rx51_peripherals_spi_board_info,
ARRAY_SIZE(rx51_peripherals_spi_board_info));
omap2_hsmmc_init(mmc);
+ platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index ab48bb81b570..ed0dbfdb22ed 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1015,7 +1015,6 @@ static struct soc_camera_link iclink = {
.power = em_x270_sensor_power,
.board_info = &em_x270_i2c_cam_info[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
static struct platform_device em_x270_camera = {
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 80a9352d43f3..142c711f4cda 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -755,7 +755,6 @@ static struct soc_camera_link a780_iclink = {
.flags = SOCAM_SENSOR_INVERT_PCLK,
.i2c_adapter_id = 0,
.board_info = &a780_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a780_camera_power,
.reset = a780_camera_reset,
};
@@ -1024,7 +1023,6 @@ static struct soc_camera_link a910_iclink = {
.bus_id = 0,
.i2c_adapter_id = 0,
.board_info = &a910_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a910_camera_power,
.reset = a910_camera_reset,
};
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 0c31fabfc7fd..f5fb915e1315 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -711,7 +711,6 @@ static struct soc_camera_link iclink = {
.bus_id = 0, /* Match id in pxa27x_device_camera in device.c */
.board_info = &mioa701_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
struct i2c_pxa_platform_data i2c_pdata = {
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index f56ae1008759..f33647a8e0b7 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -453,7 +453,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9v022",
}, {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[1],
@@ -461,7 +460,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9m001",
},
};
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 2f420210d406..9057d6fd1d31 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -28,7 +28,6 @@
#include <linux/cnt32_to_63.h>
#include <asm/mach/time.h>
-#include <asm/mach/time.h>
#include <asm/localtimer.h>
#include <mach/iomap.h>
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index cbbe69a76a7c..4a94be3304b9 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {
/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
- .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_PCHAN_BASIC_MODE),
+ .mode = STEDMA40_MODE_PHYSICAL,
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_PHY_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_PHY_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
- .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_LCHAN_SRC_LOG_DST_LOG |
- STEDMA40_NO_TIM_FOR_LINK),
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_LOG_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_LOG_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
.memcpy_conf_phy = &dma40_memcpy_conf_phy,
.memcpy_conf_log = &dma40_memcpy_conf_log,
- .llis_per_log = 8,
.disabled_channels = {-1},
};
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8440d952ba6d..c493d7244d3d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
* open-code the spin-locking.
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
- pte = pte_offset_map_nested(pmd, address);
+ pte = pte_offset_map(pmd, address);
spin_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
spin_unlock(ptl);
- pte_unmap_nested(pte);
+ pte_unmap(pte);
return ret;
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 1fbdb55bfd1b..c435fd9e1da9 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
unsigned int idx;
unsigned long vaddr;
void *kmap;
+ int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
-
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (kmap)
return kmap;
+ type = kmap_atomic_idx_push();
+
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+ int idx, type;
if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
#else
(void) idx; /* to kill a warning */
#endif
+ kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- unsigned int idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
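For context on the highmem.c conversion above: the caller-supplied KM_* slot is replaced by a per-CPU index stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop()). The sketch below is illustrative only and not part of the patch; copy_highpage_sketch() is an invented name, and it assumes the generic one-argument kmap_atomic() wrapper introduced by this series.

/* Hedged sketch, not from this patch: nested atomic mappings just push
 * and pop per-CPU slots, so no KM_* type is passed and unmapping is
 * done in reverse (stack) order. */
static void copy_highpage_sketch(struct page *dst, struct page *src)
{
        void *d = kmap_atomic(dst);     /* pushes one slot */
        void *s = kmap_atomic(src);     /* nests: pushes the next slot */

        memcpy(d, s, PAGE_SIZE);

        kunmap_atomic(s);               /* pop in reverse order */
        kunmap_atomic(d);
}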
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153bf..69bbfc6645a6 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
goto no_pte;
init_pmd = pmd_offset(init_pgd, 0);
- init_pte = pte_offset_map_nested(init_pmd, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte, *init_pte, 0);
- pte_unmap_nested(init_pte);
+ pte_unmap(init_pte);
pte_unmap(new_pte);
}
diff --git a/arch/arm/plat-mxc/include/mach/dma.h b/arch/arm/plat-mxc/include/mach/dma.h
new file mode 100644
index 000000000000..ef7751546f5f
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/dma.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+ !strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
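As a rough illustration of how the new mach/dma.h above is meant to be consumed (not part of the patch; the filter function, request line number and priority are invented), a slave driver would describe its channel with imx_dma_data and match it from a dmaengine filter:

/* Hedged sketch, not from this patch: names and values are illustrative. */
static struct imx_dma_data uart_rx_dma_data = {
        .dma_request     = 25,                  /* hypothetical request line */
        .peripheral_type = IMX_DMATYPE_UART,
        .priority        = DMA_PRIO_HIGH,
};

static bool uart_rx_dma_filter(struct dma_chan *chan, void *param)
{
        struct imx_dma_data *data = param;

        if (!imx_dma_is_general_purpose(chan)) /* only imx-dma / imx-sdma */
                return false;
        chan->private = data;                  /* handed to the DMA driver */
        return true;
}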
diff --git a/arch/arm/plat-mxc/include/mach/sdma.h b/arch/arm/plat-mxc/include/mach/sdma.h
new file mode 100644
index 000000000000..9be112227ac4
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/sdma.h
@@ -0,0 +1,17 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version: version of this SDMA engine
+ * @cpu_name: used to generate the firmware name
+ * @to_version: CPU tape-out version
+ */
+struct sdma_platform_data {
+ int sdma_version;
+ char *cpu_name;
+ int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
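A hedged sketch of how an SoC file might fill the new sdma_platform_data (not part of the patch; the version numbers and CPU name are illustrative only):

/* Hedged sketch, not from this patch: illustrative values. */
static struct sdma_platform_data imx35_sdma_pdata = {
        .sdma_version   = 2,
        .cpu_name       = "imx35",      /* used to build the firmware name */
        .to_version     = 1,
};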
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 5fbde4b8dc12..74b62f10d07f 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -1,10 +1,8 @@
/*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
@@ -14,43 +12,25 @@
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
/* dev types for memcpy */
#define STEDMA40_DEV_DST_MEMORY (-1)
#define STEDMA40_DEV_SRC_MEMORY (-1)
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+ STEDMA40_MODE_LOGICAL = 0,
+ STEDMA40_MODE_PHYSICAL,
+ STEDMA40_MODE_OPERATION,
+};
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+ STEDMA40_PCHAN_BASIC_MODE = 0,
+ STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+ STEDMA40_PCHAN_MODULO_MODE,
+ STEDMA40_PCHAN_DOUBLE_DST_MODE,
+ STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+ STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
#define STEDMA40_ESIZE_8_BIT 0x0
#define STEDMA40_ESIZE_16_BIT 0x1
@@ -73,16 +53,14 @@
#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
enum stedma40_flow_ctrl {
STEDMA40_NO_FLOW_CTRL,
STEDMA40_FLOW_CTRL,
};
-enum stedma40_endianess {
- STEDMA40_LITTLE_ENDIAN,
- STEDMA40_BIG_ENDIAN
-};
-
enum stedma40_periph_data_width {
STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -90,15 +68,8 @@ enum stedma40_periph_data_width {
STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
};
-struct stedma40_half_channel_info {
- enum stedma40_endianess endianess;
- enum stedma40_periph_data_width data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
enum stedma40_xfer_dir {
- STEDMA40_MEM_TO_MEM,
+ STEDMA40_MEM_TO_MEM = 1,
STEDMA40_MEM_TO_PERIPH,
STEDMA40_PERIPH_TO_MEM,
STEDMA40_PERIPH_TO_PERIPH
@@ -106,18 +77,31 @@ enum stedma40_xfer_dir {
/**
+ * struct stedma40_half_channel_info - dst/src half channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @psize: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+ bool big_endian;
+ enum stedma40_periph_data_width data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
* struct stedma40_chan_cfg - Structure to be filled by client drivers.
*
* @dir: MEM 2 MEM, PERIPH 2 MEM, MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
* @src_dev_type: Src device type
* @dst_dev_type: Dst device type
* @src_info: Parameters for src half channel
* @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
*
*
* This structure has to be filled by the client drivers.
@@ -126,15 +110,13 @@ enum stedma40_xfer_dir {
*/
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
- unsigned int channel_type;
+ bool high_priority;
+ enum stedma40_mode mode;
+ enum stedma40_mode_opt mode_opt;
int src_dev_type;
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
- void *pre_transfer_data;
- int (*pre_transfer) (struct dma_chan *chan,
- void *data,
- int size);
};
/**
@@ -147,7 +129,6 @@ struct stedma40_chan_cfg {
* @memcpy_len: length of memcpy
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
*/
@@ -159,23 +140,10 @@ struct stedma40_platform_data {
u32 memcpy_len;
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
- unsigned int llis_per_log;
- int disabled_channels[8];
+ int disabled_channels[STEDMA40_MAX_PHYS];
};
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
- int src_psize,
- int dst_psize);
+#ifdef CONFIG_STE_DMA40
/**
* stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -238,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
direction, flags);
}
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ return NULL;
+}
+#endif
+
#endif
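To make the shape of the reworked ste_dma40 client API concrete, here is a hedged sketch (not part of the patch; the src device type number is invented) using the new mode and high_priority fields in place of the old channel_type bitmask:

/* Hedged sketch, not from this patch: a logical, high-priority
 * peripheral-to-memory channel described with the new fields. */
static struct stedma40_chan_cfg uart_rx_dma_cfg = {
        .dir            = STEDMA40_PERIPH_TO_MEM,
        .high_priority  = true,
        .mode           = STEDMA40_MODE_LOGICAL,
        .mode_opt       = STEDMA40_LCHAN_SRC_LOG_DST_LOG,
        .src_dev_type   = 13,                   /* hypothetical event line */
        .dst_dev_type   = STEDMA40_DEV_DST_MEMORY,
        .src_info.data_width = STEDMA40_BYTE_WIDTH,
        .dst_info.data_width = STEDMA40_BYTE_WIDTH,
};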
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index bb78c1532fae..c9e5d7298c40 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -96,7 +96,7 @@ static int fbmem_region_reserved(unsigned long start, size_t size)
* Get the region_idx'th region from board config/ATAG and convert it to
* our internal format.
*/
-static int get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
+static int __init get_fbmem_region(int region_idx, struct omapfb_mem_region *rg)
{
const struct omap_fbmem_config *conf;
u32 paddr;
@@ -128,7 +128,7 @@ static int set_fbmem_region_type(struct omapfb_mem_region *rg, int mem_type,
* type = 0 && paddr = 0, a default don't care case maps to
* the SDRAM type.
*/
- if (rg->type || (!rg->type && !rg->paddr))
+ if (rg->type || !rg->paddr)
return 0;
if (ranges_overlap(rg->paddr, rg->size, mem_start, mem_size)) {
rg->type = mem_type;
@@ -260,7 +260,7 @@ void __init omapfb_reserve_sdram_memblock(void)
* this point, since the driver built as a module would have problem with
* freeing / reallocating the regions.
*/
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long pstart_avail,
@@ -334,7 +334,7 @@ void omapfb_set_ctrl_platform_data(void *data)
omapfb_config.ctrl_platform_data = data;
}
-static inline int omap_init_fb(void)
+static int __init omap_init_fb(void)
{
const struct omap_lcd_config *conf;
@@ -379,7 +379,7 @@ void omapfb_set_platform_data(struct omapfb_platform_data *data)
omapfb_config = *data;
}
-static inline int omap_init_fb(void)
+static int __init omap_init_fb(void)
{
return platform_device_register(&omap_fb_device);
}
@@ -390,7 +390,7 @@ void omapfb_reserve_sdram_memblock(void)
{
}
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long start_avail,
@@ -409,7 +409,7 @@ void omapfb_reserve_sdram_memblock(void)
{
}
-unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+unsigned long __init omapfb_reserve_sram(unsigned long sram_pstart,
unsigned long sram_vstart,
unsigned long sram_size,
unsigned long start_avail,
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index 8bd15bdb4132..c915a661f1f5 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -81,37 +81,6 @@ enum omap_color_mode {
OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
-
- OMAP_DSS_COLOR_GFX_OMAP2 =
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
-
- OMAP_DSS_COLOR_VID_OMAP2 =
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY,
-
- OMAP_DSS_COLOR_GFX_OMAP3 =
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
-
- OMAP_DSS_COLOR_VID1_OMAP3 =
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
-
- OMAP_DSS_COLOR_VID2_OMAP3 =
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
};
enum omap_lcd_display_type {
diff --git a/arch/arm/plat-omap/include/plat/vrfb.h b/arch/arm/plat-omap/include/plat/vrfb.h
index d8a03ced3b10..3792bdea2f6d 100644
--- a/arch/arm/plat-omap/include/plat/vrfb.h
+++ b/arch/arm/plat-omap/include/plat/vrfb.h
@@ -35,6 +35,7 @@ struct vrfb {
bool yuv_mode;
};
+#ifdef CONFIG_OMAP2_VRFB
extern int omap_vrfb_request_ctx(struct vrfb *vrfb);
extern void omap_vrfb_release_ctx(struct vrfb *vrfb);
extern void omap_vrfb_adjust_size(u16 *width, u16 *height,
@@ -47,4 +48,19 @@ extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot);
extern void omap_vrfb_restore_context(void);
+#else
+static inline int omap_vrfb_request_ctx(struct vrfb *vrfb) { return 0; }
+static inline void omap_vrfb_release_ctx(struct vrfb *vrfb) {}
+static inline void omap_vrfb_adjust_size(u16 *width, u16 *height,
+ u8 bytespp) {}
+static inline u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
+ { return 0; }
+static inline u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
+ { return 0; }
+static inline void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+ u16 width, u16 height, unsigned bytespp, bool yuv_mode) {}
+static inline int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
+ { return 0; }
+static inline void omap_vrfb_restore_context(void) {}
+#endif
#endif /* __VRFB_H */
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
new file mode 100644
index 000000000000..e49c5b6fc4e2
--- /dev/null
+++ b/arch/arm/plat-pxa/include/plat/sdhci.h
@@ -0,0 +1,32 @@
+/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
+ *
+ * Copyright 2010 Marvell
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ *
+ * PXA Platform - SDHCI platform data definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_PXA_SDHCI_H
+#define __PLAT_PXA_SDHCI_H
+
+/* pxa specific flag */
+/* Requires a free-running clock */
+#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
+
+/*
+ * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI
+ * @max_speed: the maximum speed supported
+ * @quirks: quirks of specific device
+ * @flags: flags for platform requirement
+ */
+struct sdhci_pxa_platdata {
+ unsigned int max_speed;
+ unsigned int quirks;
+ unsigned int flags;
+};
+
+#endif /* __PLAT_PXA_SDHCI_H */
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index a9ae30c41e74..6fbfea61f7bb 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 5e73c25f8f85..4aedcab7cd4b 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -146,9 +146,11 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
return ret;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *datap = (void __user *) data;
switch (request) {
/* Read the word at location addr in the child process */
@@ -158,8 +160,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/* Write the word in data at location addr */
@@ -173,11 +174,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (const void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index a6886f6e4819..4104d5783e2c 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -15,14 +15,6 @@
#define LFLUSH_I_AND_D 0x00000808
#define LSIGTRAP 5
-/* process bits for task_struct.flags */
-#define PF_TRACESYS_OFF 3
-#define PF_TRACESYS_BIT 5
-#define PF_PTRACED_OFF 3
-#define PF_PTRACED_BIT 4
-#define PF_DTRACE_OFF 1
-#define PF_DTRACE_BIT 5
-
/*
* NOTE! The single-stepping code assumes that all interrupt handlers
* start by saving SYSCFG on the stack with their first instruction.
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index b35839354130..75089f80855d 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -38,12 +38,13 @@
* Get contents of register REGNO in task TASK.
*/
static inline long
-get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
+get_reg(struct task_struct *task, unsigned long regno,
+ unsigned long __user *datap)
{
long tmp;
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -74,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
* Write contents of register REGNO in task TASK.
*/
static inline int
-put_reg(struct task_struct *task, long regno, unsigned long data)
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -240,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -368,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
pr_debug("ptrace: PTRACE_SETREGS\n");
return copy_regset_from_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
case_default:
default:
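
A side effect of the unsigned addr, visible in this hunk and in the cris, frv, h8300 and m68knommu ones below: the explicit "regno < 0" and "addr < 0" tests become dead code and are dropped, since a negative value wraps to a huge unsigned one that the upper-bound test already rejects. A compilable illustration (the PT_LAST_PSEUDO value here is made up):

    #include <stdio.h>

    #define PT_LAST_PSEUDO 40               /* hypothetical last register offset */

    static int ok_signed(long regno)
    {
        return !((regno & 3) || regno > PT_LAST_PSEUDO || regno < 0);
    }

    static int ok_unsigned(unsigned long regno)
    {
        return !((regno & 3) || regno > PT_LAST_PSEUDO);
    }

    int main(void)
    {
        /* -4 wraps to 0xfff...fc, so the upper bound rejects it on its own */
        printf("%d %d\n", ok_signed(-4), ok_unsigned((unsigned long)-4));
        return 0;
    }
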
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index e70c804e9377..320065f3cbe5 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -76,9 +76,11 @@ ptrace_disable(struct task_struct *child)
* (in user space) where the result of the ptrace call is written (instead of
* being returned).
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -93,10 +95,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -110,19 +112,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_DCCR) {
+ if (regno == PT_DCCR) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= DCCR_MASK;
data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
@@ -141,7 +141,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- data += sizeof(long);
+ datap++;
}
break;
@@ -165,7 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
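
The GETREGS/SETREGS loops now advance a typed user pointer instead of adding sizeof(long) to a raw integer. Both forms step through the user buffer one word at a time, as this quick check shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned long buf[4];
        unsigned long data = (unsigned long)buf;    /* old style: raw integer */
        unsigned long *datap = buf;                 /* new style: typed pointer */

        data += sizeof(long);
        datap++;

        /* both now refer to buf[1] */
        printf("%d\n", (unsigned long *)data == datap);
        return 0;
    }
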
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f4ebd1e7d0f5..511ece94a574 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -126,9 +126,11 @@ ptrace_disable(struct task_struct *child)
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -163,10 +165,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -180,19 +182,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_CCS) {
+ if (regno == PT_CCS) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= CCS_MASK;
data |= get_reg(child, PT_CCS) & ~CCS_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index f63d6fccbc6c..9eaae217b21b 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index cb4c317eaecc..a8d6565d415d 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
(void *) damlr; \
})
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
{
unsigned long paddr;
pagefault_disable();
- debug_kmap_atomic(type);
paddr = page_to_phys(page);
switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
case 1: return __kmap_atomic_primary(1, paddr, 3);
case 2: return __kmap_atomic_primary(2, paddr, 4);
case 3: return __kmap_atomic_primary(3, paddr, 5);
- case 4: return __kmap_atomic_primary(4, paddr, 6);
- case 5: return __kmap_atomic_primary(5, paddr, 7);
- case 6: return __kmap_atomic_primary(6, paddr, 8);
- case 7: return __kmap_atomic_primary(7, paddr, 9);
- case 8: return __kmap_atomic_primary(8, paddr, 10);
-
- case 9 ... 9 + NR_TLB_LINES - 1:
- return __kmap_atomic_secondary(type - 9, paddr);
default:
BUG();
@@ -152,22 +143,13 @@ do { \
asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0)
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
{
switch (type) {
case 0: __kunmap_atomic_primary(0, 2); break;
case 1: __kunmap_atomic_primary(1, 3); break;
case 2: __kunmap_atomic_primary(2, 4); break;
case 3: __kunmap_atomic_primary(3, 5); break;
- case 4: __kunmap_atomic_primary(4, 6); break;
- case 5: __kunmap_atomic_primary(5, 7); break;
- case 6: __kunmap_atomic_primary(6, 8); break;
- case 7: __kunmap_atomic_primary(7, 9); break;
- case 8: __kunmap_atomic_primary(8, 10); break;
-
- case 9 ... 9 + NR_TLB_LINES - 1:
- __kunmap_atomic_secondary(type - 9, kvaddr);
- break;
default:
BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
pagefault_enable();
}
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
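
The frv changes (and the mips and generic ones further down) replace caller-chosen KM_* slots with a small per-CPU stack of atomic-kmap indices. A deliberately simplified sketch of that bookkeeping, assuming one CPU and ignoring preemption, just to show the LIFO discipline behind kmap_atomic_idx_push()/kmap_atomic_idx()/kmap_atomic_idx_pop():

    static int kmap_idx;                /* per-CPU and preempt-safe in the real kernel */

    static int kmap_atomic_idx_push(void)
    {
        return kmap_idx++;              /* take the next free fixmap slot */
    }

    static int kmap_atomic_idx(void)
    {
        return kmap_idx - 1;            /* slot used by the most recent mapping */
    }

    static void kmap_atomic_idx_pop(void)
    {
        kmap_idx--;                     /* mappings are released in LIFO order */
    }
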
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index c18b0d32e636..6bc241e4b4f8 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/*
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index fac028936a04..9d68f7fac730 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -254,23 +254,26 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
tmp = 0;
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
break;
case PT__END + 0:
@@ -299,23 +302,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
if (ret == 0)
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
- ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- ret = put_reg(child, addr >> 2, data);
- break;
-
- default:
- ret = -EIO;
+ ret = put_reg(child, regno, data);
break;
}
break;
@@ -324,25 +322,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FP/Media state. */
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FP/Media state. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 85d110b71cf7..41098a3803a2 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
dampr2 = __get_DAMPR(2);
for (i = 0; i < nents; i++) {
- vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+ vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr,
(unsigned long) vaddr + PAGE_SIZE);
}
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b5..b24ade27a0f0 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page, __KM_CACHE);
frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page, __KM_CACHE);
start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
frv_cache_wback_inv(start, start + len);
- kunmap_atomic(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index eadd07658075..fd7fcd4c2e33 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -36,3 +36,54 @@ struct page *kmap_atomic_to_page(void *ptr)
{
return virt_to_page(ptr);
}
+
+void *__kmap_atomic(struct page *page)
+{
+ unsigned long paddr;
+ int type;
+
+ pagefault_disable();
+ type = kmap_atomic_idx_push();
+ paddr = page_to_phys(page);
+
+ switch (type) {
+ /*
+ * The first 4 primary maps are reserved for architecture code
+ */
+ case 0: return __kmap_atomic_primary(4, paddr, 6);
+ case 1: return __kmap_atomic_primary(5, paddr, 7);
+ case 2: return __kmap_atomic_primary(6, paddr, 8);
+ case 3: return __kmap_atomic_primary(7, paddr, 9);
+ case 4: return __kmap_atomic_primary(8, paddr, 10);
+
+ case 5 ... 5 + NR_TLB_LINES - 1:
+ return __kmap_atomic_secondary(type - 5, paddr);
+
+ default:
+ BUG();
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int type = kmap_atomic_idx();
+ switch (type) {
+ case 0: __kunmap_atomic_primary(4, 6); break;
+ case 1: __kunmap_atomic_primary(5, 7); break;
+ case 2: __kunmap_atomic_primary(6, 8); break;
+ case 3: __kunmap_atomic_primary(7, 9); break;
+ case 4: __kunmap_atomic_primary(8, 10); break;
+
+ case 5 ... 5 + NR_TLB_LINES - 1:
+ __kunmap_atomic_secondary(type - 5, kvaddr);
+ break;
+
+ default:
+ BUG();
+ }
+ kmap_atomic_idx_pop();
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
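
Illustrative use of the entry points added above, assuming the usual kernel headers; this is a sketch, not code from the patch. Nested atomic mappings are torn down most-recent-first and no KM_* type is passed any more:

    static void copy_page_sketch(struct page *dst, struct page *src)
    {
        void *vdst = __kmap_atomic(dst);
        void *vsrc = __kmap_atomic(src);

        memcpy(vdst, vsrc, PAGE_SIZE);

        __kunmap_atomic(vsrc);          /* LIFO: most recent mapping first */
        __kunmap_atomic(vdst);
    }
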
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index df114122ebdf..497fa89b5df4 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -50,27 +50,29 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
- if (addr < H8300_REGS_NO)
- tmp = h8300_get_reg(child, addr);
+ if (regno < H8300_REGS_NO)
+ tmp = h8300_get_reg(child, regno);
else {
- switch(addr) {
+ switch (regno) {
case 49:
tmp = child->mm->start_code;
break ;
@@ -88,24 +90,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
}
if (!ret)
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
- addr = addr >> 2; /* temporary hack. */
- if (addr == PT_ORIG_ER0) {
+ if (regno == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
- if (addr < H8300_REGS_NO) {
- ret = h8300_put_reg(child, addr, data);
+ if (regno < H8300_REGS_NO) {
+ ret = h8300_put_reg(child, regno, data);
break ;
}
ret = -EIO;
@@ -116,11 +117,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
tmp = h8300_get_reg(child, i);
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -130,12 +131,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
h8300_put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fa8a8594660..6073b187528a 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
-#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
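
On ia64 cputime is stored in nanoseconds, so the replacement helpers are plain divisions and multiplications by NSEC_PER_USEC. A quick user-space check of the arithmetic, with the macros copied from the definitions above:

    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL

    #define cputime_to_usecs(ct)    ((ct) / NSEC_PER_USEC)
    #define usecs_to_cputime(us)    ((us) * NSEC_PER_USEC)

    int main(void)
    {
        unsigned long long ct = 2500000;        /* 2.5 ms of cputime, in ns */

        printf("%llu\n", cputime_to_usecs(ct));         /* 2500 */
        printf("%llu\n", usecs_to_cputime(2500ULL));    /* 2500000 */
        return 0;
    }
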
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index c3286f42e501..1a97af31ef17 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* atomic versions of the some PTE manipulations: */
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
index 118d42979003..c8fcaa2ac48f 100644
--- a/arch/ia64/include/asm/siginfo.h
+++ b/arch/ia64/include/asm/siginfo.h
@@ -62,6 +62,7 @@ typedef struct siginfo {
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see below */
unsigned long _isr; /* isr */
+ short _addr_lsb; /* lsb of faulting address */
} _sigfault;
/* SIGPOLL */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f9bc93..8848f43d819e 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
}
long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
switch (request) {
case PTRACE_PEEKTEXT:
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index e6359c566b50..8a28cfea2729 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 0021ade4cba8..20743754f2b2 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -622,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/*
@@ -639,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
* read the word at location addr in the USER area.
*/
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/*
@@ -661,11 +662,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index e41fea399bfe..73b8c8fbed9c 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -50,14 +50,6 @@
LFLUSH_I_AND_D = 0x00000808
-/* process bits for task_struct.ptrace */
-PT_TRACESYS_OFF = 3
-PT_TRACESYS_BIT = 1
-PT_PTRACED_OFF = 3
-PT_PTRACED_BIT = 0
-PT_DTRACE_OFF = 3
-PT_DTRACE_BIT = 2
-
#define SAVE_ALL_INT save_all_int
#define SAVE_ALL_SYS save_all_sys
#define RESTORE_ALL restore_all
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 80e41492aa2a..26be277394f9 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -32,16 +32,6 @@
#ifdef __ASSEMBLY__
-/* process bits for task_struct.flags */
-PF_TRACESYS_OFF = 3
-PF_TRACESYS_BIT = 5
-PF_PTRACED_OFF = 3
-PF_PTRACED_BIT = 4
-PF_DTRACE_OFF = 1
-PF_DTRACE_BIT = 5
-
-LENOSYS = 38
-
#define SWITCH_STACK_SIZE (6*4+4) /* Includes return address */
/*
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8e9a8a754dde..45bd3f589bf0 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
}
#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
#define pte_unmap(pte) ((void)0)
-#define pte_unmap_nested(pte) ((void)0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index f847ec732d62..cf5fad9b5250 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
/* FIXME: should we bother with kmap() here? */
#define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
#define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
/* Macros to (de)construct the fake PTEs representing swap pages. */
#define __swp_type(x) ((x).val & 0x7F)
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 616e59752c29..0b252683cefb 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -156,55 +156,57 @@ void user_disable_single_step(struct task_struct *child)
singlestep_disable(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
+ int regno = addr >> 2; /* temporary hack. */
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr >= 0 && addr < 19) {
- tmp = get_reg(child, addr);
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
+ if (regno >= 0 && regno < 19) {
+ tmp = get_reg(child, regno);
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
} else
goto out_eio;
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR:
+ /* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
- if (addr >= 0 && addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno >= 0 && regno < 19) {
+ if (put_reg(child, regno, data))
goto out_eio;
- } else if (addr >= 21 && addr < 48) {
+ } else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
- data = (unsigned long)data << 15;
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+ data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
@@ -212,16 +214,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
if (ret)
break;
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
- ret = get_user(tmp, (unsigned long *)data);
+ ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
@@ -229,25 +231,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
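
The PT_SR handling above, like the DCCR and CCS handling in the cris hunks earlier, uses a read-modify-write idiom: only bits inside the mask may come from the tracer, everything else keeps its current value. A small check with made-up numbers (this SR_MASK is hypothetical, not the m68k definition):

    #include <stdio.h>

    #define SR_MASK 0x001f              /* hypothetical tracer-writable bits */

    int main(void)
    {
        unsigned long cur  = 0x2704;    /* current SR, privileged bits set */
        unsigned long data = 0xffff;    /* value supplied by the tracer */

        data &= SR_MASK;                /* keep only the writable bits */
        data |= cur & ~SR_MASK;         /* preserve the rest */

        printf("%#lx\n", data);         /* 0x271f: high bits unchanged */
        return 0;
    }
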
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 6fe7c38cd556..6709fb707335 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -112,9 +112,12 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -122,53 +125,48 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
ret = -EIO;
- if (addr < 19) {
- tmp = get_reg(child, addr);
- if (addr == PT_SR)
+ if (regno < 19) {
+ tmp = get_reg(child, regno);
+ if (regno == PT_SR)
tmp >>= 16;
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
- } else if (addr == 49) {
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
+ } else if (regno == 49) {
tmp = child->mm->start_code;
- } else if (addr == 50) {
+ } else if (regno == 50) {
tmp = child->mm->start_data;
- } else if (addr == 51) {
+ } else if (regno == 51) {
tmp = child->mm->end_code;
} else
break;
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
- addr = addr >> 2; /* temporary hack. */
-
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data <<= 16;
data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
- if (addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno < 19) {
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
}
- if (addr >= 21 && addr < 48)
+ if (regno >= 21 && regno < 48)
{
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
ret = 0;
}
break;
@@ -180,11 +178,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_reg(child, i);
if (i == PT_SR)
tmp >>= 16;
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -194,7 +192,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < 19; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
@@ -204,7 +202,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -213,7 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: { /* Get the child FPU state. */
ret = 0;
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -223,7 +221,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: { /* Set the child FPU state. */
ret = 0;
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -231,8 +229,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index d4f421672d3b..cae268c22ba2 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+ ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic(pte)
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index dc03ffc8174a..05ac8cc975d5 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -73,7 +73,8 @@ static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
return (microblaze_reg_t *)((char *)regs + reg_offs);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int rval;
unsigned long val = 0;
@@ -99,7 +100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
} else {
rval = -EIO;
}
- } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+ } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
if (request == PTRACE_PEEKUSR)
val = *reg_addr;
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 75753ca73bfd..77e644082a3b 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap __kmap
-#define kunmap __kunmap
-#define kmap_atomic __kmap_atomic
-#define kunmap_atomic_notypecheck __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page __kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 5f4b9d4e4114..f1f508e4f971 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -839,7 +839,7 @@ struct bridge_controller {
nasid_t nasid;
unsigned int widget_id;
unsigned int irq_cpu;
- dma64_addr_t baddr;
+ u64 baddr;
unsigned int pci_int[8];
};
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index ae90412556d0..8a153d2fa62a 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1be4b0fa30da..f00896087dda 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
/*
* Initialize a new pgd / pmd table with invalid pointers.
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c8777333e198..d21c388c0116 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
- ret = ptrace_get_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
- ret = ptrace_set_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..3634c7ea06ac 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
{
void *addr;
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
return addr;
}
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
* kmaps are appropriate for short, tight code paths only.
*/
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,48 @@ void *__kmap_atomic(struct page *page, enum km_type type)
}
EXPORT_SYMBOL(__kmap_atomic);
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ int idx = type + KM_TYPE_NR * smp_processor_id();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_one(vaddr);
-#endif
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_one(vaddr);
+ }
+#endif
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +114,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
return (void*) vaddr;
}
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
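
The idx computation in the mips code gives every (cpu, nesting depth) pair its own fixmap slot and therefore its own virtual address. A toy version of the arithmetic, with hypothetical constants in place of the platform-specific FIXADDR_TOP, KM_TYPE_NR and PAGE_SHIFT:

    #include <stdio.h>

    #define KM_TYPE_NR      20          /* hypothetical slots per CPU */
    #define PAGE_SHIFT      12
    #define FIXADDR_TOP     0xfffe0000UL /* hypothetical */
    #define FIX_KMAP_BEGIN  0
    #define __fix_to_virt(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

    int main(void)
    {
        int cpu = 1, type = 3;                  /* 4th nested mapping on CPU 1 */
        int idx = type + KM_TYPE_NR * cpu;      /* unique slot per (cpu, depth) */

        printf("%#lx\n", __fix_to_virt(FIX_KMAP_BEGIN + idx));
        return 0;
    }
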
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 7c2a2f7f8dc1..365766a3d536 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -9,8 +9,19 @@ config MN10300
def_bool y
select HAVE_OPROFILE
-config AM33
- def_bool y
+config AM33_2
+ def_bool n
+
+config AM33_3
+ def_bool n
+
+config AM34_2
+ def_bool n
+ select MN10300_HAS_ATOMIC_OPS_UNIT
+ select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+ def_bool y if AM33_3 || AM34_2
config MMU
def_bool y
@@ -37,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CMOS_UPDATE
- def_bool y
+ def_bool n
config GENERIC_FIND_NEXT_BIT
def_bool y
@@ -45,6 +56,27 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
+config GENERIC_TIME
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+
+config CEVT_MN10300
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+ def_bool y
+ depends on GENERIC_TIME
+
config GENERIC_BUG
def_bool y
@@ -61,18 +93,14 @@ config GENERIC_HARDIRQS
config HOTPLUG_CPU
def_bool n
-config HZ
- int
- default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
+mainmenu "Panasonic MN10300/AM33 Kernel Configuration"
source "init/Kconfig"
source "kernel/Kconfig.freezer"
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
choice
prompt "Unit type"
@@ -87,6 +115,10 @@ config MN10300_UNIT_ASB2303
config MN10300_UNIT_ASB2305
bool "ASB2305"
+config MN10300_UNIT_ASB2364
+ bool "ASB2364"
+ select SMSC911X_ARCH_HOOKS if SMSC911X
+
endchoice
choice
@@ -99,57 +131,51 @@ choice
config MN10300_PROC_MN103E010
bool "MN103E010"
depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+ select AM33_2
+ select MN10300_PROC_HAS_TTYSM0
+ select MN10300_PROC_HAS_TTYSM1
+ select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+ bool "MN2WS0050"
+ depends on MN10300_UNIT_ASB2364
+ select AM34_2
select MN10300_PROC_HAS_TTYSM0
select MN10300_PROC_HAS_TTYSM1
select MN10300_PROC_HAS_TTYSM2
endchoice
-choice
- prompt "Processor core support"
- default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+ def_bool n
help
- This option specifies the processor core for which the kernel will be
- compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
- bool "AM33v2"
-
-endchoice
+ This should be enabled if the processor has an atomic ops unit
+ capable of doing LL/SC equivalent operations.
config FPU
bool "FPU present"
default y
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
-choice
- prompt "CPU Caching mode"
- default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+ bool "Save FPU state lazily"
+ default y
+ depends on FPU && !SMP
help
- This option determines the caching mode for the kernel.
-
- Write-Back caching mode involves the all reads and writes causing
- the affected cacheline to be read into the cache first before being
- operated upon. Memory is not then updated by a write until the cache
- is filled and a cacheline needs to be displaced from the cache to
- make room. Only at that point is it written back.
-
- Write-Through caching only fetches cachelines from memory on a
- read. Writes always get written directly to memory. If the affected
- cacheline is also in cache, it will be updated too.
-
- The final option is to turn of caching entirely.
+ Enable this to be lazy in the saving of the FPU state to the owning
+ task's thread struct. This is useful if most tasks on the system
+ don't use the FPU as only those tasks that use it will pass it
+ between them, and the state needn't be saved for a task that isn't
+ using it.
-config MN10300_CACHE_WBACK
- bool "Write-Back"
+ This can't be so easily used on SMP as the process that owns the FPU
+ state on a CPU may be currently running on another CPU, so for the
+ moment, it is disabled.
-config MN10300_CACHE_WTHRU
- bool "Write-Through"
+source "arch/mn10300/mm/Kconfig.cache"
-config MN10300_CACHE_DISABLED
- bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+ def_bool y
menu "Memory layout options"
@@ -170,24 +196,55 @@ config KERNEL_TEXT_ADDRESS
config KERNEL_ZIMAGE_BASE_ADDRESS
hex "Base address of compressed vmlinux image"
- default "0x90700000"
+ default "0x50700000"
+config BOOT_STACK_OFFSET
+ hex
+ default "0xF00" if SMP
+ default "0xFF0" if !SMP
+
+config BOOT_STACK_SIZE
+ hex
+ depends on SMP
+ default "0x100"
endmenu
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
+config SMP
+ bool "Symmetric multi-processing support"
+ default y
+ depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
+ See also <file:Documentation/i386/IO-APIC.txt>,
+ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config NR_CPUS
+ int
+ depends on SMP
+ default "2"
+
+config USE_GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
+source "kernel/Kconfig.preempt"
config MN10300_CURRENT_IN_E2
bool "Hold current task address in E2 register"
+ depends on !SMP
default y
help
This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +266,15 @@ config MN10300_USING_JTAG
suppresses the use of certain hardware debugging features, such as
single-stepping, which are taken over completely by the JTAG unit.
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
config MN10300_RTC
bool "Using MN10300 RTC"
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+ select GENERIC_CMOS_UPDATE
default n
help
-
This option enables support for the RTC, thus enabling time to be
tracked, even when system is powered down. This is available on-chip
on the MN103E010.
@@ -306,14 +366,23 @@ config MN10300_TTYSM1
choice
prompt "Select the timer to supply the clock for SIF1"
- default MN10300_TTYSM0_TIMER9
+ default MN10300_TTYSM1_TIMER12 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM1_TIMER9 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM1
+config MN10300_TTYSM1_TIMER12
+ bool "Use timer 12 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM1_TIMER9
bool "Use timer 9 (16-bit)"
+ depends on AM33_2 || AM33_3
config MN10300_TTYSM1_TIMER3
bool "Use timer 3 (8-bit)"
+ depends on AM33_2 || AM33_3
endchoice
@@ -328,17 +397,107 @@ config MN10300_TTYSM2
choice
prompt "Select the timer to supply the clock for SIF2"
- default MN10300_TTYSM0_TIMER10
+ default MN10300_TTYSM2_TIMER3 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM2_TIMER10 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM2
+config MN10300_TTYSM2_TIMER9
+ bool "Use timer 9 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+ bool "Use timer 1 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+ bool "Use timer 3 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM2_TIMER10
bool "Use timer 10 (16-bit)"
+ depends on AM33_2 || AM33_3
endchoice
config MN10300_TTYSM2_CTS
bool "Enable the use of the CTS line /dev/ttySM2"
- depends on MN10300_TTYSM2
+ depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+ int "GDBSTUB interrupt priority"
+ depends on GDBSTUB
+ range 0 1 if LINUX_CLI_LEVEL = 2
+ range 0 2 if LINUX_CLI_LEVEL = 3
+ range 0 3 if LINUX_CLI_LEVEL = 4
+ range 0 4 if LINUX_CLI_LEVEL = 5
+ range 0 5 if LINUX_CLI_LEVEL = 6
+ default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+ int "MN10300 on-chip serial interrupt priority"
+ depends on MN10300_TTYSM
+ range 1 1 if LINUX_CLI_LEVEL = 2
+ range 1 2 if LINUX_CLI_LEVEL = 3
+ range 1 3 if LINUX_CLI_LEVEL = 4
+ range 1 4 if LINUX_CLI_LEVEL = 5
+ range 1 5 if LINUX_CLI_LEVEL = 6
+ default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+ int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+ range 2 6
+ default 2
+ help
+ local_irq_disable() doesn't actually disable maskable interrupts -
+ what it does is restrict the levels of interrupt which are permitted
+ (a lower level indicates a higher priority) by lowering the value in
+ EPSW.IM from 7. Any interrupt is permitted for which the level is
+ lower than EPSW.IM.
+
+ Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+ serial DMA interrupts are allowed to interrupt normal disabled
+ sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+ int "Kernel timer interrupt priority"
+ range LINUX_CLI_LEVEL 6
+ default 4
+
+config PCI_IRQ_LEVEL
+ int "PCI interrupt priority"
+ depends on PCI
+ range LINUX_CLI_LEVEL 6
+ default 5
+
+config ETHERNET_IRQ_LEVEL
+ int "Ethernet interrupt priority"
+ depends on SMC91X || SMC911X || SMSC911X
+ range LINUX_CLI_LEVEL 6
+ default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+ int "External serial port interrupt priority"
+ depends on SERIAL_8250
+ range LINUX_CLI_LEVEL 6
+ default 6
endmenu
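
The LINUX_CLI_LEVEL help text above spells out the rule the new priority options depend on: an interrupt of level L is delivered only while L is numerically lower than EPSW.IM, and local_irq_disable() presumably lowers EPSW.IM to LINUX_CLI_LEVEL rather than masking everything. A trivial model of that comparison, with made-up levels:

    #include <stdio.h>

    /* lower number means higher priority; deliverable iff level < EPSW.IM */
    static int deliverable(int irq_level, int epsw_im)
    {
        return irq_level < epsw_im;
    }

    int main(void)
    {
        int im_after_cli = 2;           /* e.g. LINUX_CLI_LEVEL = 2 */

        printf("GDBSTUB (level 0): %d\n", deliverable(0, im_after_cli));  /* 1 */
        printf("timer   (level 4): %d\n", deliverable(4, im_after_cli)); /* 0 */
        return 0;
    }
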
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index ac5c6bdb2f05..7120282bf0d8 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -36,6 +36,9 @@ endif
ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
PROCESSOR := mn103e010
endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR := mn2ws0050
+endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
UNIT := asb2303
@@ -43,6 +46,9 @@ endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
UNIT := asb2305
endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT := asb2364
+endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
diff --git a/arch/mn10300/boot/compressed/head.S b/arch/mn10300/boot/compressed/head.S
index 502e1eb56709..7b50345b9e84 100644
--- a/arch/mn10300/boot/compressed/head.S
+++ b/arch/mn10300/boot/compressed/head.S
@@ -14,10 +14,29 @@
#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
.globl startup_32
startup_32:
- # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+ #
+ # Secondary CPUs jump directly to the kernel entry point
+ #
+ # Must save primary CPU's D0-D2 registers as they hold boot parameters
+ #
+ mov (CPUID), d3
+ and CPUID_MASK,d3
+ beq startup_primary
+ mov CONFIG_KERNEL_TEXT_ADDRESS,a0
+ jmp (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+ # first save parameters from bootloader
mov param_save_area,a0
mov d0,(a0)
mov d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
mov (a0),d0
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
- mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0 # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
movhu d0,(a0) # enable
+#endif /* !ENABLED */
# clear the BSS area
mov __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
# decompress the kernel
call decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ call mn10300_dcache_flush_inv[],0
+#endif
# disable caches again
mov CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
mov (4,a0),d1
mov (8,a0),d2
+ # jump to the kernel proper entry point
mov a3,sp
mov CONFIG_KERNEL_TEXT_ADDRESS,a0
jmp (a0)
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+ ret [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
.data
.align 4
param_save_area:
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index d80dfcb2c902..3f749b69ca71 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
CONFIG_PROFILING=y
# CONFIG_BLOCK is not set
CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_MN10300_RTC=y
CONFIG_MN10300_TTYSM_CONSOLE=y
CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644
index 000000000000..83ce2f27b12a
--- /dev/null
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72f..92d2f9298e38 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " add %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval += i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " sub %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval -= i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " and %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ mask = ~mask;
+ flags = arch_local_cli_save();
+ *addr &= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " or %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ *addr |= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
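
The atomic_add_unless() macro above is a standard cmpxchg retry loop: re-read the counter until either it equals the forbidden value or a compare-and-swap of c -> c + a lands. Here is a small user-space sketch of the same loop; GCC's __sync_val_compare_and_swap stands in for the MN10300 cmpxchg(), and atomic_add_unless_model is an invented name for illustration.

#include <stdio.h>

static int atomic_add_unless_model(int *counter, int a, int u)
{
	int c, old;

	c = *counter;				/* atomic_read() */
	while (c != u) {
		old = __sync_val_compare_and_swap(counter, c, c + a);
		if (old == c)
			return 1;		/* swap landed, add done */
		c = old;			/* counter moved underneath us, retry */
	}
	return 0;				/* counter held 'u', nothing added */
}

int main(void)
{
	int v = 0;

	printf("%d v=%d\n", atomic_add_unless_model(&v, 1, 0), v);	/* 0 v=0 */
	v = 3;
	printf("%d v=%d\n", atomic_add_unless_model(&v, 2, 0), v);	/* 1 v=5 */
	return 0;
}
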
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e9661076..3b8a868188f5 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
unsigned int *a = (unsigned int *) addr;
int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
/*
* test bit
*/
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
{
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}
/*
* change bit
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
{
int mask;
unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*a ^= mask;
}
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
/*
* test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
/*
* test and change bit
*/
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
int mask, retval;
unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
return retval;
}
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
#include <asm-generic/bitops/lock.h>
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366d..f29cde2cfc91 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -43,14 +43,18 @@
/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
-/* instruction cache access registers */
+/* data cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
#endif /* _ASM_CACHE_H */
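
The reworked macros above index a way/entry pair by multiplying the entry number by L1_CACHE_BYTES instead of the hard-coded 0x10, so the geometry follows the configured line size. A sketch of the resulting address arithmetic for DCACHE_TAG(); the two constants are illustrative placeholders, not necessarily the values from <asm/cache.h>.

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_BYTES		16u	/* assumed line size */
#define L1_CACHE_WAYDISP	0x1000u	/* assumed per-way displacement */

static uint32_t dcache_tag_addr(unsigned int way, unsigned int entry)
{
	/* mirrors: 0xc8300000 + WAY * L1_CACHE_WAYDISP + ENTRY * L1_CACHE_BYTES */
	return 0xc8300000u + way * L1_CACHE_WAYDISP + entry * L1_CACHE_BYTES;
}

int main(void)
{
	printf("DCACHE_TAG(1, 3) -> %#x\n", (unsigned int)dcache_tag_addr(1, 3));
	return 0;
}
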
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f030..faed90240ded 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
*/
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
#ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
#else
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+ mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+ mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+ mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+ mn10300_local_dcache_inv_range2(start, size)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
+#define mn10300_local_icache_inv() do {} while (0)
+#define mn10300_local_icache_inv_page(start) do {} while (0)
+#define mn10300_local_icache_inv_range(start, end) do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_inv() do {} while (0)
+#define mn10300_local_dcache_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_inv() do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_icache_inv() do {} while (0)
+#define mn10300_icache_inv_page(start) do {} while (0)
+#define mn10300_icache_inv_range(start, end) do {} while (0)
+#define mn10300_icache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
/*
- * internal debugging function
+ * Internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
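
The selection above gives three flavours of flush_icache_page()/flush_icache_range(): a full flush implementation, an invalidate-only one driven by mn10300_icache_inv_page(), and no-ops when the cache is disabled. The caller pattern is the same in all cases: write instructions, then flush the range so the I-cache cannot serve stale lines. A stand-alone sketch of that pattern follows; the stub functions and the opcode bytes are illustrative, not the kernel routines.

#include <stdio.h>
#include <string.h>

static void dcache_flush_range_stub(unsigned long start, unsigned long end)
{
	printf("dcache flush %#lx-%#lx\n", start, end);
}

static void icache_inv_range_stub(unsigned long start, unsigned long end)
{
	printf("icache inval %#lx-%#lx\n", start, end);
}

int main(void)
{
	static unsigned char text[64];
	const unsigned char insn[4] = { 0xcb, 0xcb, 0xcb, 0xcb };	/* illustrative bytes */
	unsigned long start = (unsigned long)text;

	memcpy(text, insn, sizeof(insn));		/* patch in new instructions */
	dcache_flush_range_stub(start, start + sizeof(insn));
	icache_inv_range_stub(start, start + sizeof(insn));
	return 0;
}
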
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388ea..90ed4a365c97 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#endif
-#ifdef CONFIG_MN10300_CPU_AM33V2
/* we tell the compiler to pretend to be AM33 so that it doesn't try and use
* the FP regs, but tell the assembler that we're actually allowed AM33v2
* instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
#else
.am33_2
#endif
-#endif
#ifdef __KERNEL__
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
#define EPSW_nAR 0x00040000 /* register bank control */
#define EPSW_ML 0x00080000 /* monitor level */
#define EPSW_FE 0x00100000 /* FPU enable */
+#define EPSW_IM_SHIFT 8 /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num) ((num) << EPSW_IM_SHIFT)
/* FPU registers */
#define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
#define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
#define CPUREV_TYPE 0x0000000f /* CPU type */
#define CPUREV_TYPE_S 0
-#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1 0x00000000 /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2 0x00000001 /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1 0x00000002 /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3 0x00000003 /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2 0x00000004 /* - AM34-2 core, AM33/3.00 arch */
#define CPUREV_REVISION 0x000000f0 /* CPU revision */
#define CPUREV_REVISION_S 4
#define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
#define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
#define CHCTR_DCWMD 0xf000 /* data cache way mode */
+#ifdef CONFIG_AM34_2
+#define ICIVCR __SYSREG(0xc0000c00, u32) /* icache area invalidate control */
+#define ICIVCR_ICIVBSY 0x00000008 /* icache area invalidate busy */
+#define ICIVCR_ICI 0x00000001 /* icache area invalidate */
+
+#define ICIVMR __SYSREG(0xc0000c04, u32) /* icache area invalidate mask */
+
+#define DCPGCR __SYSREG(0xc0000c10, u32) /* data cache area purge control */
+#define DCPGCR_DCPGBSY 0x00000008 /* data cache area purge busy */
+#define DCPGCR_DCP 0x00000002 /* data cache area purge */
+#define DCPGCR_DCI 0x00000001 /* data cache area invalidate */
+
+#define DCPGMR __SYSREG(0xc0000c14, u32) /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
/* MMU control registers */
#define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
#define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
#define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
#define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
#define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE 0x80000000 /* write-through cache TLB entry bit enable */
+#endif
#define PIDR __SYSREG(0xc0000094, u16) /* PID register */
#define PIDR_PID 0x00ff /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
#define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
#define xPTEL_PPN 0xfffff006 /* physical page number */
-#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT 1
-#define xPTEL_UNUSED2_BIT 2
-#define xPTEL_C_BIT 3
-#define xPTEL_PV_BIT 4
-#define xPTEL_D_BIT 5
-#define xPTEL_G_BIT 9
-
#define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
#define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
#define xPTEU_VPN 0xfffffc00 /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
#define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
#define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
#define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
-#define xPTEL2_PPN 0xfffffc00 /* physical page number */
+#define xPTEL2_CWT 0x00000400 /* cacheable write-through */
+#define xPTEL2_UNUSED1 0x00000800 /* unused bit (broadcast mask) */
+#define xPTEL2_PPN 0xfffff000 /* physical page number */
+
+#define xPTEL2_V_BIT 0 /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT 1
+#define xPTEL2_PV_BIT 2
+#define xPTEL2_D_BIT 3
+#define xPTEL2_G_BIT 7
+#define xPTEL2_UNUSED1_BIT 11
#define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
#define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
#define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
#define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR __SYSREG(0xc0000a00, u32) /* cacheable address */
+#define AAR2 __SYSREG(0xc0000a04, u32) /* uncacheable address */
+#define ADR __SYSREG(0xc0000a08, u32) /* data */
+#define ASR __SYSREG(0xc0000a0c, u32) /* status */
+#define AARU __SYSREG(0xd400aa00, u32) /* user address */
+#define ADRU __SYSREG(0xd400aa08, u32) /* user data */
+#define ASRU __SYSREG(0xd400aa0c, u32) /* user status */
+
+#define ASR_RW 0x00000008 /* read */
+#define ASR_BW 0x00000004 /* bus error */
+#define ASR_IW 0x00000002 /* interrupt */
+#define ASR_LW 0x00000001 /* bus lock */
+
+#define ASRU_RW ASR_RW /* read */
+#define ASRU_BW ASR_BW /* bus error */
+#define ASRU_IW ASR_IW /* interrupt */
+#define ASRU_LW ASR_LW /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+ "_AAR = 0\n"
+ "_AAR2 = 4\n"
+ "_ADR = 8\n"
+ "_ASR = 12\n");
+#else
+#define _AAR 0
+#define _AAR2 4
+#define _ADR 8
+#define _ASR 12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR 0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
#endif /* __KERNEL__ */
#endif /* _ASM_CPU_REGS_H */
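
The atomic-operations unit exposed above is driven as a small handshake: the target address goes into AAR, data is exchanged through ADR, and ASR is read back afterwards; a non-zero status (lost bus lock, interrupt, bus error) means the locked sequence was broken and must be retried. A plain-C model of the xchg-style loop, with the register file replaced by an ordinary struct and no real hardware access.

#include <stdio.h>

struct atomic_unit {
	unsigned long aar;	/* address register */
	unsigned long adr;	/* data register */
	unsigned long asr;	/* status register; stays 0 in this model */
};

static unsigned long xchg_model(struct atomic_unit *u,
				unsigned long *m, unsigned long val)
{
	unsigned long old, status;

	do {
		u->aar = (unsigned long)m;	/* mov %4,(_AAR,%3) */
		u->adr = old = *m;		/* mov (_ADR,%3),%1 */
		u->adr = *m = val;		/* mov %5,(_ADR,%3) */
		status = u->asr;		/* mov (_ASR,%3),%0 */
	} while (status);			/* or %0,%0 / bne 1b: retry on failure */

	return old;
}

int main(void)
{
	unsigned long word = 1;
	struct atomic_unit unit = { 0, 0, 0 };

	printf("old=%lu new=%lu\n", xchg_model(&unit, &word, 5), word);
	return 0;
}
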
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199da0f4a..80337b339c90 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
@@ -11,91 +11,6 @@
#ifndef _ASM_DMACTL_REGS_H
#define _ASM_DMACTL_REGS_H
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
-#define DMxCTR_BG 0x0000001f /* transfer request source */
-#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
-#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
-#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
-#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
-#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
-#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
-#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
-#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
-#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
-#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
-#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
-#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
-#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
-#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
-#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
-#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
-#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
-#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
-#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
-#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
-#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
-#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
-#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
-#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
-#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
-#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
-#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
-#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
-#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
-#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
-#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
-#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
-#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
-#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
-#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
-#define DMxCTR_RQM 0x00060000 /* external request input source mode */
-#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
-#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
-#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
-#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
-#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
-#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
-
-#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* control reg */
-
-#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* src addr reg */
-
-#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* dest addr reg */
-#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
-
-#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
- * size reg */
-#define DMxCYC_CYC 0x000000ff /* number of interrmittent transfers -1 */
-
-#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
-#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
-#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
-#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
-
-#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
-#define DM1ICR GxICR(DM0IR1) /* DMA channel 1 complete intr ctrl reg */
-#define DM2ICR GxICR(DM0IR2) /* DMA channel 2 complete intr ctrl reg */
-#define DM3ICR GxICR(DM0IR3) /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
- u32 ctr;
- const void *src;
- void *dst;
- u32 siz;
- u32 cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
#endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97cd9a14..8157c9267f42 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -32,6 +32,12 @@
#define R_MN10300_ALIGN 34 /* Alignment requirement. */
/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT 1 /* Has AM34 Atomic Operations */
+
+
+/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
u_int32_t fpcr;
} elf_fpregset_t;
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
/*
* This is used to ensure we don't load something for the wrong architecture
*/
@@ -130,7 +134,11 @@ do { \
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
#define ELF_HWCAP (0)
+#endif
/*
* This yields a string that ld.so will use to load implementation
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466ef3f9..ca3e20508c77 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
/*
* define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- * (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ * the same time.
*/
#define GDBSTUB_BKPT 0xFF
@@ -90,7 +90,6 @@ enum exception_code {
extern void __set_intr_stub(enum exception_code code, void *handler);
extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
struct pt_regs;
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
extern asmlinkage void raw_bus_error(void);
extern asmlinkage void double_fault(void);
extern asmlinkage int system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
extern asmlinkage void uninitialised_exception(struct pt_regs *,
enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
+#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_EXCEPTIONS_H */
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83a7a6a..b7625de8eade 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
@@ -12,74 +12,125 @@
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
#include <asm/sigcontext.h>
-#include <asm/user.h>
#ifdef __KERNEL__
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
extern struct task_struct *fpu_state_owner;
+#endif
-#define set_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags |= THREAD_USING_FPU; \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
-#define clear_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU; \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bset %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bclr %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define unlazy_fpu(tsk) \
-do { \
- preempt_disable(); \
- if (fpu_state_owner == (tsk)) \
- fpu_save(&tsk->thread.fpu_state); \
- preempt_enable(); \
-} while (0)
-
-#define exit_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) \
- fpu_state_owner = NULL; \
- preempt_enable(); \
-} while (0)
-
-#define flush_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) { \
- fpu_state_owner = NULL; \
- __tsk->thread.uregs->epsw &= ~EPSW_FE; \
- } \
- preempt_enable(); \
- clear_using_fpu(__tsk); \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
-extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU */
-
-/*
- * signal frame handlers
- */
extern int fpu_setup_sigcontext(struct fpucontext *buf);
extern int fpu_restore_sigcontext(struct fpucontext *buf);
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk)
+ fpu_save(&tsk->thread.fpu_state);
+#endif
+ preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+ if (fpu_state_owner == tsk)
+ fpu_state_owner = NULL;
+ preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk) {
+ fpu_state_owner = NULL;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#endif
+ preempt_enable();
+ clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU */
+
#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_FPU_H */
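
The CONFIG_LAZY_SAVE_FPU variant above defers the register save: state is only written back when the task in question is the recorded fpu_state_owner, otherwise there is nothing to do. A toy model of that ownership test; the types and names are invented for illustration.

#include <stdio.h>

struct task {
	const char *name;
};

static struct task *fpu_state_owner;

static void fpu_save_model(struct task *tsk)
{
	printf("saved FPU state of %s\n", tsk->name);
}

static void unlazy_fpu_model(struct task *tsk)
{
	if (fpu_state_owner == tsk)	/* only the owner's registers are live */
		fpu_save_model(tsk);
}

int main(void)
{
	struct task a = { "A" }, b = { "B" };

	fpu_state_owner = &a;
	unlazy_fpu_model(&a);		/* saves */
	unlazy_fpu_model(&b);		/* no-op: B never touched the FPU */
	return 0;
}
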
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949bdf039..2ee58e3eb6b3 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
#ifndef __ASM_OFFSETS_H__
#include <asm/asm-offsets.h>
#endif
+#include <asm/thread_info.h>
#define pi break
@@ -37,11 +38,15 @@
movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
mov sp,fp # FRAME pointer in A3
add -12,sp # allow for calls to be made
- mov (__frame),a1
- mov a1,(REG_NEXT,fp)
- mov fp,(__frame)
- and ~EPSW_FE,epsw # disable the FPU inside the kernel
+ # push the exception frame onto the front of the list
+ GET_THREAD_INFO a1
+ mov (TI_frame,a1),a0
+ mov a0,(REG_NEXT,fp)
+ mov fp,(TI_frame,a1)
+
+ # disable the FPU inside the kernel
+ and ~EPSW_FE,epsw
# we may be holding current in E2
#ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
.macro RESTORE_ALL
# peel back the stack to the calling frame
# - this permits execve() to discard extra frames due to kernel syscalls
- mov (__frame),fp
+ GET_THREAD_INFO a0
+ mov (TI_frame,a0),fp
mov fp,sp
- mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
- mov d0,(__frame)
+ mov (REG_NEXT,fp),d0
+ mov d0,(TI_frame,a0) # userspace has regs->next == 0
#ifndef CONFIG_MN10300_USING_JTAG
mov (REG_EPSW,fp),d0
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 41ed26763964..f5495ad82b77 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
extern asmlinkage void gdbstub_purge_cache(void);
#else
#define gdbstub_purge_cache() do {} while (0)
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d950117674..0000d650b55f 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -19,9 +19,10 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned long idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
unsigned int __nmi_count; /* arch dependent */
unsigned int __irq_count; /* arch dependent */
+#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a29b88..bfe2d88604d9 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
+ pagefault_disable();
if (page < highmem_start_page)
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
@@ -86,31 +87,42 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
return vaddr;
}
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
{
-#if HIGHMEM_DEBUG
- enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+ int type;
- if (vaddr < FIXADDR_START) /* FIXME */
+ if (vaddr < FIXADDR_START) { /* FIXME */
+ pagefault_enable();
return;
+ }
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
- BUG();
+ type = kmap_atomic_idx();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+ {
+ unsigned int idx;
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+ BUG();
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(kmap_pte - idx);
+ local_flush_tlb_one(vaddr);
+ }
#endif
-}
+ kmap_atomic_idx_pop();
+ pagefault_enable();
+}
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
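
The rewritten __kmap_atomic()/__kunmap_atomic() above replace the caller-supplied km_type with a small per-CPU stack of slot indices: each nested atomic mapping takes the next fixmap slot and must be released in reverse order. Below is a toy model of that bookkeeping; kmap_atomic_idx_push_model and friends are invented stand-ins for the kernel helpers.

#include <assert.h>
#include <stdio.h>

static int kmap_idx;	/* per-CPU in the real kernel */

static int kmap_atomic_idx_push_model(void)
{
	return kmap_idx++;
}

static void kmap_atomic_idx_pop_model(void)
{
	assert(kmap_idx > 0);	/* an unbalanced kunmap would trip this */
	kmap_idx--;
}

int main(void)
{
	int outer = kmap_atomic_idx_push_model();	/* slot 0 */
	int inner = kmap_atomic_idx_push_model();	/* nested mapping: slot 1 */

	printf("slots %d %d\n", outer, inner);
	kmap_atomic_idx_pop_model();			/* release inner first */
	kmap_atomic_idx_pop_model();
	return 0;
}
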
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c796c5a..585b708c2bc0 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
@@ -15,24 +15,19 @@
#ifdef __KERNEL__
-/* interrupt controller registers */
-#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */
-
-#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */
-#define IAGR_GN 0x00fc /* group number register
- * (documentation _has_ to be wrong)
- */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
-#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
-#define SET_XIRQ_TRIGGER(X,Y) \
-do { \
- u16 x = EXTMD; \
- x &= ~(3 << ((X) * 2)); \
- x |= ((Y) & 3) << ((X) * 2); \
- EXTMD = x; \
-} while (0)
+#include <proc/intctl-regs.h>
#define XIRQ_TRIGGER_LOWLEVEL 0
#define XIRQ_TRIGGER_HILEVEL 1
@@ -59,10 +54,18 @@ do { \
#define GxICR_LEVEL_5 0x5000 /* - level 5 */
#define GxICR_LEVEL_6 0x6000 /* - level 6 */
#define GxICR_LEVEL_SHIFT 12
+#define GxICR_NMI 0x8000 /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
#ifndef __ASSEMBLY__
extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
#endif
/* external interrupts */
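
The new GxICR() macro folds in a second register bank: interrupt groups 64-191 sit 0xf00 bytes beyond where the linear formula would place them. The address calculation on its own, constants copied from the macro and no hardware access:

#include <stdio.h>
#include <stdint.h>

static uint32_t gxicr_addr(unsigned int x)
{
	/* mirrors: 0xd4000000 + X * 4 + ((X >= 64) && (X < 192)) * 0xf00 */
	return 0xd4000000u + x * 4 + ((x >= 64 && x < 192) ? 0xf00u : 0u);
}

int main(void)
{
	printf("GxICR(10)  = %#x\n", (unsigned int)gxicr_addr(10));	/* first bank */
	printf("GxICR(100) = %#x\n", (unsigned int)gxicr_addr(100));	/* +0xf00 bank */
	return 0;
}
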
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119e6497..787255da744e 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
+#define readsb(p, dst, count) \
+ insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+ insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+ insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+ outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+ outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+ outsl((unsigned long) (p), (src), (count))
#define IO_SPACE_LIMIT 0xffffffff
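
readsw()/writesw() and the other string accessors added above transfer count items between one device register and successive buffer positions, so the register address never advances. A minimal model with a plain variable standing in for the device data register:

#include <stdio.h>
#include <stdint.h>

static uint16_t fake_fifo;	/* stands in for a memory-mapped data register */

static void readsw_model(volatile uint16_t *reg, uint16_t *dst, int count)
{
	while (count--)
		*dst++ = *reg;	/* same register read for every item */
}

int main(void)
{
	uint16_t buf[4];

	fake_fifo = 0xbeef;
	readsw_model(&fake_fifo, buf, 4);
	printf("%#x %#x\n", (unsigned int)buf[0], (unsigned int)buf[3]);
	return 0;
}
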
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d16d1c..1a73fb3f60c6 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
@@ -21,8 +21,16 @@
/* this number is used when no interrupt has been assigned */
#define NO_IRQ INT_MAX
-/* hardware irq numbers */
-#define NR_IRQS GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#define NR_IRQS NR_CPU_IRQS
+#endif
/* external hardware irq numbers */
#define NR_XIRQS GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd232eb4..97d0cb5af807 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
@@ -18,7 +18,11 @@
#define ARCH_HAS_OWN_IRQ_REGS
#ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+ return current_frame();
+}
#endif
#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a117cb2..7a7ae12c7119 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
#define _ASM_IRQFLAGS_H
#include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
/*
* interrupt control
@@ -23,11 +26,7 @@
* - level 6 - timer interrupt
* - "enabled": run in IM7
*/
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
#ifndef __ASSEMBLY__
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
/*
* we make sure arch_irq_enable() doesn't cause priority inversion
*/
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
static inline void arch_local_irq_enable(void)
{
unsigned long tmp;
+ int cpu = raw_smp_processor_id();
asm volatile(
" mov epsw,%0 \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
" or %2,%0 \n"
" mov %0,epsw \n"
: "=&d"(tmp)
- : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
- : "memory");
+ : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+ : "memory", "cc");
}
static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+ return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}
static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
*/
static inline void arch_safe_halt(void)
{
+#ifdef CONFIG_SMP
+ arch_local_irq_enable();
+#else
asm volatile(
" or %0,epsw \n"
" nop \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
:
: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
: "cc");
+#endif
}
+#define __sleep_cpu() \
+do { \
+ asm volatile( \
+ " bset %1,(%0)\n" \
+ "1: btst %1,(%0)\n" \
+ " bne 1b\n" \
+ : \
+ : "i"(&CPUM), "i"(CPUM_SLEEP) \
+ : "cc" \
+ ); \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "i"(~EPSW_IE)
+ : "memory"
+ );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_cli();
+ return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+ asm volatile(
+ " or %0,epsw \n"
+ :
+ : "i"(EPSW_IE)
+ : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " or %1,epsw \n"
+ :
+ : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+ : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg) \
+ mov epsw,reg
+
+#define LOCAL_IRQ_DISABLE \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|MN10300_CLI_LEVEL,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_IRQ_ENABLE \
+ or EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+ mov reg,epsw
+
+#define LOCAL_CLI_SAVE(reg) \
+ mov epsw,reg; \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_CLI \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_STI \
+ or EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level) \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|(level),epsw
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */
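
With the arch_irqs_disabled_flags() change above, flags only count as "interrupts enabled" when EPSW.IE is set and the mask level is IM7; any lower mask level is treated as disabled. A stand-alone check of that predicate; the EPSW constants below follow the architecture's bit layout but should be treated as illustrative if they differ from <asm/cpu-regs.h>.

#include <stdbool.h>
#include <stdio.h>

#define EPSW_IE		0x00000800UL	/* interrupt enable */
#define EPSW_IM		0x00000700UL	/* interrupt mask level field */
#define EPSW_IM_7	0x00000700UL	/* level 7: everything enabled */
#define EPSW_IM_2	0x00000200UL	/* level 2 */

static bool irqs_disabled_flags_model(unsigned long flags)
{
	return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}

int main(void)
{
	printf("%d\n", irqs_disabled_flags_model(EPSW_IE | EPSW_IM_7));	/* 0: enabled */
	printf("%d\n", irqs_disabled_flags_model(EPSW_IE | EPSW_IM_2));	/* 1: masked down */
	printf("%d\n", irqs_disabled_flags_model(0));				/* 1: IE clear */
	return 0;
}
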
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..c8f6c82672ad 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
+#define MMU_CONTEXT_TLBPID_NR 256
#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL
#define MMU_NO_CONTEXT 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR 0
#define enter_lazy_tlb(mm, tsk) do {} while (0)
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
- cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
- cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
#else
-#define cpu_ran_vm(cpu, mm) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm) true
-#endif /* CONFIG_SMP */
+ return true;
+#endif
+}
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
*/
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
- flush_tlb_all();
+ local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
 * distinguish MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
}
/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm) do { } while (0)
-
-/*
* after we have set current->mm to a new value, this activates the context for
* the new mm so we see the new mappings.
*/
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
{
PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm) (0)
+#define activate_context(mm) local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
if (prev != next) {
+#ifdef CONFIG_SMP
+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
cpu_ran_vm(cpu, next);
- activate_context(next, cpu);
PTBR = (unsigned long) next->pgd;
- } else if (!cpu_maybe_ran_vm(cpu, next)) {
- activate_context(next, cpu);
+ activate_context(next);
}
}
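
allocate_mmu_context() above hands out 8-bit TLB PIDs from a per-CPU counter whose upper 24 bits act as a version number: when the 256 PIDs of one version are exhausted, the local TLB is flushed and the carry into the version bits starts the next cycle, with version 0 reserved to mean MMU_NO_CONTEXT. A compact model of that roll-over; the flush is a printf stand-in and a single counter replaces the per-CPU array.

#include <stdio.h>

#define TLBPID_MASK	0x000000ffUL
#define FIRST_VERSION	0x00000100UL

static unsigned long context_cache = FIRST_VERSION;

static unsigned long allocate_context_model(void)
{
	unsigned long mc = ++context_cache;

	if (!(mc & TLBPID_MASK)) {		/* 256 PIDs of this version used up */
		printf("local_flush_tlb_all()\n");
		if (!mc)			/* full 32-bit wrap: never reuse version 0 */
			context_cache = mc = FIRST_VERSION;
	}
	return mc;
}

int main(void)
{
	printf("ctx %#lx\n", allocate_context_model());	/* 0x101 */
	context_cache |= TLBPID_MASK;			/* jump to the last PID */
	printf("ctx %#lx\n", allocate_context_model());	/* flush, then 0x200 */
	return 0;
}
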
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index a19f11327cd8..146bacf193ea 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -11,7 +11,6 @@
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
-#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 16d88577f3e0..a1e894b5f65b 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* The vmalloc() routines also leaves a hole of 4kB between each vmalloced
* area to catch addressing errors.
*/
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START (0x70000000UL)
+#define VMALLOC_END (0x7C000000UL)
+#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
+#endif
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL xPTEL_G_BIT
-
-#define _PAGE_VALID xPTEL_V
-#define _PAGE_ACCESSED xPTEL_UNUSED1
-#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */
-#define _PAGE_CACHE xPTEL_C
-#define _PAGE_PRESENT xPTEL_PV
-#define _PAGE_DIRTY xPTEL_D
-#define _PAGE_PROT xPTEL_PR
-#define _PAGE_PROT_RKNU xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL xPTEL_G
-#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */
-
-#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX 0x040
-#define __PAGE_PROT_USER 0x080
-#define __PAGE_PROT_WRITE 0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID xPTEL2_V
+#define _PAGE_CACHE xPTEL2_C
+#define _PAGE_PRESENT xPTEL2_PV
+#define _PAGE_DIRTY xPTEL2_D
+#define _PAGE_PROT xPTEL2_PR
+#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL xPTEL2_G
+#define _PAGE_PS_MASK xPTEL2_PS
+#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
+#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
+#define _PAGE_CACHE_WT xPTEL2_CWT
+#define _PAGE_ACCESSED xPTEL2_UNUSED1
+#define _PAGE_NX 0 /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x000 /* If not present */
+
+#define __PAGE_PROT_UWAUX 0x010
+#define __PAGE_PROT_USER 0x020
+#define __PAGE_PROT_WRITE 0x040
#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE 0x000 /* If not present */
#ifndef __ASSEMBLY__
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO __pgprot(__PAGE_USERIO)
+
/*
* Whilst the MN10300 can do page protection for execute (given separate data
* and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS 29
+#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Macro to mark a page protection value as "uncacheable". On processors which
* do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -457,9 +473,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
#define pte_offset_map(dir, address) \
((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do {} while (0)
-#define pte_unmap_nested(pte) do {} while (0)
/*
* The MN10300 has external MMU info in the form of a TLB: this is adapted from
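
The pgprot_noncached() change above is a behavioural fix: on this part the cache
bit enables caching, so marking a protection value uncacheable has to clear the
bit rather than set it, and the new pgprot_through() sets the separate
write-through bit. A tiny userspace check of that semantic (not kernel code; the
bit values are invented for the sketch):

#include <assert.h>

#define _PAGE_CACHE	0x008UL		/* hypothetical cache-enable bit */
#define _PAGE_CACHE_WT	0x400UL		/* hypothetical write-through bit */

typedef unsigned long pgprot_t;

#define pgprot_noncached(prot)	((prot) & ~_PAGE_CACHE)	/* clear, not set */
#define pgprot_through(prot)	((prot) | _PAGE_CACHE_WT)

int main(void)
{
	pgprot_t kern = _PAGE_CACHE;	/* kernel mappings cached by default */

	assert(!(pgprot_noncached(kern) & _PAGE_CACHE));
	assert(pgprot_through(kern) & _PAGE_CACHE_WT);
	return 0;
}
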
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index f7d4b0d285e8..4c1b5cc14c19 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -13,10 +13,13 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
__pc; \
})
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
extern void show_registers(struct pt_regs *regs);
/*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
struct mn10300_cpuinfo {
int type;
- unsigned long loops_per_sec;
+ unsigned long loops_per_jiffy;
char hard_math;
- unsigned long *pgd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
};
extern struct mn10300_cpuinfo boot_cpu_data;
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else /* CONFIG_SMP */
#define cpu_data &boot_cpu_data
#define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
*/
#define TASK_UNMAPPED_BASE 0x30000000
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
struct fpu_state_struct {
unsigned long fs[32]; /* fpu registers */
unsigned long fpcr; /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
unsigned long a3; /* kernel FP */
unsigned long wchan;
unsigned long usp;
- struct pt_regs *__frame;
unsigned long fpu_flags;
#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
+#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
struct fpu_state_struct fpu_state;
};
-#define INIT_THREAD \
-{ \
- .uregs = init_uregs, \
- .pc = 0, \
- .sp = 0, \
- .a3 = 0, \
- .wchan = 0, \
- .__frame = NULL, \
+#define INIT_THREAD \
+{ \
+ .uregs = init_uregs, \
+ .pc = 0, \
+ .sp = 0, \
+ .a3 = 0, \
+ .wchan = 0, \
}
#define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
* - need to discard the frame stacked by the kernel thread invoking the execve
* syscall (see RESTORE_ALL macro)
*/
-#define start_thread(regs, new_pc, new_sp) do { \
- set_fs(USER_DS); \
- __frame = current->thread.uregs; \
- __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM; \
- __frame->pc = new_pc; \
- __frame->sp = new_sp; \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+ unsigned long new_pc, unsigned long new_sp)
+{
+ struct thread_info *ti = current_thread_info();
+ struct pt_regs *frame0;
+ set_fs(USER_DS);
+
+ frame0 = thread_info_to_uregs(ti);
+ frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+ frame0->pc = new_pc;
+ frame0->sp = new_sp;
+ ti->frame = frame0;
+}
+
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
static inline void prefetch(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
static inline void prefetchw(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
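
start_thread() above is now an inline that seeds the pt_regs frame sitting at the
top of the kernel stack instead of going through the removed __frame global. A
standalone sketch of the same arithmetic (not kernel code; THREAD_SIZE, EPSW_USER
and the structure layouts are invented for the sketch):

#include <stdio.h>

#define THREAD_SIZE	8192UL		/* hypothetical kernel stack size */
#define EPSW_USER	0x0801UL	/* hypothetical nSL|IE user-mode bits */

struct pt_regs { unsigned long epsw, pc, sp; };
struct thread_info { struct pt_regs *frame; };

/* the user register frame sits at the very top of the kernel stack */
static struct pt_regs *ti_to_uregs(void *ti_base)
{
	return (struct pt_regs *)((char *)ti_base + THREAD_SIZE
				  - sizeof(struct pt_regs));
}

static void start_thread(struct thread_info *ti,
			 unsigned long pc, unsigned long sp)
{
	struct pt_regs *frame0 = ti_to_uregs(ti);

	frame0->epsw = EPSW_USER;	/* run in user mode, interrupts on */
	frame0->pc = pc;
	frame0->sp = sp;
	ti->frame = frame0;		/* current exception frame pointer */
}

int main(void)
{
	static union {
		struct thread_info ti;
		unsigned char stack[THREAD_SIZE];
	} task;

	start_thread(&task.ti, 0x1000, 0x7fff0000);
	printf("frame0 at stack offset %zu\n",
	       (size_t)((char *)task.ti.frame - (char *)&task));
	return 0;
}
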
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 7c2e911052b6..b6961811d445 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -40,7 +40,6 @@
#define PT_PC 26
#define NR_PTREGS 27
-#ifndef __ASSEMBLY__
/*
* This defines the way registers are stored in the event of an exception
* - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
unsigned long epsw;
unsigned long pc;
};
-#endif
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
@@ -86,12 +84,7 @@ struct pt_regs {
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame; /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
#define instruction_pointer(regs) ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
-#endif /* !__ASSEMBLY */
-
#define profile_pc(regs) ((regs)->pc)
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_PTRACE_H */
diff --git a/arch/mn10300/include/asm/reset-regs.h b/arch/mn10300/include/asm/reset-regs.h
index 174523d50132..10c7502a113f 100644
--- a/arch/mn10300/include/asm/reset-regs.h
+++ b/arch/mn10300/include/asm/reset-regs.h
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
RSTCTR |= RSTCTR_CHIPRST;
}
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
extern void watchdog_go(void);
extern asmlinkage void watchdog_handler(void);
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index c295194cc703..6c14bb1d0d9b 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -15,25 +15,14 @@
#include <linux/init.h>
-extern void check_rtc_time(void);
extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
#else /* !CONFIG_MN10300_RTC */
-static inline void check_rtc_time(void)
-{
-}
-
static inline void calibrate_clock(void)
{
}
-static inline unsigned long get_initial_rtc_time(void)
-{
- return 0;
-}
-
#endif /* !CONFIG_MN10300_RTC */
#include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644
index 000000000000..6d594d4a0e10
--- /dev/null
+++ b/arch/mn10300/include/asm/rwlock.h
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS 0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_read_lock_const(rw, helper); \
+ else \
+ __build_read_lock_ptr(rw, helper); \
+ } while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_write_lock_const(rw, helper); \
+ else \
+ __build_write_lock_ptr(rw, helper); \
+ } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
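
The helpers in this new header operate on the RW_LOCK_BIAS counter convention:
the lock starts at the bias, each reader subtracts one, a writer subtracts the
whole bias, and a negative or non-zero result means the caller must back off. A
single-threaded model of just that accounting (not the kernel code, which does
the updates atomically in assembly):

#include <assert.h>

#define RW_LOCK_BIAS	0x01000000

static int try_read(int *lock)
{
	if (--(*lock) < 0) {		/* contended: undo and fail */
		(*lock)++;
		return 0;
	}
	return 1;
}

static int try_write(int *lock)
{
	*lock -= RW_LOCK_BIAS;
	if (*lock != 0) {		/* readers or another writer present */
		*lock += RW_LOCK_BIAS;
		return 0;
	}
	return 1;
}

int main(void)
{
	int lock = RW_LOCK_BIAS;

	assert(try_read(&lock));	/* one reader in */
	assert(!try_write(&lock));	/* writer blocked by the reader */
	lock++;				/* reader unlocks */
	assert(try_write(&lock));	/* writer now gets it */
	return 0;
}
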
diff --git a/arch/mn10300/include/asm/serial-regs.h b/arch/mn10300/include/asm/serial-regs.h
index 6498469e93ac..8320cda32f5a 100644
--- a/arch/mn10300/include/asm/serial-regs.h
+++ b/arch/mn10300/include/asm/serial-regs.h
@@ -20,18 +20,25 @@
/* serial port 0 */
#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
#define SC01CTR_CK 0x0007 /* clock source select */
-#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
-#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC01CTR_STB 0x0008 /* stop bit select */
#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -100,11 +107,23 @@
/* serial port 2 */
#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
+#ifdef CONFIG_AM33_2
#define SC2CTR_CK 0x0003 /* clock source select */
#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
#define SC2CTR_CK_EXTERN 0x0002 /* - external clock */
#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
+#else /* CONFIG_AM33_2 */
+#define SC2CTR_CK 0x0007 /* clock source select */
+#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
+#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
+#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
+#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
+#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
+#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC2CTR_CK_EXTERN 0x0007 /* - external clock */
+#endif /* CONFIG_AM33_2 */
#define SC2CTR_STB 0x0008 /* stop bit select */
#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -134,9 +153,14 @@
#define SC2ICR_RES 0x04 /* receive error select */
#define SC2ICR_RI 0x01 /* receive interrupt cause */
-#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */
-#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */
-#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */
+#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
+#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
+#else /* CONFIG_AM33_2 */
+#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
+#endif /* CONFIG_AM33_2 */
#define SC2STR_OEF 0x0001 /* overrun error found */
#define SC2STR_PEF 0x0002 /* parity error found */
#define SC2STR_FEF 0x0004 /* framing error found */
@@ -146,10 +170,17 @@
#define SC2STR_RXF 0x0040 /* receive status */
#define SC2STR_TXF 0x0080 /* transmit status */
+#ifdef CONFIG_AM33_2
#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
+#endif
+#ifdef CONFIG_AM33_2
#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
+#else /* CONFIG_AM33_2 */
+#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
+#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
diff --git a/arch/mn10300/include/asm/serial.h b/arch/mn10300/include/asm/serial.h
index a29445cddd6f..23a799293599 100644
--- a/arch/mn10300/include/asm/serial.h
+++ b/arch/mn10300/include/asm/serial.h
@@ -9,10 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
#endif
#include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 4eb8c61b7dab..a3930e43a958 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -3,6 +3,16 @@
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ * for SMP support.
+ * 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ * 23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ * 23-Jun-2008 MEI Delete INTC_IPI.
+ * 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ * 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
#ifndef _ASM_SMP_H
#define _ASM_SMP_H
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
#endif
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI 63
+#define CALL_FUNC_SINGLE_IPI 192
+#define LOCAL_TIMER_IPI 193
+#define FLUSH_CACHE_IPI 194
+#define CALL_FUNCTION_NMI_IPI 195
+#define GDB_NMI_IPI 196
+
+#define SMP_BOOT_IRQ 195
+
+#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI 100
+#define DELAY_TIME_BOOT_IPI 75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed. A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
+
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
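
Among the declarations above, send_IPI_allbutself() captures the usual cross-CPU
pattern: raise the given IPI on every online CPU except the caller's. A minimal
standalone sketch of that idea (not the kernel implementation; the online map and
the raise_ipi() stub are invented, and 63 mirrors RESCHEDULE_IPI from the header):

#include <stdio.h>

#define NR_CPUS 4

static int cpu_online[NR_CPUS] = { 1, 1, 1, 0 };	/* hypothetical map */

static void raise_ipi(int cpu, int irq)			/* stub trigger */
{
	printf("IPI %d -> CPU %d\n", irq, cpu);
}

static void send_ipi_allbutself(int self, int irq)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu] && cpu != self)
			raise_ipi(cpu, irq);
}

int main(void)
{
	send_ipi_allbutself(0, 63);	/* 63 = RESCHEDULE_IPI in the header */
	return 0;
}
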
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644
index 000000000000..2fcd1080322b
--- /dev/null
+++ b/arch/mn10300/include/asm/smsc911x.h
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b169e0..93429154e898 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ " bclr 1,(0,%0) \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int ret;
+
+ asm volatile(
+ " mov 1,%0 \n"
+ " bset %0,(%1) \n"
+ " bne 1f \n"
+ " clr %0 \n"
+ "1: xor 1,%0 \n"
+ : "=d"(ret)
+ : "a"(&lock->slock)
+ : "memory", "cc");
+
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ "1: bset 1,(0,%0) \n"
+ " bne 1b \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ int temp;
+
+ asm volatile(
+ "1: bset 1,(0,%2) \n"
+ " beq 3f \n"
+ " mov %1,epsw \n"
+ "2: mov (0,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 2b \n"
+ " mov %3,%0 \n"
+ " mov %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " bra 1b\n"
+ "3: \n"
+ : "=&d" (temp)
+ : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+ : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_lock(rw, "__read_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (atomic_dec_return(count) < 0)
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_lock(rw, "__write_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */
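
The arch_spin_*() routines above rely on the bset instruction atomically setting
a bit in ->slock and reporting whether it was already set. A portable C11 model
of the same test-and-set behaviour (not the MN10300 assembly):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_flag slock; } spinlock_t;

static int spin_trylock(spinlock_t *lock)
{
	/* returns 1 on success, 0 if someone already holds the lock */
	return !atomic_flag_test_and_set_explicit(&lock->slock,
						  memory_order_acquire);
}

static void spin_lock(spinlock_t *lock)
{
	while (!spin_trylock(lock))
		;				/* spin, like "bne 1b" */
}

static void spin_unlock(spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->slock, memory_order_release);
}

int main(void)
{
	spinlock_t lock = { ATOMIC_FLAG_INIT };

	spin_lock(&lock);
	printf("trylock while held: %d\n", spin_trylock(&lock));	/* 0 */
	spin_unlock(&lock);
	return 0;
}
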
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..653dc519b405
--- /dev/null
+++ b/arch/mn10300/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+ unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 9f7c7e17c01e..8ff3e5aaca41 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -12,12 +12,29 @@
#define _ASM_SYSTEM_H
#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next) \
+ do { \
+ if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
+ (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
+ (prev)->thread.uregs->epsw &= ~EPSW_FE; \
+ fpu_save(&(prev)->thread.fpu_state); \
+ } \
+ } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
struct task_struct;
struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last) \
do { \
+ switch_fpu(prev, next); \
current->thread.wchan = (u_long) __builtin_return_address(0); \
(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
mb(); \
@@ -40,8 +58,6 @@ do { \
#define nop() asm volatile ("nop")
-#endif /* !__ASSEMBLY__ */
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do { \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#else
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#endif
-
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags);
- return retval;
-}
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
- (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags);
- return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
#endif /* !__ASSEMBLY__ */
-
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */
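
The new switch_fpu() hook runs on the non-lazy path: if the outgoing task still
owns the FPU, the ownership flag is dropped, the FPU-enable bit is cleared in its
saved EPSW and the registers are written back to its thread struct. A simplified
standalone sketch of that sequence (not kernel code; EPSW_FE's value and the
fpu_save() stub are stand-ins, while THREAD_HAS_FPU matches the flag added in
processor.h):

#include <stdio.h>

#define THREAD_HAS_FPU	0x2UL
#define EPSW_FE		0x0100UL	/* hypothetical FPU-enable bit */

struct fpu_state { unsigned long fs[32]; };
struct thread {
	unsigned long fpu_flags;
	unsigned long epsw;
	struct fpu_state fpu_state;
};

static void fpu_save(struct fpu_state *st)
{
	puts("FPU registers saved");	/* stands in for the asm helper */
	(void)st;
}

static void switch_fpu(struct thread *prev)
{
	if (prev->fpu_flags & THREAD_HAS_FPU) {
		prev->fpu_flags &= ~THREAD_HAS_FPU;
		prev->epsw &= ~EPSW_FE;
		fpu_save(&prev->fpu_state);
	}
}

int main(void)
{
	struct thread t = { .fpu_flags = THREAD_HAS_FPU, .epsw = EPSW_FE };

	switch_fpu(&t);		/* saves because the task owned the FPU */
	switch_fpu(&t);		/* second call is a no-op */
	return 0;
}
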
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 2001cb657a95..aa07a4a5d794 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,10 +16,6 @@
#include <asm/page.h>
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
#define PREEMPT_ACTIVE 0x10000000
#ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
* must also be changed
*/
#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
+ struct pt_regs *frame; /* current exception frame */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
__u8 supervisor_stack[0];
};
+#define thread_info_to_uregs(ti) \
+ ((struct pt_regs *) \
+ ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
#else /* !__ASSEMBLY__ */
#ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
return ti;
}
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+ return current_thread_info()->frame;
+}
+
/* how to get the current stack pointer from C */
static inline unsigned long current_stack_pointer(void)
{
diff --git a/arch/mn10300/include/asm/timer-regs.h b/arch/mn10300/include/asm/timer-regs.h
index 1d883b7f94ab..c634977caf66 100644
--- a/arch/mn10300/include/asm/timer-regs.h
+++ b/arch/mn10300/include/asm/timer-regs.h
@@ -17,21 +17,27 @@
#ifdef __KERNEL__
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */
#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */
#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */
#define TM0MD_SRC 0x07 /* timer source */
#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -43,7 +49,9 @@
#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */
#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -55,7 +63,9 @@
#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */
#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -64,11 +74,13 @@
#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM3MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 2 */
+#define TM3MD_SRC_TM2CASCADE 0x03 /* - cascade with timer 2 */
#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -96,7 +108,9 @@
#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */
#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */
#define TM4MD_SRC 0x07 /* timer source */
#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */
@@ -105,7 +119,9 @@
#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -118,7 +134,11 @@
#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM5MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -130,7 +150,9 @@
#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -143,7 +165,11 @@
#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM8MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -156,7 +182,11 @@
#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM9MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -169,7 +199,11 @@
#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM10MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -178,32 +212,101 @@
#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM11MD_SRC_TM7CASCADE 0x03 /* - cascade with timer 7 */
#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM11MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */
+#if defined(CONFIG_AM34_2)
+#define TM12MD __SYSREG(0xd4003180, u8) /* timer 12 mode register */
+#define TM12MD_SRC 0x07 /* timer source */
+#define TM12MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM12MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM12MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM12MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM12MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM12MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM12MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM12MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM12MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM13MD __SYSREG(0xd4003182, u8) /* timer 13 mode register */
+#define TM13MD_SRC 0x07 /* timer source */
+#define TM13MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM13MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM13MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM13MD_SRC_TM12CASCADE 0x03 /* - cascade with timer 12 */
+#define TM13MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM13MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM13MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM13MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM13MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM13MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM14MD __SYSREG(0xd4003184, u8) /* timer 14 mode register */
+#define TM14MD_SRC 0x07 /* timer source */
+#define TM14MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM14MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM14MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM14MD_SRC_TM13CASCADE 0x03 /* - cascade with timer 13 */
+#define TM14MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM14MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM14MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM14MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM14MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM14MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM15MD __SYSREG(0xd4003186, u8) /* timer 15 mode register */
+#define TM15MD_SRC 0x07 /* timer source */
+#define TM15MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM15MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM15MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM15MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM15MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM15MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM15MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM15MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM15MD_COUNT_ENABLE 0x80 /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */
#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */
+#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */
#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */
#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */
+#define TM89BR __SYSREG(0xd4003098, u32) /* timer 8:9 base register */
#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */
#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */
-#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define TM12BR __SYSREG(0xd4003190, u16) /* timer 12 base register */
+#define TM13BR __SYSREG(0xd4003192, u16) /* timer 13 base register */
+#define TM14BR __SYSREG(0xd4003194, u16) /* timer 14 base register */
+#define TM15BR __SYSREG(0xd4003196, u16) /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */
#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */
#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */
-
#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */
#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */
#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */
+#define TM89BC __SYSREG(0xd40030a8, u32) /* timer 8:9 binary counter */
#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */
#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define TM12BC __SYSREG(0xd40031a0, u16) /* timer 12 binary counter */
+#define TM13BC __SYSREG(0xd40031a2, u16) /* timer 13 binary counter */
+#define TM14BC __SYSREG(0xd40031a4, u16) /* timer 14 binary counter */
+#define TM15BC __SYSREG(0xd40031a6, u16) /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
#define TM4IRQ 6 /* timer 4 IRQ */
#define TM5IRQ 7 /* timer 5 IRQ */
@@ -212,6 +315,12 @@
#define TM9IRQ 13 /* timer 9 IRQ */
#define TM10IRQ 14 /* timer 10 IRQ */
#define TM11IRQ 15 /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ 64 /* timer 12 IRQ */
+#define TM13IRQ 65 /* timer 13 IRQ */
+#define TM14IRQ 66 /* timer 14 IRQ */
+#define TM15IRQ 67 /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */
#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */
#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */
#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define TM12ICR GxICR(TM12IRQ) /* timer 12 uflow intr ctrl reg */
+#define TM13ICR GxICR(TM13IRQ) /* timer 13 uflow intr ctrl reg */
+#define TM14ICR GxICR(TM14IRQ) /* timer 14 uflow intr ctrl reg */
+#define TM15ICR GxICR(TM15IRQ) /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */
#define TM6MD_SRC 0x0007 /* timer source */
#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */
@@ -229,10 +346,14 @@
#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */
#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */
-#define TM6MD_SRC_TM6IOB_BOTH 0x0006 /* - TM6IOB pin input (both edges) */
+#define TM6MD_SRC_TM2UFLOW 0x0006 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define TM6MD_SRC_TM6IOB_BOTH 0x0006 */ /* - TM6IOB pin input (both edges) */
#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */
-#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#endif /* CONFIG_AM33_2 */
#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */
+#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#if defined(CONFIG_AM33_2)
#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */
#define TM6MD_PWM 0x3800 /* PWM output mode */
#define TM6MD_PWM_DIS 0x0000 /* - disabled */
@@ -240,10 +361,15 @@
#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */
#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */
#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */
#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */
#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */
+#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDA_OUT 0x07 /* output select */
#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */
#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */
#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */
#define TM6MDA_MODE 0xc0 /* compare A register mode */
-#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDA_EDGE 0x20 /* compare A edge select */
#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDA_MODE 0x40 /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */
+#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDB_OUT 0x07 /* output select */
#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */
#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B */
#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */
#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */
#define TM6MDB_MODE 0xc0 /* compare B register mode */
-#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDB_EDGE 0x20 /* compare B edge select */
#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDB_MODE 0x40 /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */
#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */
#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define TMTMD __SYSREG(0xd4004100, u8) /* Tick Timer mode register */
+#define TMTMD_TMTLDE 0x40 /* initialize TMTBC = TMTBR */
+#define TMTMD_TMTCNE 0x80 /* timer count enable */
+
+#define TMTBR __SYSREG(0xd4004110, u32) /* Tick Timer base register */
+#define TMTBC __SYSREG(0xd4004120, u32) /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define TMSMD __SYSREG(0xd4004140, u8) /* Timestamp Timer mode register */
+#define TMSMD_TMSLDE 0x40 /* initialize TMSBC = TMSBR */
+#define TMSMD_TMSCNE 0x80 /* timer count enable */
+
+#define TMSBR __SYSREG(0xd4004150, u32) /* Timestamp Timer base register */
+#define TMSBC __SYSREG(0xd4004160, u32) /* Timestamp Timer binary counter */
+
+#define TMTIRQ 119 /* OS Tick timer IRQ */
+#define TMSIRQ 120 /* Timestamp timer IRQ */
+
+#define TMTICR GxICR(TMTIRQ) /* OS Tick timer uflow intr ctrl reg */
+#define TMSICR GxICR(TMSIRQ) /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMER_REGS_H */
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index 8d031f9e117d..bd4e90dfe6c2 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -16,18 +16,30 @@
#define TICK_SIZE (tick_nsec / 1000)
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
- * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
#ifdef __KERNEL__
+extern cycles_t cacheflush_time;
+
static inline cycles_t get_cycles(void)
{
return read_timestamp_counter();
}
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+ tmp = GxICR(irq);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
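
setup_jiffies_interrupt() above wires a timer IRQ: it installs the irqaction,
sets the interrupt priority level from CONFIG_TIMER_IRQ_LEVEL, enables and
acknowledges the line, then reads the control register back to flush the write.
A kernel-style caller sketch (not part of the patch; the handler body is a
placeholder and TMJCIRQ is assumed to come from the unit headers):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/timex.h>

static irqreturn_t tick_handler(int irq, void *dev_id)
{
	/* acknowledge the timer and advance jiffies here */
	return IRQ_HANDLED;
}

static struct irqaction tick_irq = {
	.handler	= tick_handler,
	.flags		= IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
	.name		= "timer",
};

void __init board_time_init(void)
{
	/* hand the tick handler to the new helper at the configured level */
	setup_jiffies_interrupt(TMJCIRQ, &tick_irq);
}
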
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5d..efddd6e1adea 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -11,24 +11,78 @@
#ifndef _ASM_TLBFLUSH_H
#define _ASM_TLBFLUSH_H
+#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
-do { \
- int w; \
- __asm__ __volatile__ \
- (" mov %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,%1 \n" \
- : "=d"(w) \
- : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "cc", "memory" \
- ); \
-} while (0)
+struct tlb_state {
+ struct mm_struct *active_mm;
+ int state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+ int w;
+ asm volatile(
+ " mov %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,%1 \n"
+ : "=d"(w)
+ : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+ : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+ local_flush_tlb();
+}
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+ local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long pteu, flags, cnx;
+
+ addr &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+ if (cnx) {
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & xPTEU_PID;
+#endif
+ IPTEU = pteu;
+ DPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = 0;
+ if (DPTEL & xPTEL_V)
+ DPTEL = 0;
+ }
+ local_irq_restore(flags);
+}
/*
* TLB flushing:
@@ -40,41 +94,61 @@ do { \
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
-#define flush_tlb_all() \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_mm(mm) \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_range(vma, start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-
-#define __flush_tlb_global() flush_tlb_all()
-#define flush_tlb() flush_tlb_all()
-#define flush_tlb_kernel_range(start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- flush_tlb_all(); \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end) do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb() flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
#endif /* _ASM_TLBFLUSH_H */
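
local_flush_tlb_page() above invalidates at most one instruction-TLB and one
data-TLB entry: it presents the page address (plus the per-process TLB PID when
PIDs are in use) to the lookup registers and clears an entry only if the lookup
reports it valid. A userspace model of that probe-then-clear step (not kernel
code; the fake registers and bit values are invented):

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)
#define xPTEL_V		0x1UL		/* hypothetical valid bit */

struct tlb { unsigned long lookup_addr, entry; };

/* fake lookup: the entry is "valid" if it matches the presented address */
static unsigned long probe(struct tlb *t, unsigned long pteu)
{
	t->lookup_addr = pteu;
	return (t->entry & PAGE_MASK) == (pteu & PAGE_MASK) ? xPTEL_V : 0;
}

static void flush_one_page(struct tlb *itlb, struct tlb *dtlb,
			   unsigned long addr)
{
	unsigned long pteu = addr & PAGE_MASK;

	if (probe(itlb, pteu) & xPTEL_V)
		itlb->entry = 0;	/* like writing IPTEL = 0 */
	if (probe(dtlb, pteu) & xPTEL_V)
		dtlb->entry = 0;	/* like writing DPTEL = 0 */
}

int main(void)
{
	struct tlb itlb = { .entry = 0x4000 }, dtlb = { .entry = 0x8000 };

	flush_one_page(&itlb, &dtlb, 0x4123);
	printf("itlb=%#lx dtlb=%#lx\n", itlb.entry, dtlb.entry);
	return 0;
}
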
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 197a7af3dd8a..679dee0bbd08 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -14,9 +14,8 @@
/*
* User space memory access functions
*/
-#include <linux/sched.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#define VERIFY_READ 0
@@ -29,7 +28,6 @@
*
* For historical reasons, these macros are grossly misnamed.
*/
-
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
#if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index 23f2ab67574c..8f5f1e81baf5 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,13 +3,16 @@
#
extra-y := head.o init_task.o vmlinux.lds
-obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
- switch_to.o mn10300_ksyms.o kernel_execve.o
+ switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
obj-$(CONFIG_GDBSTUB) += gdb-cache.o
endif
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
obj-$(CONFIG_PROFILE) += profile.o profile-low.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 02dc7e461fef..96f24fab7de6 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -23,6 +23,7 @@ void foo(void)
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_frame, thread_info, frame);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
OFFSET(THREAD_SP, thread_struct, sp);
OFFSET(THREAD_A3, thread_struct, a3);
OFFSET(THREAD_USP, thread_struct, usp);
- OFFSET(THREAD_FRAME, thread_struct, __frame);
+#ifdef CONFIG_FPU
+ OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags);
+ OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state);
+ DEFINE(__THREAD_USING_FPU, THREAD_USING_FPU);
+ DEFINE(__THREAD_HAS_FPU, THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+ BLANK();
+
+ OFFSET(TASK_THREAD, task_struct, thread);
BLANK();
DEFINE(CLONE_VM_asm, CLONE_VM);
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644
index 000000000000..d4cb535bf786
--- /dev/null
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ reload_jiffies_counter(delta - 1);
+ } else {
+ stop_jiffies_counter1();
+ reload_jiffies_counter1(delta - 1);
+ }
+ return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ stop_jiffies_counter();
+ else
+ stop_jiffies_counter1();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+ struct clock_event_device *cd;
+ struct irqaction *iact;
+ unsigned int cpu = smp_processor_id();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ cd->irq = TMJCIRQ;
+ } else {
+ stop_jiffies_counter1();
+ cd->irq = TMJC1IRQ;
+ }
+
+ cd->name = "Timestamp";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ clockevent_set_clock(cd, MN10300_JCCLK);
+
+ cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd);
+ cd->min_delta_ns = clockevent_delta2ns(100, cd);
+
+ cd->rating = 200;
+ cd->cpumask = cpumask_of(smp_processor_id());
+ cd->set_mode = set_clock_mode;
+ cd->event_handler = event_handler;
+ cd->set_next_event = next_event;
+
+ iact = &per_cpu(timer_irq, cpu);
+ iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+ iact->handler = timer_interrupt;
+
+ clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ /* setup timer irq affinity so it only runs on this cpu */
+ {
+ struct irq_desc *desc;
+ desc = irq_to_desc(cd->irq);
+ cpumask_copy(desc->affinity, cpumask_of(cpu));
+ iact->flags |= IRQF_NOBALANCING;
+ }
+#endif
+
+ if (cpu == 0) {
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU0 Timer";
+ } else {
+ reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU1 Timer";
+ }
+
+ setup_jiffies_interrupt(cd->irq, iact);
+
+ return 0;
+}
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644
index 000000000000..ba2f0c4d6e01
--- /dev/null
+++ b/arch/mn10300/kernel/csrc-mn10300.c
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+ return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+ .name = "TSC",
+ .rating = 200,
+ .read = mn10300_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+ startup_timestamp_counter();
+ clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+ clocksource_register(&clocksource_mn10300);
+ return 0;
+}
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 3d394b4eefba..f00b9bafcd3e 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -28,25 +28,17 @@
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
#ifdef CONFIG_PREEMPT
-#define preempt_stop __cli
+#define preempt_stop LOCAL_IRQ_DISABLE
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
- .macro __cli
- and ~EPSW_IM,epsw
- or EPSW_IE|MN10300_CLI_LEVEL,epsw
- nop
- nop
- nop
- .endm
- .macro __sti
- or EPSW_IE|EPSW_IM_7,epsw
- .endm
-
-
.am33_2
###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
syscall_exit:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_ALLWORK_MASK,d2
bne syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
syscall_exit_work:
btst _TIF_SYSCALL_TRACE,d2
beq work_pending
- __sti # could let syscall_trace_exit() call
+ LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
mov fp,d0
call syscall_trace_exit[],0 # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done other than syscall tracing?
mov (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
ENTRY(resume_userspace)
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done on int/exception return?
mov (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
@@ -216,31 +208,6 @@ ENTRY(irq_handler)
###############################################################################
#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
- movbu (0xae000001),d1
- cmp 1,d1
- beq monsignal
- ret [],0
-
-monsignal:
- or EPSW_NMID,epsw
- mov d0,a0
- mov a0,sp
- mov (REG_EPSW,fp),d1
- and ~EPSW_nSL,d1
- mov d1,(REG_EPSW,fp)
- movm (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
- mov (sp),a1
- mov a1,usp
- movm (sp),[other]
- add 4,sp
-here: jmp 0x8e000008-here+0x8e000008
-
-###############################################################################
-#
# Double Fault handler entry point
# - note that there will not be a stack, D0/A0 will hold EPSW/PC as were
#
@@ -276,6 +243,10 @@ double_fault_loop:
ENTRY(raw_bus_error)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
mov (BCBERR),d0 # what
btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error
beq __common_exception_aux # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
add -4,sp
mov d0,(sp)
mov (TBR),d0
+
+#ifdef CONFIG_SMP
+ add -4,sp
+ mov d0,(sp) # save d0(TBR)
+ movhu (NMIAGR),d0
+ and NMIAGR_GN,d0
+ lsr 0x2,d0
+ cmp CALL_FUNCTION_NMI_IPI,d0
+ bne 5f # if not call function, jump
+
+ # function call nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI request
+ movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+ movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+
+ mov (sp),d0 # restore d0
+ SAVE_ALL
+ call smp_nmi_call_function_interrupt[],0
+ RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+ cmp GDB_NMI_IPI,d0
+ bne 3f # if not gdb nmi ipi, jump
+
+ # gdb nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI
+ movbu d0,(GxICR(GDB_NMI_IPI))
+ movhu (GxICR(GDB_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ mov (gdbstub_nmi_opr_type),d0
+ cmp GDBSTUB_NMI_CACHE_PURGE,d0
+ bne 4f # if not gdb cache purge, jump
+
+ # gdb cache purge nmi ipi
+ add -20,sp
+ mov d1,(4,sp)
+ mov a0,(8,sp)
+ mov a1,(12,sp)
+ mov mdr,d0
+ mov d0,(16,sp)
+ call gdbstub_local_purge_cache[],0
+ mov 0x1,d0
+ mov (CPUID),d1
+ asl d1,d0
+ mov gdbstub_nmi_cpumask,a0
+ bclr d0,(a0)
+ mov (4,sp),d1
+ mov (8,sp),a0
+ mov (12,sp),a1
+ mov (16,sp),d0
+ mov d0,mdr
+ add 20,sp
+ mov (sp),d0
+ add 4,sp
+ rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+ # gdb wait nmi ipi
+ mov (sp),d0
+ SAVE_ALL
+ call gdbstub_nmi_wait[],0
+ RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+ mov (sp),d0 # restore TBR to d0
+ add 4,sp
+#endif /* CONFIG_SMP */
+
bra __common_exception_nonmi
ENTRY(__common_exception)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
__common_exception_aux:
mov (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
mov d0,(REG_ORIG_D0,fp)
#ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+ call gdbstub_busy_check[],0
+ and d0,d0 # check return value
+ beq 2f
+#else /* CONFIG_SMP */
btst 0x01,(gdbstub_busy)
beq 2f
+#endif /* CONFIG_SMP */
and ~EPSW_IE,epsw
mov fp,d0
mov a2,d1
call gdbstub_exception[],0 # gdbstub itself caused an exception
bra restore_all
2:
-#endif
+#endif /* CONFIG_GDBSTUB */
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
add exception_table,d0
mov d1,(d0)
mov 4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
- jmp mn10300_dcache_flush_inv_range2
-#else
ret [],0
-#endif
###############################################################################
#
diff --git a/arch/mn10300/kernel/fpu-low.S b/arch/mn10300/kernel/fpu-low.S
index 96cfd47e68d5..78df25cfae29 100644
--- a/arch/mn10300/kernel/fpu-low.S
+++ b/arch/mn10300/kernel/fpu-low.S
@@ -8,25 +8,14 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
- .globl fpu_init_state
- .type fpu_init_state,@function
-fpu_init_state:
- mov epsw,d0
- or EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
- nop
- nop
- nop
-#endif
+.macro FPU_INIT_STATE_ALL
fmov 0,fs0
fmov fs0,fs1
fmov fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
fmov fs0,fs30
fmov fs0,fs31
fmov FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+ fmov fs0,(\areg+)
+ fmov fs1,(\areg+)
+ fmov fs2,(\areg+)
+ fmov fs3,(\areg+)
+ fmov fs4,(\areg+)
+ fmov fs5,(\areg+)
+ fmov fs6,(\areg+)
+ fmov fs7,(\areg+)
+ fmov fs8,(\areg+)
+ fmov fs9,(\areg+)
+ fmov fs10,(\areg+)
+ fmov fs11,(\areg+)
+ fmov fs12,(\areg+)
+ fmov fs13,(\areg+)
+ fmov fs14,(\areg+)
+ fmov fs15,(\areg+)
+ fmov fs16,(\areg+)
+ fmov fs17,(\areg+)
+ fmov fs18,(\areg+)
+ fmov fs19,(\areg+)
+ fmov fs20,(\areg+)
+ fmov fs21,(\areg+)
+ fmov fs22,(\areg+)
+ fmov fs23,(\areg+)
+ fmov fs24,(\areg+)
+ fmov fs25,(\areg+)
+ fmov fs26,(\areg+)
+ fmov fs27,(\areg+)
+ fmov fs28,(\areg+)
+ fmov fs29,(\areg+)
+ fmov fs30,(\areg+)
+ fmov fs31,(\areg+)
+ fmov fpcr,\dreg
+ mov \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+ fmov (\areg+),fs0
+ fmov (\areg+),fs1
+ fmov (\areg+),fs2
+ fmov (\areg+),fs3
+ fmov (\areg+),fs4
+ fmov (\areg+),fs5
+ fmov (\areg+),fs6
+ fmov (\areg+),fs7
+ fmov (\areg+),fs8
+ fmov (\areg+),fs9
+ fmov (\areg+),fs10
+ fmov (\areg+),fs11
+ fmov (\areg+),fs12
+ fmov (\areg+),fs13
+ fmov (\areg+),fs14
+ fmov (\areg+),fs15
+ fmov (\areg+),fs16
+ fmov (\areg+),fs17
+ fmov (\areg+),fs18
+ fmov (\areg+),fs19
+ fmov (\areg+),fs20
+ fmov (\areg+),fs21
+ fmov (\areg+),fs22
+ fmov (\areg+),fs23
+ fmov (\areg+),fs24
+ fmov (\areg+),fs25
+ fmov (\areg+),fs26
+ fmov (\areg+),fs27
+ fmov (\areg+),fs28
+ fmov (\areg+),fs29
+ fmov (\areg+),fs30
+ fmov (\areg+),fs31
+ mov (\areg),\dreg
+ fmov \dreg,fpcr
+.endm
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+ .globl fpu_init_state
+ .type fpu_init_state,@function
+fpu_init_state:
+ mov epsw,d0
+ or EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+ nop
+ nop
+ nop
+#endif
+ FPU_INIT_STATE_ALL
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -89,40 +171,7 @@ fpu_save:
nop
#endif
mov d0,a0
- fmov fs0,(a0+)
- fmov fs1,(a0+)
- fmov fs2,(a0+)
- fmov fs3,(a0+)
- fmov fs4,(a0+)
- fmov fs5,(a0+)
- fmov fs6,(a0+)
- fmov fs7,(a0+)
- fmov fs8,(a0+)
- fmov fs9,(a0+)
- fmov fs10,(a0+)
- fmov fs11,(a0+)
- fmov fs12,(a0+)
- fmov fs13,(a0+)
- fmov fs14,(a0+)
- fmov fs15,(a0+)
- fmov fs16,(a0+)
- fmov fs17,(a0+)
- fmov fs18,(a0+)
- fmov fs19,(a0+)
- fmov fs20,(a0+)
- fmov fs21,(a0+)
- fmov fs22,(a0+)
- fmov fs23,(a0+)
- fmov fs24,(a0+)
- fmov fs25,(a0+)
- fmov fs26,(a0+)
- fmov fs27,(a0+)
- fmov fs28,(a0+)
- fmov fs29,(a0+)
- fmov fs30,(a0+)
- fmov fs31,(a0+)
- fmov fpcr,d0
- mov d0,(a0)
+ FPU_SAVE_ALL a0,d0
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -135,63 +184,75 @@ fpu_save:
###############################################################################
#
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is enabled
#
###############################################################################
- .globl fpu_restore
- .type fpu_restore,@function
-fpu_restore:
- mov epsw,d1
- or EPSW_FE,epsw /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ or EPSW_nAR|EPSW_FE,epsw
nop
nop
-#endif
- mov d0,a0
- fmov (a0+),fs0
- fmov (a0+),fs1
- fmov (a0+),fs2
- fmov (a0+),fs3
- fmov (a0+),fs4
- fmov (a0+),fs5
- fmov (a0+),fs6
- fmov (a0+),fs7
- fmov (a0+),fs8
- fmov (a0+),fs9
- fmov (a0+),fs10
- fmov (a0+),fs11
- fmov (a0+),fs12
- fmov (a0+),fs13
- fmov (a0+),fs14
- fmov (a0+),fs15
- fmov (a0+),fs16
- fmov (a0+),fs17
- fmov (a0+),fs18
- fmov (a0+),fs19
- fmov (a0+),fs20
- fmov (a0+),fs21
- fmov (a0+),fs22
- fmov (a0+),fs23
- fmov (a0+),fs24
- fmov (a0+),fs25
- fmov (a0+),fs26
- fmov (a0+),fs27
- fmov (a0+),fs28
- fmov (a0+),fs29
- fmov (a0+),fs30
- fmov (a0+),fs31
- mov (a0),d0
- fmov d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
nop
+
+ mov sp,a1
+ mov (a1),d1 /* get epsw of user context */
+ and ~(THREAD_SIZE-1),a1 /* a1: (thread_info *ti) */
+ mov (TI_task,a1),a2 /* a2: (task_struct *tsk) */
+ btst EPSW_nSL,d1
+ beq fpu_used_in_kernel
+
+ or EPSW_FE,d1
+ mov d1,(sp)
+ mov (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+ or __THREAD_HAS_FPU,d1
+ mov d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else /* !CONFIG_LAZY_SAVE_FPU */
+ mov (fpu_state_owner),a0
+ cmp 0,a0
+ beq fpu_regs_save_end
+
+ mov (TASK_THREAD+THREAD_UREGS,a0),a1
+ add TASK_THREAD+THREAD_FPU_STATE,a0
+ FPU_SAVE_ALL a0,d0
+
+ mov (REG_EPSW,a1),d0
+ and ~EPSW_FE,d0
+ mov d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+ mov a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+ btst __THREAD_USING_FPU,d1
+ beq fpu_regs_init
+ add TASK_THREAD+THREAD_FPU_STATE,a2
+ FPU_RESTORE_ALL a2,d0
+ rti
+
+fpu_regs_init:
+ FPU_INIT_STATE_ALL
+ add TASK_THREAD+THREAD_FPU_FLAGS,a2
+ bset __THREAD_USING_FPU,(0,a2)
+ rti
+
+fpu_used_in_kernel:
+ and ~(EPSW_nAR|EPSW_FE),epsw
nop
nop
-#endif
- mov d1,epsw
- ret [],0
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call fpu_disabled_in_kernel[],0
+ jmp ret_from_exception
- .size fpu_restore,.-fpu_restore
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644
index 000000000000..7ea087a549f4
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu-low.S
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is disabled
+#
+###############################################################################
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call unexpected_fpu_exception[],0
+ jmp ret_from_exception
+
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644
index 000000000000..31c765b92c5d
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu.c
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ * be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+ panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+ return 0; /* not valid */
+}
diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c
index e705f25ad5ff..5f9c3fa19a85 100644
--- a/arch/mn10300/kernel/fpu.c
+++ b/arch/mn10300/kernel/fpu.c
@@ -12,56 +12,19 @@
#include <asm/fpu.h>
#include <asm/elf.h>
#include <asm/exceptions.h>
+#include <asm/system.h>
+#ifdef CONFIG_LAZY_SAVE_FPU
struct task_struct *fpu_state_owner;
+#endif
/*
- * handle an exception due to the FPU being disabled
+ * error handling for the FPU Disabled exception
*/
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
{
- struct task_struct *tsk = current;
-
- if (!user_mode(regs))
- die_if_no_fixup("An FPU Disabled exception happened in"
- " kernel space\n",
- regs, code);
-
-#ifdef CONFIG_FPU
- preempt_disable();
-
- /* transfer the last process's FPU state to memory */
- if (fpu_state_owner) {
- fpu_save(&fpu_state_owner->thread.fpu_state);
- fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
- }
-
- /* the current process now owns the FPU state */
- fpu_state_owner = tsk;
- regs->epsw |= EPSW_FE;
-
- /* load the FPU with the current process's FPU state or invent a new
- * clean one if the process doesn't have one */
- if (is_using_fpu(tsk)) {
- fpu_restore(&tsk->thread.fpu_state);
- } else {
- fpu_init_state();
- set_using_fpu(tsk);
- }
-
- preempt_enable();
-#else
- {
- siginfo_t info;
-
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void *) tsk->thread.uregs->pc;
- info.si_code = FPE_FLTINV;
-
- force_sig_info(SIGFPE, &info, tsk);
- }
-#endif /* CONFIG_FPU */
+ die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+ regs, EXCEP_FPU_DISABLED);
}
/*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
*/
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
- struct task_struct *tsk = fpu_state_owner;
+ struct task_struct *tsk = current;
siginfo_t info;
+ u32 fpcr;
if (!user_mode(regs))
die_if_no_fixup("An FPU Operation exception happened in"
" kernel space\n",
regs, code);
- if (!tsk)
+ if (!is_using_fpu(tsk))
die_if_no_fixup("An FPU Operation exception happened,"
" but the FPU is not in use",
regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
info.si_addr = (void *) tsk->thread.uregs->pc;
info.si_code = FPE_FLTINV;
-#ifdef CONFIG_FPU
- {
- u32 fpcr;
+ unlazy_fpu(tsk);
- /* get FPCR (we need to enable the FPU whilst we do this) */
- asm volatile(" or %1,epsw \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " fmov fpcr,%0 \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " and %2,epsw \n"
- : "=&d"(fpcr)
- : "i"(EPSW_FE), "i"(~EPSW_FE)
- );
-
- if (fpcr & FPCR_EC_Z)
- info.si_code = FPE_FLTDIV;
- else if (fpcr & FPCR_EC_O)
- info.si_code = FPE_FLTOVF;
- else if (fpcr & FPCR_EC_U)
- info.si_code = FPE_FLTUND;
- else if (fpcr & FPCR_EC_I)
- info.si_code = FPE_FLTRES;
- }
-#endif
+ fpcr = tsk->thread.fpu_state.fpcr;
+
+ if (fpcr & FPCR_EC_Z)
+ info.si_code = FPE_FLTDIV;
+ else if (fpcr & FPCR_EC_O)
+ info.si_code = FPE_FLTOVF;
+ else if (fpcr & FPCR_EC_U)
+ info.si_code = FPE_FLTUND;
+ else if (fpcr & FPCR_EC_I)
+ info.si_code = FPE_FLTRES;
force_sig_info(SIGFPE, &info, tsk);
}
/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ die_if_no_fixup("FPU invalid opcode", regs, code);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_COPROC;
+ info.si_addr = (void *) regs->pc;
+ force_sig_info(info.si_signo, &info, current);
+}
+
+/*
* save the FPU state to a signal context
*/
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
-#ifdef CONFIG_FPU
struct task_struct *tsk = current;
if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_save(&tsk->thread.fpu_state);
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
return -1;
return 1;
-#else
- return 0;
-#endif
}
/*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
void fpu_kill_state(struct task_struct *tsk)
{
-#ifdef CONFIG_FPU
/* disown anything left in the FPU */
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
-#endif
+
/* we no longer have a valid current FPU state */
clear_using_fpu(tsk);
}
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
int ret;
/* load up the old FPU state */
- ret = copy_from_user(&tsk->thread.fpu_state,
- fpucontext,
+ ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
min(sizeof(struct fpu_state_struct),
sizeof(struct fpucontext)));
if (!ret)
diff --git a/arch/mn10300/kernel/gdb-io-serial-low.S b/arch/mn10300/kernel/gdb-io-serial-low.S
index 4998b24f5d3a..b1d0152e96cb 100644
--- a/arch/mn10300/kernel/gdb-io-serial-low.S
+++ b/arch/mn10300/kernel/gdb-io-serial-low.S
@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/frame.inc>
#include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
#include <unit/serial.h>
.text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
bra gdbstub_io_rx_done
gdbstub_io_rx_enter:
- or EPSW_IE|EPSW_IM_1,epsw
+ LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
add -4,sp
SAVE_ALL
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
mov fp,d0
call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep)
- and ~EPSW_IE,epsw
+ LOCAL_CLI
bclr 0x01,(gdbstub_busy)
.globl gdbstub_return
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c
index ae663dc717e9..0d5d63c91dc3 100644
--- a/arch/mn10300/kernel/gdb-io-serial.c
+++ b/arch/mn10300/kernel/gdb-io-serial.c
@@ -23,6 +23,7 @@
#include <asm/exceptions.h>
#include <asm/serial-regs.h>
#include <unit/serial.h>
+#include <asm/smp.h>
/*
* initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
+#if CONFIG_GDBSTUB_IRQ_LEVEL == 0
IVAR0 = EXCEP_IRQ_LEVEL0;
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
- XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+ XIRQxICR(GDBPORT_SERIAL_IRQ) =
+ GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
/* permit level 0 IRQs to take place */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
- );
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c
index a560bbc3137d..97dfda23342c 100644
--- a/arch/mn10300/kernel/gdb-io-ttysm.c
+++ b/arch/mn10300/kernel/gdb-io-ttysm.c
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
gdbstub_io_set_baud(115200);
/* we want to get serial receive interrupts */
- set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
- set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+ set_intr_level(gdbstub_port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_level(gdbstub_port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
tmp = *gdbstub_port->_control;
/* permit level 0 IRQs only */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
- );
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -201,8 +202,9 @@ try_again:
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c
index 41b11706c8ed..a5fc3f05309b 100644
--- a/arch/mn10300/kernel/gdb-stub.c
+++ b/arch/mn10300/kernel/gdb-stub.c
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
static int __gdbstub_mark_bp(u8 *addr, int ix)
{
- if (addr < (u8 *) 0x70000000UL)
- return 0;
- /* 70000000-7fffffff: vmalloc area */
- if (addr < (u8 *) 0x80000000UL)
+ /* vmalloc area */
+ if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
goto okay;
- if (addr < (u8 *) 0x8c000000UL)
- return 0;
- /* 8c000000-93ffffff: SRAM, SDRAM */
- if (addr < (u8 *) 0x94000000UL)
+ /* SRAM, SDRAM */
+ if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
goto okay;
return 0;
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
mn10300_set_gdbleds(1);
asm volatile("mov mdr,%0" : "=d"(mdr));
- asm volatile("mov epsw,%0" : "=d"(epsw));
- asm volatile("mov %0,epsw"
- :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+ local_save_flags(epsw);
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
gdbstub_store_fpu();
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S
index 14f27f3bfaf4..73e00fc78072 100644
--- a/arch/mn10300/kernel/head.S
+++ b/arch/mn10300/kernel/head.S
@@ -19,6 +19,12 @@
#include <asm/frame.inc>
#include <asm/param.h>
#include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
__HEAD
@@ -30,17 +36,51 @@
.globl _start
.type _start,@function
_start:
+#ifdef CONFIG_SMP
+ #
+ # If this is a secondary CPU (AP), then deal with that elsewhere
+ #
+ mov (CPUID),d3
+ and CPUID_MASK,d3
+ bne startup_secondary
+
+ #
+ # We're dealing with the primary CPU (BP) here, then.
+ # Keep BP's D0,D1,D2 register for boot check.
+ #
+
+ # Set up the Boot IPI for each secondary CPU
+ mov 0x1,a0
+loop_set_secondary_icr:
+ mov a0,a1
+ asl CROSS_ICR_CPU_SHIFT,a1
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+ movhu (a1),d3
+ or GxICR_ENABLE|GxICR_LEVEL_0,d3
+ movhu d3,(a1)
+ movhu (a1),d3 # flush
+ inc a0
+ cmp NR_CPUS,a0
+ bne loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
# save commandline pointer
mov d0,a3
# preload the PGD pointer register
mov swapper_pg_dir,d0
mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
# turn on the TLBs
mov MMUCTR_IIV|MMUCTR_DIV,d0
mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
mov d0,(MMUCTR)
# turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
mov d0,(TBR)
# invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+#endif
mov CHCTR,a0
clr d0
movhu d0,(a0) # turn off first
@@ -61,18 +106,18 @@ _start:
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_CACHE_WBACK
#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
#endif /* WBACK */
movhu d0,(a0) # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
# turn on RTS on the debug serial port if applicable
#ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
call processor_init[],0
call unit_init[],0
+#ifdef CONFIG_SMP
+ # mark the primary CPU in cpu_boot_map
+ mov cpu_boot_map,a0
+ mov 0x1,d0
+ mov d0,(a0)
+
+ # signal each secondary CPU to begin booting
+ mov 0x1,d2 # CPU ID
+
+loop_request_boot_secondary:
+ mov d2,a0
+ # send SMP_BOOT_IPI to secondary CPU
+ asl CROSS_ICR_CPU_SHIFT,a0
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+ movhu (a0),d0
+ or GxICR_REQUEST|GxICR_DETECT,d0
+ movhu d0,(a0)
+ movhu (a0),d0 # flush
+
+ # wait up to 100ms for AP's IPI to be received
+ clr d3
+wait_on_secondary_boot:
+ mov DELAY_TIME_BOOT_IPI,d0
+ call __delay[],0
+ inc d3
+ mov cpu_boot_map,a0
+ mov (a0),d0
+ lsr d2,d0
+ btst 0x1,d0
+ bne 1f
+ cmp TIME_OUT_COUNT_BOOT_IPI,d3
+ bne wait_on_secondary_boot
+1:
+ inc d2
+ cmp NR_CPUS,d2
+ bne loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_GDBSTUB
call gdbstub_init[],0
@@ -217,7 +300,118 @@ __gdbstub_pause:
#endif
jmp start_kernel
- .size _start, _start-.
+ .size _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+ # preload the PGD pointer register
+ mov swapper_pg_dir,d0
+ mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
+
+ # turn on the TLBs
+ mov MMUCTR_IIV|MMUCTR_DIV,d0
+ mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+ mov d0,(MMUCTR)
+
+ # turn on AM33v2 exception handling mode and set the trap table base
+ movhu (CPUP),d0
+ or CPUP_EXM_AM33V2,d0
+ movhu d0,(CPUP)
+
+ # set the interrupt vector table
+ mov CONFIG_INTERRUPT_VECTOR_BASE,d0
+ mov d0,(TBR)
+
+ # invalidate and enable both of the caches
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+ mov CHCTR,a0
+ clr d0
+ movhu d0,(a0) # turn off first
+ mov CHCTR_ICINV|CHCTR_DCINV,d0
+ movhu d0,(a0)
+ setlb
+ mov (a0),d0
+ btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif /* !NOWRALLOC */
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
+ movhu d0,(a0) # enable
+#endif /* ENABLED */
+
+ # Clear the boot IPI interrupt for this CPU
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush
+
+ /* get stack */
+ mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+ mov (CPUID),d0
+ and CPUID_MASK,d0
+ mulu CONFIG_BOOT_STACK_SIZE,d0
+ sub d0,a0
+ mov a0,sp
+
+ # init interrupt for AP
+ call smp_prepare_cpu_init[],0
+
+ # mark this secondary CPU in cpu_boot_map
+ mov (CPUID),d0
+ mov 0x1,d1
+ asl d0,d1
+ mov cpu_boot_map,a0
+ bset d1,(a0)
+
+ or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts
+ nop
+ nop
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ # flush the local cache if it's in writeback mode
+ call mn10300_local_dcache_flush_inv[],0
+ setlb
+ mov (CHCTR),d0
+ btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+#endif
+
+ # now sleep waiting for further instructions
+secondary_sleep:
+ mov CPUM_SLEEP,d0
+ movhu d0,(CPUM)
+ nop
+ nop
+ bra secondary_sleep
+ .size startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
+
+###############################################################################
+#
+#
+#
+###############################################################################
ENTRY(__head_end)
/*
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index eee2eee86267..6a064ab5af07 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
+struct clocksource;
+struct clock_event_device;
+
/*
* kthread.S
*/
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int);
* entry.S
*/
extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index e2d5ed891f37..c2e44597c22b 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -12,11 +12,26 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <asm/setup.h>
+#include <asm/serial-regs.h>
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+ [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+ [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+ [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif /* CONFIG_SMP */
+
atomic_t irq_err_count;
/*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
*/
static void mn10300_cpupic_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
- *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
}
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+ unsigned int mask, unsigned int set)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL);
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & mask) | set;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL, 0);
}
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ u16 tmp2;
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL);
+ tmp2 = GxICR(irq);
+
+ irq_affinity_online[irq] =
+ any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) =
+ (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
static void mn10300_cpupic_unmask(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
- tmp = GxICR(irq);
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
* device has ceased to assert its interrupt line and the interrupt
* channel has been disabled in the PIC, so for level-triggered
* interrupts we need to clear the request bit when we re-enable */
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ tmp = GxICR(irq);
+
+ irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+ unsigned long flags;
+ int err;
+
+ flags = arch_local_cli_save();
+
+ /* check irq no */
+ switch (irq) {
+ case TMJCIRQ:
+ case RESCHEDULE_IPI:
+ case CALL_FUNC_SINGLE_IPI:
+ case LOCAL_TIMER_IPI:
+ case FLUSH_CACHE_IPI:
+ case CALL_FUNCTION_NMI_IPI:
+ case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+ case SC0RXIRQ:
+ case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+ case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+ case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case SC1RXIRQ:
+ case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+ case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+ case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+ case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case SC2RXIRQ:
+ case SC2TXIRQ:
+ case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+ err = -1;
+ break;
+
+ default:
+ set_bit(irq, irq_affinity_request);
+ err = 0;
+ break;
+ }
+
+ arch_local_irq_restore(flags);
+ return err;
+}
+#endif /* CONFIG_SMP */
+
/*
* MN10300 PIC level-triggered IRQ handling.
*
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask,
.unmask = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask_ack,
.unmask = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
*/
void set_intr_level(int irq, u16 level)
{
- u16 tmp;
+ BUG_ON(in_interrupt());
- if (in_interrupt())
- BUG();
+ __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
- tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_ENABLE) | level;
- tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+ set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+ __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+ mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+ mn10300_cpupic_mask(irq);
}
/*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
* than before
* - see Documentation/mn10300/features.txt
*/
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
{
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
* interrupts */
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
+
unit_init_IRQ();
}
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
asmlinkage void do_IRQ(void)
{
unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+ unsigned int cpu_id = smp_processor_id();
int irq;
sp = current_stack_pointer();
- if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
- BUG();
+ BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
/* make sure local_irq_enable() doesn't muck up the interrupt priority
* setting in EPSW */
- old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+ old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
local_save_flags(epsw);
- __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+ __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
- __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+ __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
irq_enter();
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
local_irq_restore(epsw);
}
- __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+ __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
irq_exit();
}
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
- (GxICR(i) & GxICR_LEVEL) >>
- GxICR_LEVEL_SHIFT);
+
+ if (i < NR_CPU_IRQS)
+ seq_printf(p, " %14s.%u",
+ irq_desc[i].chip->name,
+ (GxICR(i) & GxICR_LEVEL) >>
+ GxICR_LEVEL_SHIFT);
+ else
+ seq_printf(p, " %14s",
+ irq_desc[i].chip->name);
+
seq_printf(p, " %s", action->name);
for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
/* polish off with NMI and error counters */
case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
+#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+ irq_desc_t *desc;
+ int irq;
+ unsigned int self, new;
+ unsigned long flags;
+
+ self = smp_processor_id();
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ desc = irq_desc + irq;
+
+ if (desc->status == IRQ_PER_CPU)
+ continue;
+
+ if (cpu_isset(self, irq_desc[irq].affinity) &&
+ !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+ int cpu_id;
+ cpu_id = first_cpu(cpu_online_map);
+ cpu_set(cpu_id, irq_desc[irq].affinity);
+ }
+ /* We need to operate irq_affinity_online atomically. */
+ flags = arch_local_cli_save();
+ if (irq_affinity_online[irq] == self) {
+ u16 x, tmp;
+
+ x = GxICR(irq);
+ GxICR(irq) = x & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ new = any_online_cpu(irq_desc[irq].affinity);
+ irq_affinity_online[irq] = new;
+
+ CROSS_GxICR(irq, new) =
+ (x & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, new);
+
+ x &= GxICR_LEVEL | GxICR_ENABLE;
+ if (GxICR(irq) & GxICR_REQUEST) {
+ x |= GxICR_REQUEST | GxICR_DETECT;
+ CROSS_GxICR(irq, new) = x;
+ tmp = CROSS_GxICR(irq, new);
+ }
+ }
+ arch_local_irq_restore(flags);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index 67e6389d625a..0311a7fcea16 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
*p->addr = p->opcode;
regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
static inline
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S
index 66702d256610..dfc1b6f2fa9a 100644
--- a/arch/mn10300/kernel/mn10300-serial-low.S
+++ b/arch/mn10300/kernel/mn10300-serial-low.S
@@ -39,7 +39,7 @@
###############################################################################
.balign L1_CACHE_BYTES
ENTRY(mn10300_serial_vdma_interrupt)
- or EPSW_IE,psw # permit overriding by
+# or EPSW_IE,psw # permit overriding by
# debugging interrupts
movm [d2,d3,a2,a3,exreg0],(sp)
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
rti
mnsc_vdma_tx_empty:
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable the interrupt
movhu (e3),d2 # flush
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
movhu (SCxCTR,e2),d2 # turn on break mode
or SC01CTR_BKE,d2
movhu d2,(SCxCTR,e2)
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable transmit interrupts on this
# channel
movhu (e3),d2 # flush
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index db509dd80565..996384dba45d 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
#include <unit/timex.h>
#include "mn10300-serial.h"
+#ifdef CONFIG_SMP
+#undef GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
#define kenter(FMT, ...) \
printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
#define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
#define _proto(FMT, ...) \
no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000 /* change Transfer bit-order */
+#endif
+
#define NR_UARTS 3
#ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
.name = "ttySM0",
._iobase = &SC0CTR,
._control = &SC0CTR,
- ._status = (volatile u8 *) &SC0STR,
+ ._status = (volatile u8 *)&SC0STR,
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
.rx_name = "ttySM0:Rx",
.tx_name = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
.tm_name = "ttySM0:Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ .tm_name = "ttySM0:Timer0",
+ ._tmxmd = &TM0MD,
+ ._tmxbr = (volatile u16 *)&TM0BR,
+ ._tmicr = &TM0ICR,
+ .tm_irq = TM0IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
.tm_name = "ttySM0:Timer2",
._tmxmd = &TM2MD,
- ._tmxbr = (volatile u16 *) &TM2BR,
+ ._tmxbr = (volatile u16 *)&TM2BR,
._tmicr = &TM2ICR,
.tm_irq = TM2IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
#endif
.rx_irq = SC0RXIRQ,
.tx_irq = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
.name = "ttySM1",
._iobase = &SC1CTR,
._control = &SC1CTR,
- ._status = (volatile u8 *) &SC1STR,
+ ._status = (volatile u8 *)&SC1STR,
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
.rx_name = "ttySM1:Rx",
.tx_name = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
.tm_name = "ttySM1:Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
.tm_name = "ttySM1:Timer3",
._tmxmd = &TM3MD,
- ._tmxbr = (volatile u16 *) &TM3BR,
+ ._tmxbr = (volatile u16 *)&TM3BR,
._tmicr = &TM3ICR,
.tm_irq = TM3IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ .tm_name = "ttySM1/Timer12",
+ ._tmxmd = &TM12MD,
+ ._tmxbr = &TM12BR,
+ ._tmicr = &TM12ICR,
+ .tm_irq = TM12IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
#endif
.rx_irq = SC1RXIRQ,
.tx_irq = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2:Rx",
- .tx_name = "ttySM2:Tx",
- .tm_name = "ttySM2:Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
- ._status = &SC2STR,
+ ._status = (volatile u8 *)&SC2STR,
._intr = &SC2ICR,
._rxb = &SC2RXB,
._txb = &SC2TXB,
+ .rx_name = "ttySM2:Rx",
+ .tx_name = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ .tm_name = "ttySM2/Timer10",
._tmxmd = &TM10MD,
._tmxbr = &TM10BR,
._tmicr = &TM10ICR,
.tm_irq = TM10IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ .tm_name = "ttySM2/Timer9",
+ ._tmxmd = &TM9MD,
+ ._tmxbr = &TM9BR,
+ ._tmicr = &TM9ICR,
+ .tm_irq = TM9IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ .tm_name = "ttySM2/Timer1",
+ ._tmxmd = &TM1MD,
+ ._tmxbr = (volatile u16 *)&TM1BR,
+ ._tmicr = &TM1ICR,
+ .tm_irq = TM1IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ .tm_name = "ttySM2/Timer3",
+ ._tmxmd = &TM3MD,
+ ._tmxbr = (volatile u16 *)&TM3BR,
+ ._tmicr = &TM3ICR,
+ .tm_irq = TM3IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
.rx_irq = SC2RXIRQ,
.tx_irq = SC2TXIRQ,
.rx_icr = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
*/
static void mn10300_serial_mask_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
+
+ flags = arch_local_cli_save();
GxICR(irq) = GxICR_LEVEL_6;
tmp = GxICR(irq); /* flush write buffer */
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr =
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->rx_icr;
+ arch_local_irq_restore(flags);
}
/*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
static void mn10300_serial_set_mctrl(struct uart_port *_port,
unsigned int mctrl)
{
- struct mn10300_serial_port *port =
+ struct mn10300_serial_port *port __attribute__ ((unused)) =
container_of(_port, struct mn10300_serial_port, uart);
_enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
UART_XMIT_SIZE));
/* kick the virtual DMA controller */
+ arch_local_cli();
x = *port->tx_icr;
x |= GxICR_ENABLE;
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
_debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
*port->_control, *port->_intr, *port->_status,
- *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+ *port->_tmxmd,
+ (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+ *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+ *port->tx_icr);
*port->tx_icr = x;
x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
pint->port = port;
pint->vdma = mn10300_serial_vdma_tx_handler;
- set_intr_level(port->rx_irq, GxICR_LEVEL_1);
- set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+ set_intr_level(port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+ set_intr_level(port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
set_irq_chip(port->tm_irq, &mn10300_serial_pic);
if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
*/
static void mn10300_serial_shutdown(struct uart_port *_port)
{
+ u16 x;
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
free_irq(port->rx_irq, port);
free_irq(port->tx_irq, port);
- *port->rx_icr = GxICR_LEVEL_1;
- *port->tx_icr = GxICR_LEVEL_1;
+ arch_local_cli();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->rx_icr;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
/* Determine divisor based on baud rate */
battempt = 0;
- if (div_timer == MNSCx_DIV_TIMER_16BIT)
- scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
- * == SC2CTR_CK_TM10UFLOW) */
- else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+ switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+ case 0: /* ttySM0 */
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
+ scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+ break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
+ scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+ scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+ default:
+ break;
+ }
try_alternative:
baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
ctr &= ~SC2CTR_TWE;
*port->_control = ctr;
}
+
+	/* change the transfer bit order (LSB/MSB first) */
+ if (new->c_cflag & CODMSB)
+ *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+ else
+ *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
}
/*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
printk(KERN_INFO "%s version %s (%s)\n",
serial_name, serial_version, serial_revdate);
-#ifdef CONFIG_MN10300_TTYSM2
- SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+ {
+ int tmp;
+ SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+ tmp = SC2TIM;
+ }
#endif
- set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+ mn10300_serial_vdma_interrupt);
ret = uart_register_driver(&mn10300_serial_driver);
if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
port = mn10300_serial_ports[co->index];
/* firstly hijack the serial port from the "virtual DMA" controller */
+ arch_local_cli();
txicr = *port->tx_icr;
- *port->tx_icr = GxICR_LEVEL_1;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
tmp = *port->tx_icr;
+ arch_local_sti();
/* the transmitter may be disabled */
scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
if (!(scxctr & SC01CTR_TXE))
*port->_control = scxctr;
+ arch_local_cli();
*port->tx_icr = txicr;
tmp = *port->tx_icr;
+ arch_local_sti();
}
/*
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S
index 996244745cca..f2f5c9cfaabd 100644
--- a/arch/mn10300/kernel/mn10300-watchdog-low.S
+++ b/arch/mn10300/kernel/mn10300-watchdog-low.S
@@ -16,6 +16,7 @@
#include <asm/intctl-regs.h>
#include <asm/timer-regs.h>
#include <asm/frame.inc>
+#include <linux/threads.h>
.text
@@ -53,7 +54,13 @@ watchdog_handler:
.type touch_nmi_watchdog,@function
touch_nmi_watchdog:
clr d0
- mov d0,(watchdog_alert_counter)
+ clr d1
+ mov watchdog_alert_counter, a0
+ setlb
+ mov d0, (a0+)
+ inc d1
+ cmp NR_CPUS, d1
+ lne
ret [],0
.size touch_nmi_watchdog,.-touch_nmi_watchdog
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index f362d9d138f1..c5e12bfd9fcd 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -30,7 +30,7 @@
static DEFINE_SPINLOCK(watchdog_print_lock);
static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
* is to check its timer makes IRQ counts. If they are not
* changing then that CPU has some problem.
*
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
* since NMIs dont listen to _any_ locks, we have to be extremely
* careful not to rely on unsafe variables. The printk might lock
* up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
printk(KERN_INFO "OK.\n");
- /* now that we know it works we can reduce NMI frequency to
- * something more reasonable; makes a difference in some configs
+ /* now that we know it works we can reduce NMI frequency to something
+ * more reasonable; makes a difference in some configs
*/
watchdog_hz = 1;
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
}
}
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+ printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+ show_registers(current_frame());
+}
+#endif
+
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
-
/*
* Since current-> is always on the stack, and we always switch
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
- int sum, cpu = smp_processor_id();
+ int sum, cpu;
int irq = NMIIRQ;
u8 wdt, tmp;
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
tmp = WDCTR;
NMICR = NMICR_WDIF;
- nmi_count(cpu)++;
+ nmi_count(smp_processor_id())++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
- sum = irq_stat[cpu].__irq_count;
-
- if (last_irq_sums[cpu] == sum) {
- /*
- * Ayiee, looks like this CPU is stuck ...
- * wait a few IRQs (5 seconds) before doing the oops ...
- */
- watchdog_alert_counter++;
- if (watchdog_alert_counter == 5 * watchdog_hz) {
- spin_lock(&watchdog_print_lock);
+
+ for_each_online_cpu(cpu) {
+
+ sum = irq_stat[cpu].__irq_count;
+
+ if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+ && !(CHK_GDBSTUB_BUSY()
+ || atomic_read(&cpu_doing_single_step))
+#endif
+ ) {
/*
- * We are in trouble anyway, lets at least try
- * to get a message out.
+ * Ayiee, looks like this CPU is stuck ...
+ * wait a few IRQs (5 seconds) before doing the oops ...
*/
- bust_spinlocks(1);
- printk(KERN_ERR
- "NMI Watchdog detected LOCKUP on CPU%d,"
- " pc %08lx, registers:\n",
- cpu, regs->pc);
- show_registers(regs);
- printk("console shuts up ...\n");
- console_silent();
- spin_unlock(&watchdog_print_lock);
- bust_spinlocks(0);
+ watchdog_alert_counter[cpu]++;
+ if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+ spin_lock(&watchdog_print_lock);
+ /*
+ * We are in trouble anyway, lets at least try
+ * to get a message out.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ERR
+ "NMI Watchdog detected LOCKUP on CPU%d,"
+ " pc %08lx, registers:\n",
+ cpu, regs->pc);
+#ifdef CONFIG_SMP
+ printk(KERN_ERR
+ "--- Register Dump (CPU%d) ---\n",
+ CPUID);
+#endif
+ show_registers(regs);
+#ifdef CONFIG_SMP
+ smp_nmi_call_function(watchdog_dump_register,
+ NULL, 1);
+#endif
+ printk(KERN_NOTICE "console shuts up ...\n");
+ console_silent();
+ spin_unlock(&watchdog_print_lock);
+ bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
- if (gdbstub_busy)
- gdbstub_exception(regs, excep);
- else
- gdbstub_intercept(regs, excep);
+ if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+ gdbstub_exception(regs, excep);
+ else
+ gdbstub_intercept(regs, excep);
#endif
- do_exit(SIGSEGV);
+ do_exit(SIGSEGV);
+ }
+ } else {
+ last_irq_sums[cpu] = sum;
+ watchdog_alert_counter[cpu] = 0;
}
- } else {
- last_irq_sums[cpu] = sum;
- watchdog_alert_counter = 0;
}
WDCTR = wdt | WDCTR_WDRST;
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index f48373e2bc1c..0d0f8049a17b 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
* we use this if we don't have any better idle routine
*/
@@ -69,6 +70,35 @@ static void default_idle(void)
local_irq_enable();
}
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+ int oldval;
+
+ local_irq_enable();
+
+ /*
+ * Deal with another CPU just having chosen a thread to
+ * run here:
+ */
+ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+ if (!oldval) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ while (!need_resched())
+ cpu_relax();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ } else {
+ set_need_resched();
+ }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
/*
* the idle thread
* - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +107,6 @@ static void default_idle(void)
*/
void cpu_idle(void)
{
- int cpu = smp_processor_id();
-
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
@@ -86,10 +114,13 @@ void cpu_idle(void)
smp_rmb();
idle = pm_idle;
- if (!idle)
+ if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+ idle = poll_idle;
+#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
idle = default_idle;
-
- irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+ }
idle();
}
@@ -197,6 +228,7 @@ int copy_thread(unsigned long clone_flags,
unsigned long c_usp, unsigned long ustk_size,
struct task_struct *p, struct pt_regs *kregs)
{
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *c_uregs, *c_kregs, *uregs;
unsigned long c_ksp;
@@ -217,7 +249,7 @@ int copy_thread(unsigned long clone_flags,
/* the new TLS pointer is passed in as arg #5 to sys_clone() */
if (clone_flags & CLONE_SETTLS)
- c_uregs->e2 = __frame->d3;
+ c_uregs->e2 = current_frame()->d3;
/* set up the return kernel frame if called from kernel_thread() */
c_kregs = c_uregs;
@@ -235,7 +267,7 @@ int copy_thread(unsigned long clone_flags,
}
/* set up things up so the scheduler can start the new task */
- p->thread.__frame = c_kregs;
+ ti->frame = c_kregs;
p->thread.a3 = (unsigned long) c_kregs;
p->thread.sp = c_ksp;
p->thread.pc = (unsigned long) ret_from_fork;
@@ -247,25 +279,26 @@ int copy_thread(unsigned long clone_flags,
/*
* clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
*/
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, int __user *child_tidptr,
int __user *tlsptr)
{
- return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
- parent_tidptr, child_tidptr);
+ return do_fork(clone_flags, newsp ?: current_frame()->sp,
+ current_frame(), 0, parent_tidptr, child_tidptr);
}
asmlinkage long sys_fork(void)
{
- return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+ return do_fork(SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_vfork(void)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
- 0, NULL, NULL);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +312,7 @@ asmlinkage long sys_execve(const char __user *name,
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
- error = do_execve(filename, argv, envp, __frame);
+ error = do_execve(filename, argv, envp, current_frame());
putname(filename);
return error;
}
diff --git a/arch/mn10300/kernel/profile.c b/arch/mn10300/kernel/profile.c
index 20d7d0306b16..4f342f75d00c 100644
--- a/arch/mn10300/kernel/profile.c
+++ b/arch/mn10300/kernel/profile.c
@@ -41,7 +41,7 @@ static __init int profile_init(void)
tmp = TM11ICR;
printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
- mn10300_ioclk / 8 / (TM11BR + 1));
+ MN10300_IOCLK / 8 / (TM11BR + 1));
printk(KERN_INFO "Profile histogram stored %p-%p\n",
prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c
index cf847dabc1bd..5c0b07e61006 100644
--- a/arch/mn10300/kernel/ptrace.c
+++ b/arch/mn10300/kernel/ptrace.c
@@ -295,31 +295,31 @@ void ptrace_disable(struct task_struct *child)
/*
* handle the arch-specific side of process tracing
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
if (addr < NR_PTREGS << 2)
tmp = get_stack_long(child,
ptrace_regid_to_frame[addr]);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
ret = 0;
@@ -332,25 +332,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 4eef0e7224f6..e9e20f9a4dd3 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -20,18 +20,22 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
- ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
ts->tv_nsec = 0;
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ /* if rtc is way off in the past, set something reasonable */
+ if (ts->tv_sec < 0)
+ ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
}
/*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
*/
void __init calibrate_clock(void)
{
- unsigned long count0, counth, count1;
unsigned char status;
/* make sure the RTC is running and is set to operate in 24hr mode */
status = RTSRC;
RTCRB |= RTCRB_SET;
RTCRB |= RTCRB_TM_24HR;
+ RTCRB &= ~RTCRB_DM_BINARY;
RTCRA |= RTCRA_DVR;
RTCRA &= ~RTCRA_DVR;
RTCRB &= ~RTCRB_SET;
-
- /* work out the clock speed by counting clock cycles between ends of
- * the RTC update cycle - track the RTC through one complete update
- * cycle (1 second)
- */
- startup_timestamp_counter();
-
- while (!(RTCRA & RTCRA_UIP)) {}
- while ((RTCRA & RTCRA_UIP)) {}
-
- count0 = TMTSCBC;
-
- while (!(RTCRA & RTCRA_UIP)) {}
-
- counth = TMTSCBC;
-
- while ((RTCRA & RTCRA_UIP)) {}
-
- count1 = TMTSCBC;
-
- shutdown_timestamp_counter();
-
- MN10300_TSCCLK = count0 - count1; /* the timers count down */
- mn10300_rtc_update_period = counth - count1;
- MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
}
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index d464affcba0e..9e7a3209a3e1 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
+#include <linux/cpu.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/uaccess.h>
@@ -30,7 +31,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <proc/proc.h>
-#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/sections.h>
@@ -64,11 +64,13 @@ unsigned long memory_size;
struct thread_info *__current_ti = &init_thread_union.thread_info;
struct task_struct *__current = &init_task;
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
static const char *const mn10300_cputypes[] = {
- "am33v1",
- "am33v2",
- "am34v1",
+ "am33-1",
+ "am33-2",
+ "am34-1",
+ "am33-3",
+ "am34-2",
"unknown"
};
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
unit_setup();
+ smp_init_cpus();
parse_mem_cmdline(cmdline_p);
init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
void __init cpu_init(void)
{
unsigned long cpurev = CPUREV, type;
- unsigned long base, size;
type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
- printk(KERN_INFO "Matsushita %s, rev %ld\n",
+ printk(KERN_INFO "Panasonic %s, rev %ld\n",
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
- /* determine the memory size and base from the memory controller regs */
- memory_size = 0;
-
- base = SDBASE(0);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+ get_mem_info(&phys_memory_base, &memory_size);
+ phys_memory_end = phys_memory_base + memory_size;
- printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- phys_memory_base = base;
- }
+ fpu_init_state();
+}
- base = SDBASE(1);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
- printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- if (phys_memory_base == 0)
- phys_memory_base = base;
- }
+static int __init topology_init(void)
+{
+ int i;
- phys_memory_end = phys_memory_base + memory_size;
+ for_each_present_cpu(i)
+ register_cpu(&cpu_devices[i], i);
-#ifdef CONFIG_FPU
- fpu_init_state();
-#endif
+ return 0;
}
+subsys_initcall(topology_init);
+
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
+#ifdef CONFIG_SMP
+ struct mn10300_cpuinfo *c = v;
+ unsigned long cpu_id = c - cpu_data;
+ unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else /* CONFIG_SMP */
+ unsigned long cpu_id = 0;
unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
- type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu_id))
+ return 0;
+#endif
+
+ type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1024;
seq_printf(m,
- "processor : 0\n"
- "vendor_id : Matsushita\n"
+ "processor : %ld\n"
+ "vendor_id : " PROCESSOR_VENDOR_NAME "\n"
"cpu core : %s\n"
"cpu rev : %lu\n"
"model name : " PROCESSOR_MODEL_NAME "\n"
"icache size: %lu\n"
"dcache size: %lu\n",
+ cpu_id,
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"bogomips : %lu.%02lu\n\n",
MN10300_IOCLK / 1000000,
(MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+ c->loops_per_jiffy / (500000 / HZ),
+ (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else /* CONFIG_SMP */
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
);
return 0;
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index d4de05ab7864..690f4e9507d7 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
*/
asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
{
- return do_sigaltstack(uss, uoss, __frame->sp);
+ return do_sigaltstack(uss, uoss, current_frame()->sp);
}
/*
@@ -156,10 +156,11 @@ badframe:
*/
asmlinkage long sys_sigreturn(void)
{
- struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+ struct sigframe __user *frame;
sigset_t set;
long d0;
+ frame = (struct sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->sc, &d0))
+ if (restore_sigcontext(current_frame(), &frame->sc, &d0))
goto badframe;
return d0;
@@ -191,11 +192,11 @@ badframe:
*/
asmlinkage long sys_rt_sigreturn(void)
{
- struct rt_sigframe __user *frame =
- (struct rt_sigframe __user *) __frame->sp;
+ struct rt_sigframe __user *frame;
sigset_t set;
- unsigned long d0;
+ long d0;
+ frame = (struct rt_sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+ if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+ -EFAULT)
goto badframe;
return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(__frame);
+ tracehook_notify_resume(current_frame());
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644
index 000000000000..72938cefc05e
--- /dev/null
+++ b/arch/mn10300/kernel/smp-low.S
@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+ .am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+ .globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+ add -4,sp
+ mov d0,(sp)
+ movhu (IAGR),d0
+ and IAGR_GN,d0
+ lsr 0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ cmp FLUSH_CACHE_IPI,d0
+ beq mn10300_flush_cache_ipi
+#endif
+ cmp SMP_BOOT_IRQ,d0
+ beq mn10300_smp_boot_ipi
+ /* OTHERS */
+ mov (sp),d0
+ add 4,sp
+#ifdef CONFIG_GDBSTUB
+ jmp gdbstub_io_rx_handler
+#else
+ jmp end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+ mov (sp),d0
+ add 4,sp
+
+ /* FLUSH_CACHE_IPI */
+ add -4,sp
+ SAVE_ALL
+ mov GxICR_DETECT,d2
+ movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt
+ movhu (GxICR(FLUSH_CACHE_IPI)),d2
+ call smp_cache_interrupt[],0
+ RESTORE_ALL
+ jmp end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+ /* clear interrupt */
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ mov (sp),d0
+ add 4,sp
+
+ # get stack
+ mov (CPUID),a0
+ add -1,a0
+ add a0,a0
+ add a0,a0
+ mov (start_stack,a0),a0
+ mov a0,sp
+ jmp initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 000000000000..0dcd1c686ba8
--- /dev/null
+++ b/arch/mn10300/kernel/smp.c
@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* Timeout in milliseconds for smp_nmi_call_function(); zero means no timeout. */
+#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+ smp_call_func_t func;
+ void *info;
+ cpumask_t started;
+ cpumask_t finished;
+ int wait;
+ char size_alignment[0]
+ __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map; /* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount; /* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+ .name = "cpu_ipi",
+ .disable = mn10300_ipi_disable,
+ .enable = mn10300_ipi_enable,
+ .ack = mn10300_ipi_ack,
+ .eoi = mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+ .handler = smp_reschedule_interrupt,
+ .name = "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+ .handler = smp_call_function_interrupt,
+ .name = "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+ .handler = smp_ipi_timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+ unsigned long flags;
+ u16 tmp16;
+
+ /* set up the reschedule IPI */
+ set_irq_chip_and_handler(RESCHEDULE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+ set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+ /* set up the call function IPI */
+ set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+ set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ /* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+ set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ /* set up the cache flush IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+ mn10300_low_ipi_handler);
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+ arch_local_irq_restore(flags);
+#endif
+
+ /* set up the NMI call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+
+ /* set up the SMP boot IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+ mn10300_low_ipi_handler);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = tmp & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
+ tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning. The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+ int i;
+ u16 tmp;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, *cpumask)) {
+ /* send IPI */
+ tmp = CROSS_GxICR(irq, i);
+ CROSS_GxICR(irq, i) =
+ tmp | GxICR_REQUEST | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+ }
+ }
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+ send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning. The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+ cpumask_t cpumask;
+
+ cpumask = cpu_online_map;
+ cpu_clear(smp_processor_id(), cpumask);
+ send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ BUG();
+ /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
+ * timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+ struct nmi_call_data_struct data;
+ unsigned long flags;
+ unsigned int cnt;
+ int cpus, ret = 0;
+
+ cpus = num_online_cpus() - 1;
+ if (cpus < 1)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ data.started = cpu_online_map;
+ cpu_clear(smp_processor_id(), data.started);
+ data.wait = wait;
+ if (wait)
+ data.finished = data.started;
+
+ spin_lock_irqsave(&smp_nmi_call_lock, flags);
+ nmi_call_data = &data;
+ smp_mb();
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+ /* Wait for response */
+ if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.started);
+ cnt++)
+ mdelay(1);
+
+ if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.finished);
+ cnt++)
+ mdelay(1);
+ }
+
+ if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+ ret = -ETIMEDOUT;
+
+ } else {
+ /* If timeout value is zero, wait until cpumask has been
+ * cleared */
+ while (!cpus_empty(data.started))
+ barrier();
+ if (wait)
+ while (!cpus_empty(data.finished))
+ barrier();
+ }
+
+ spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+ return ret;
+}
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+ static volatile int stopflag;
+ unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+	/* If another CPU is single-stepping through smp_send_stop(),
+	 * clear procindebug to avoid a deadlock.
+ */
+ atomic_set(&procindebug[smp_processor_id()], 0);
+#endif /* CONFIG_GDBSTUB */
+
+ flags = arch_local_cli_save();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+
+ while (!stopflag)
+ cpu_relax();
+
+ cpu_set(smp_processor_id(), cpu_online_map);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+ smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+ /* do nothing */
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+ /* generic_smp_call_function_interrupt(); */
+ generic_smp_call_function_single_interrupt();
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+ smp_call_func_t func = nmi_call_data->func;
+ void *info = nmi_call_data->info;
+ int wait = nmi_call_data->wait;
+
+ /* Notify the initiating CPU that I've grabbed the data and am about to
+ * execute the function
+ */
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->started);
+ (*func)(info);
+
+ if (wait) {
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->finished);
+ }
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+ return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; i++) {
+ set_cpu_possible(i, true);
+ set_cpu_present(i, true);
+ }
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor, set up init_mm, initialise the FPU and
+ * configure interrupt levels 0-6.
+ */
+static void __init smp_cpu_init(void)
+{
+ unsigned long flags;
+ int cpu_id = smp_processor_id();
+ u16 tmp16;
+
+ if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+ for (;;)
+ local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Force FPU initialization */
+ clear_using_fpu(current);
+
+ GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+ GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+ mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+ /* Set up the non-maskable call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Configure interrupt levels 0-6 and initialise the gdbstub ICR.
+ */
+void smp_prepare_cpu_init(void)
+{
+ int loop;
+
+ /* Set the interrupt vector registers */
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+ /* Disable all interrupts and set to priority 6 (lowest) */
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+ /* initialise GDB-stub */
+ do {
+ unsigned long flags;
+ u16 tmp16;
+
+ flags = arch_local_cli_save();
+ GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(GDB_NMI_IPI);
+ arch_local_irq_restore(flags);
+ } while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+ smp_cpu_init();
+ smp_callin();
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ cpu_relax();
+
+ local_flush_tlb();
+ preempt_disable();
+ smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#endif
+ cpu_idle();
+ return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu() to boot up the APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int phy_id;
+
+ /* Setup boot CPU information */
+ smp_store_cpu_info(0);
+ smp_tune_scheduling();
+
+ init_ipi();
+
+ /* If SMP should be disabled, then finish */
+ if (max_cpus == 0) {
+ printk(KERN_INFO "SMP mode deactivated.\n");
+ goto smp_done;
+ }
+
+ /* Boot secondary CPUs (for which phy_id > 0) */
+ for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+ /* Don't boot primary CPU */
+ if (max_cpus <= cpucount + 1)
+ continue;
+ if (phy_id != 0)
+ do_boot_cpu(phy_id);
+ set_cpu_possible(phy_id, true);
+ smp_show_cpu_info(phy_id);
+ }
+
+smp_done:
+ Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Save boot_cpu_data and loops_per_jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ *ci = boot_cpu_data;
+ ci->loops_per_jiffy = loops_per_jiffy;
+ ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu - Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+ struct task_struct *idle;
+ unsigned long send_status, callin_status;
+ int timeout, cpu_id;
+
+ send_status = GxICR_REQUEST;
+ callin_status = 0;
+ timeout = 0;
+ cpu_id = phy_id;
+
+ cpucount++;
+
+ /* Create idle thread for this CPU */
+ idle = fork_idle(cpu_id);
+ if (IS_ERR(idle))
+ panic("Failed fork for CPU#%d.", cpu_id);
+
+ idle->thread.pc = (unsigned long)start_secondary;
+
+ printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+ start_stack[cpu_id - 1] = idle->thread.sp;
+
+ task_thread_info(idle)->cpu = cpu_id;
+
+ /* Send boot IPI to AP */
+ send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+ Dprintk("Waiting for send to finish...\n");
+
+	/* Wait up to 100ms for the AP to receive the IPI */
+ do {
+ udelay(1000);
+ send_status =
+ CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+ } while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+ Dprintk("Waiting for cpu_callin_map.\n");
+
+ if (send_status == 0) {
+ /* Allow AP to start initializing */
+ cpu_set(cpu_id, cpu_callout_map);
+
+ /* Wait for setting cpu_callin_map */
+ timeout = 0;
+ do {
+ udelay(1000);
+ callin_status = cpu_isset(cpu_id, cpu_callin_map);
+ } while (callin_status == 0 && timeout++ < 5000);
+
+ if (callin_status == 0)
+ Dprintk("Not responding.\n");
+ } else {
+ printk(KERN_WARNING "IPI not delivered.\n");
+ }
+
+ if (send_status == GxICR_REQUEST || callin_status == 0) {
+ cpu_clear(cpu_id, cpu_callout_map);
+ cpu_clear(cpu_id, cpu_callin_map);
+ cpu_clear(cpu_id, cpu_initialized);
+ cpucount--;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ printk(KERN_INFO
+ "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+ cpu,
+ MN10300_IOCLK / 1000000,
+ (MN10300_IOCLK / 10000) % 100,
+ ci->loops_per_jiffy / (500000 / HZ),
+ (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set cpu_callin_map of the current CPU ID
+ */
+static void __init smp_callin(void)
+{
+ unsigned long timeout;
+ int cpu;
+
+ cpu = smp_processor_id();
+ timeout = jiffies + (2 * HZ);
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_ERR "CPU#%d already present.\n", cpu);
+ BUG();
+ }
+ Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+	/* Wait up to 2s in total for the AP to start up */
+ while (time_before(jiffies, timeout)) {
+ if (cpu_isset(cpu, cpu_callout_map))
+ break;
+ cpu_relax();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ printk(KERN_ERR
+ "BUG: CPU#%d started up but did not get a callout!\n",
+ cpu);
+ BUG();
+ }
+
+#ifdef CONFIG_CALIBRATE_DELAY
+ calibrate_delay(); /* Get our bogomips */
+#endif
+
+ /* Save our processor parameters */
+ smp_store_cpu_info(cpu);
+
+ /* Allow the boot processor to continue */
+ cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_enable();
+
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Complete the SMP bring-up
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up stuff for the boot processor.
+ *
+ * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * processor (CPU 0).
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(0, cpu_callout_map);
+ cpu_set(0, cpu_callin_map);
+ current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+ asm volatile (
+ "mov %0,sp \n"
+ "jmp (%1) \n"
+ :
+ : "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+ int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (num_online_cpus() == 1)
+ disable_hlt();
+ if (sleep_mode[cpu])
+ run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ cpu_set(cpu, smp_commenced_mask);
+
+ /* Wait 5s total for a response */
+ for (timeout = 0 ; timeout < 5000 ; timeout++) {
+ if (cpu_isset(cpu, cpu_online_map))
+ break;
+ udelay(1000);
+ }
+
+ BUG_ON(!cpu_isset(cpu, cpu_online_map));
+ return 0;
+}
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int cpu, ret;
+
+ for_each_cpu(cpu) {
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ if (ret)
+ printk(KERN_WARNING
+ "topology_init: register_cpu %d failed (%d)\n",
+ cpu, ret);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+ if (cpu == 0)
+ return -EBUSY;
+
+ migrate_irqs();
+ cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ run_sleep_cpu(cpu);
+
+ if (num_online_cpus() == 1)
+ enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ " movhu (%1),%0 \n"
+ " and %2,%0 \n"
+ " movhu %0,(%1) \n"
+ "1: movhu (%1),%0 \n"
+ " btst %3,%0 \n"
+ " bne 1b \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+ "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICEN | CHCTR_DCEN)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+ int tmp;
+ asm volatile (
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICINV | CHCTR_DCINV)
+ : "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache() do {} while (0)
+#define hotplug_cpu_enable_cache() do {} while (0)
+#define hotplug_cpu_invalidate_cache() do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on another CPU for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+ smp_call_func_t func, void *info,
+ int wait)
+{
+ /*
+ * The address and the size of nmi_call_func_mask_data
+	 * need to be aligned to L1_CACHE_BYTES.
+ */
+ static struct nmi_call_data_struct nmi_call_func_mask_data
+ __cacheline_aligned;
+ unsigned long start, end;
+
+ start = (unsigned long)&nmi_call_func_mask_data;
+ end = start + sizeof(struct nmi_call_data_struct);
+
+ nmi_call_func_mask_data.func = func;
+ nmi_call_func_mask_data.info = info;
+ nmi_call_func_mask_data.started = cpumask;
+ nmi_call_func_mask_data.wait = wait;
+ if (wait)
+ nmi_call_func_mask_data.finished = cpumask;
+
+ spin_lock(&smp_nmi_call_lock);
+ nmi_call_data = &nmi_call_func_mask_data;
+ mn10300_local_dcache_flush_range(start, end);
+ smp_wmb();
+
+ send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
+
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.started));
+
+ if (wait) {
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.finished));
+ }
+
+ spin_unlock(&smp_nmi_call_lock);
+ return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_set(cpu, cpu_callin_map);
+ local_flush_tlb();
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+ sleep_mode[smp_processor_id()] = 1;
+ smp_mb();
+ mn10300_local_dcache_flush_inv();
+ hotplug_cpu_disable_cache();
+ hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+ unsigned int cpu_id = smp_processor_id();
+ /*
+	 * CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+	 * before this CPU goes into SLEEP mode.
+ */
+ do {
+ smp_mb();
+ __sleep_cpu();
+ } while (sleep_mode[cpu_id]);
+ restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+ cpumask_t cpumask = cpumask_of(cpu);
+
+ flags = arch_local_cli_save();
+ hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+ hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+ udelay(1); /* delay for the cpu to sleep. */
+ arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+ hotplug_cpu_invalidate_cache();
+ hotplug_cpu_enable_cache();
+ smp_mb();
+ sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+#if NR_CPUS == 2
+ mn10300_local_dcache_flush_inv();
+#else
+ /*
+	 * Before waking up the CPU, all online CPUs must stop and flush
+	 * the D-cache for global data.
+ */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
+#endif
+ hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
+ arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/switch_to.S b/arch/mn10300/kernel/switch_to.S
index 630aad71b946..9074d0fb8788 100644
--- a/arch/mn10300/kernel/switch_to.S
+++ b/arch/mn10300/kernel/switch_to.S
@@ -15,6 +15,9 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
.text
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
mov d1,a1
# save prev context
- mov (__frame),d0
- mov d0,(THREAD_FRAME,a0)
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
mov sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
mov a2,e2
#endif
- mov (THREAD_FRAME,a1),a2
- mov a2,(__frame)
mov (THREAD_PC,a1),a2
mov d2,d0 # for ret_from_fork
mov d0,a0 # for __switch_to
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 8f7f6d22783d..f860a340acc9 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -17,29 +17,18 @@
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
#include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk; /* system I/O clock frequency */
-unsigned long mn10300_iobclk; /* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
* interrupt occurred */
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
- .name = "timer",
-};
-
static unsigned long sched_clock_multiplier;
/*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
unsigned long tsc, tmp;
unsigned product[3]; /* 96-bit intermediate value */
+ /* cnt32_to_63() is not safe with preemption */
+ preempt_disable();
+
/* read the TSC value
*/
- tsc = 0 - get_cycles(); /* get_cycles() counts down */
+ tsc = get_cycles();
/* expand to 64-bits.
* - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
*/
tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+ preempt_enable();
+
/* scale the 64-bit TSC value to a nanosecond value via a 96-bit
* intermediate
*/
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
}
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU. They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+ profile_tick(CPU_PROFILING);
+ update_process_times(user_mode(get_irq_regs()));
+ return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
/*
* advance the kernel's time keeping clocks (xtime and jiffies)
* - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
+ irqreturn_t ret;
write_seqlock(&xtime_lock);
while (tsc = get_cycles(),
- elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+ elapse = tsc - mn10300_last_tsc, /* time elapsed since last
* tick */
elapse > MN10300_TSC_PER_HZ
) {
- mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+ mn10300_last_tsc += MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
- profile_tick(CPU_PROFILING);
do_timer(1);
}
write_sequnlock(&xtime_lock);
- update_process_times(user_mode(get_irq_regs()));
+ ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+ send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+ return ret;
+}
- return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+ .name = "timer",
+};
+#endif /* CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) NSEC_PER_SEC << shift;
+ do_div(temp, clock);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cs->shift = shift;
+ cs->mult = (u32) temp;
}
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
+}
+#endif
/*
* initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
*/
TMPSCNT |= TMPSCNT_ENABLE;
+#ifdef CONFIG_GENERIC_TIME
+ init_clocksource();
+#else
startup_timestamp_counter();
+#endif
printk(KERN_INFO
"timestamp counter I/O clock running at %lu.%02lu"
" (calibrated against RTC)\n",
MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
- mn10300_last_tsc = TMTSCBC;
-
- /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
- setup_irq(TMJCIRQ, &timer_irq);
+ mn10300_last_tsc = read_timestamp_counter();
- set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
- startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#else
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
#ifdef CONFIG_MN10300_WD_TIMER
/* start the watchdog timer */
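clocksource_set_clock() and clockevent_set_clock() both search for the largest shift value that keeps the multiplier within 32 bits; for a clocksource the conversion is ns = (cycles * mult) >> shift, so mult = (NSEC_PER_SEC << shift) / clock. A standalone worked example of that search, assuming a 33.333 MHz timestamp-counter clock:

    /*
     * Worked example of the mult/shift search in clocksource_set_clock()
     * above, for an assumed 33.333 MHz counter clock.
     * ns = (cycles * mult) >> shift, so mult = (NSEC_PER_SEC << shift) / clock.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t NSEC_PER_SEC = 1000000000ULL;
        uint32_t clock = 33333000;          /* assumed ioclk rate in Hz */
        uint64_t temp = 0;
        uint32_t shift;

        for (shift = 32; shift > 0; shift--) {
            temp = (NSEC_PER_SEC << shift) / clock;
            if ((temp >> 32) == 0)
                break;                      /* largest shift with a 32-bit mult */
        }
        /* expect roughly shift=27, i.e. about 30 ns per counter tick */
        printf("shift=%u mult=%u\n", shift, (uint32_t)temp);
        return 0;
    }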
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 91365adba4f5..b90c3f160c77 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -45,9 +45,6 @@
#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
#endif
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
int kstack_depth_to_print = 24;
spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL, {}, "invalid opcode", invalid_op, ILL_ILLOPC);
DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC);
DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR);
DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR);
-DO_EINFO(SIGILL, {}, "FPU invalid opcode", fpu_invalid_op, ILL_COPROC);
DO_ERROR(SIGTRAP,
#ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
printk(KERN_EMERG "threadinfo=%p task=%p)\n",
current_thread_info(), current);
- if ((unsigned long) current >= 0x90000000UL &&
- (unsigned long) current < 0x94000000UL)
+ if ((unsigned long) current >= PAGE_OFFSET &&
+ (unsigned long) current < (unsigned long)high_memory)
printk(KERN_EMERG "Process %s (pid: %d)\n",
current->comm, current->pid);
+#ifdef CONFIG_SMP
+ printk(KERN_EMERG "CPUID: %08x\n", CPUID);
+#endif
printk(KERN_EMERG "CPUP: %04hx\n", CPUP);
printk(KERN_EMERG "TBR: %08x\n", TBR);
printk(KERN_EMERG "DEAR: %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
{
unsigned long addr;
u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+ unsigned long flags;
addr = (unsigned long) handler - (unsigned long) vector;
+
+ flags = arch_local_cli_save();
+
vector[0] = 0xdc; /* JMP handler */
vector[1] = addr;
vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
vector[6] = 0xcb;
vector[7] = 0xcb;
- mn10300_dcache_flush_inv();
- mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
- unsigned long addr;
- u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
- addr = (unsigned long) handler - ((unsigned long) vector + 1);
- vector[0] = 0xff; /* PI to jump into JTAG debugger */
- vector[1] = 0xdc; /* jmp handler */
- vector[2] = addr;
- vector[3] = addr >> 8;
- vector[4] = addr >> 16;
- vector[5] = addr >> 24;
- vector[6] = 0xcb;
- vector[7] = 0xcb;
+ arch_local_irq_restore(flags);
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush_inv();
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+ mn10300_icache_inv();
+#endif
}
/*
@@ -581,7 +566,6 @@ void __init trap_init(void)
set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error);
set_excp_vector(EXCEP_PRIVDATACC, data_acc_error);
set_excp_vector(EXCEP_DATINSACC, insn_acc_error);
- set_excp_vector(EXCEP_FPU_DISABLED, fpu_disabled);
set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op);
set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception);
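set_intr_stub() now follows the usual self-modifying-code discipline: the 8-byte JMP stub is written into the vector with interrupts disabled, and afterwards the dcache is written back and the icache invalidated unless the hardware snoops. A hedged sketch of that pattern; patch_vector() and its arguments are hypothetical, while the save/restore and flush calls mirror the ones in the hunk:

    /*
     * Sketch only: patch a vector with interrupts disabled, then make the
     * new instructions visible to the instruction fetcher when the cache
     * does not snoop.
     */
    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/irqflags.h>
    #include <asm/cacheflush.h>

    static void patch_vector(u8 *vector, const u8 *stub, size_t len)
    {
        unsigned long flags;

        flags = arch_local_cli_save();      /* no interrupt mid-patch */
        memcpy(vector, stub, len);          /* the 8-byte JMP stub */
        arch_local_irq_restore(flags);

    #ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush_inv();         /* push the bytes out to RAM */
        mn10300_icache_inv();               /* drop stale icache lines */
    #endif
    }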
diff --git a/arch/mn10300/lib/bitops.c b/arch/mn10300/lib/bitops.c
index 440a7dcbf87b..a66c6cdaf442 100644
--- a/arch/mn10300/lib/bitops.c
+++ b/arch/mn10300/lib/bitops.c
@@ -15,7 +15,7 @@
/*
* try flipping a bit using BSET and BCLR
*/
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
/*
* try flipping a bit using BSET and BCLR and returning the old value
*/
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
diff --git a/arch/mn10300/lib/delay.c b/arch/mn10300/lib/delay.c
index fdf6f710f94e..8e7ceb8ba33d 100644
--- a/arch/mn10300/lib/delay.c
+++ b/arch/mn10300/lib/delay.c
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
*/
void __udelay(unsigned long usecs)
{
- signed long ioclk, stop;
+ unsigned long start, stop, cnt;
/* usecs * CLK / 1E6 */
stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
- stop = TMTSCBC - stop;
+ start = TMTSCBC;
do {
- ioclk = TMTSCBC;
- } while (stop < ioclk);
+ cnt = start - TMTSCBC;
+ } while (cnt < stop);
}
EXPORT_SYMBOL(__udelay);
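The rewritten __udelay() treats TMTSCBC as a free-running down-counter: it records the counter at entry and spins until the elapsed count, computed as the start value minus the current value, reaches the target number of ticks; unsigned arithmetic keeps the comparison correct across a counter wrap. A self-contained sketch, with read_downcounter() and CLK_HZ as assumed stand-ins for TMTSCBC and MN10300_TSCCLK:

    /*
     * Sketch of the down-counter delay loop in __udelay() above.
     */
    #include <stdint.h>

    #define CLK_HZ 33333000u                 /* assumed counter clock */

    extern uint32_t read_downcounter(void);  /* hardware down-counter */

    void delay_us(uint32_t usecs)
    {
        /* number of counter ticks to wait: usecs * CLK_HZ / 1e6 */
        uint32_t stop = (uint32_t)(((uint64_t)usecs * CLK_HZ) / 1000000u);
        uint32_t start = read_downcounter();
        uint32_t elapsed;

        do {
            /* unsigned subtraction handles the counter wrapping past zero */
            elapsed = start - read_downcounter();
        } while (elapsed < stop);
    }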
diff --git a/arch/mn10300/lib/do_csum.S b/arch/mn10300/lib/do_csum.S
index e138994e1667..1d27bba0cd8f 100644
--- a/arch/mn10300/lib/do_csum.S
+++ b/arch/mn10300/lib/do_csum.S
@@ -10,26 +10,25 @@
*/
#include <asm/cache.h>
- .section .text
- .balign L1_CACHE_BYTES
+ .section .text
+ .balign L1_CACHE_BYTES
###############################################################################
#
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
#
###############################################################################
.globl do_csum
- .type do_csum,@function
+ .type do_csum,@function
do_csum:
movm [d2,d3],(sp)
- mov d0,(12,sp)
- mov d1,(16,sp)
mov d1,d2 # count
mov d0,a0 # buff
+ mov a0,a1
clr d1 # accumulator
cmp +0,d2
- beq do_csum_done # return if zero-length buffer
+ ble do_csum_done # check for zero length or negative
# 4-byte align the buffer pointer
btst +3,a0
@@ -41,17 +40,15 @@ do_csum:
inc a0
asl +8,d0
add d0,d1
- addc +0,d1
add -1,d2
-do_csum_addr_not_odd:
+do_csum_addr_not_odd:
cmp +2,d2
bcs do_csum_fewer_than_4
btst +2,a0
beq do_csum_now_4b_aligned
movhu (a0+),d0
add d0,d1
- addc +0,d1
add -2,d2
cmp +4,d2
bcs do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
do_csum_loop:
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
mov (a0+),d0
- addc d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ addc d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
@@ -94,12 +91,12 @@ do_csum_remainder:
cmp +16,d2
bcs do_csum_fewer_than_16
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
add -16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
xor_cmp d0,d0,+2,d2
bcs do_csum_fewer_than_2
movhu (a0+),d0
-do_csum_fewer_than_2:
and +1,d2
beq do_csum_add_last_bit
+do_csum_fewer_than_2:
movbu (a0),d3
add d3,d0
do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
do_csum_done:
# compress the checksum down to 16 bits
- mov +0xffff0000,d2
- and d1,d2
+ mov +0xffff0000,d0
+ and d1,d0
asl +16,d1
- add d2,d1,d0
+ add d1,d0
addc +0xffff,d0
lsr +16,d0
# flip the halves of the word result if the buffer was oddly aligned
- mov (12,sp),d1
- and +1,d1
+ and +1,a1
beq do_csum_not_oddly_aligned
swaph d0,d0 # exchange bits 15:8 with 7:0
do_csum_not_oddly_aligned:
ret [d2,d3],8
-do_csum_end:
- .size do_csum, do_csum_end-do_csum
+ .size do_csum, .-do_csum
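The new do_csum epilogue folds the 32-bit accumulator down to 16 bits by adding its two halves with an end-around carry, then swaps the bytes of the result when the buffer started on an odd address (the bytes were accumulated with shifted significance). An equivalent fold in C, offered as a sketch rather than the kernel's csum API:

    /*
     * C equivalent of the checksum epilogue in do_csum above.
     */
    #include <stdint.h>

    uint16_t csum_finish(uint32_t sum, uintptr_t buff)
    {
        sum = (sum >> 16) + (sum & 0xffffu);   /* add the two halves */
        sum += sum >> 16;                      /* fold the end-around carry */

        if (buff & 1)                          /* oddly aligned buffer: swap */
            sum = ((sum & 0xffu) << 8) | ((sum >> 8) & 0xffu);

        return (uint16_t)sum;
    }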
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644
index 000000000000..c4fd923a55a0
--- /dev/null
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+ prompt "CPU Caching mode"
+ default MN10300_CACHE_WBACK
+ help
+ This option determines the caching mode for the kernel.
+
+ Write-Back caching mode means that all reads and writes cause
+ the affected cacheline to be read into the cache before being
+ operated upon. Memory is not updated by a write until the cache
+ is full and a cacheline needs to be displaced from the cache to
+ make room. Only at that point is it written back.
+
+ Write-Through caching only fetches cachelines from memory on a
+ read. Writes always get written directly to memory. If the affected
+ cacheline is also in cache, it will be updated too.
+
+ The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+ bool "Write-Back"
+ help
+ The dcache operates in delayed write-back mode. It must be manually
+ flushed if writes are made that subsequently need to be executed or
+ to be DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+ bool "Write-Through"
+ help
+ The dcache operates in immediate write-through mode. Writes are
+ committed to RAM immediately in addition to being stored in the
+ cache. This means that the written data is immediately available for
+ execution or DMA.
+
+ This is not available for use with an SMP kernel unless cache
+ flushing and invalidation by the automatic purge registers is selected.
+
+config MN10300_CACHE_DISABLED
+ bool "Disabled"
+ help
+ The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+ def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+ prompt "CPU cache flush/invalidate method"
+ default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+ default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+ depends on MN10300_CACHE_ENABLED
+ help
+ This determines the method by which CPU cache flushing and
+ invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+ bool "Use the cache tag registers directly"
+ depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+ bool "Flush areas by way of automatic purge registers (AM34 only)"
+ depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+ def_bool n
+
+config MN10300_CACHE_SNOOP
+ bool "Use CPU Cache Snooping"
+ depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+ default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+ def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+ help
+ Set if the dcache needs to be flushed before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+ def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+ help
+ Set if we need the icache to be invalidated, even if the dcache is in
+ write-through mode and doesn't need flushing.
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 1557277fbc5c..203fee23f7d7 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,11 +2,21 @@
# Makefile for the MN10300-specific memory management code
#
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644
index 000000000000..1dcae0211671
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-reg.S
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set mask
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge
+ #
+ # DCPGCR = DCPGCR_DCP
+ #
+ mov DCPGCR_DCP,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize;
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # determine the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgloop:
+ # area purge
+ mov a2,d0
+ or DCPGCR_DCP,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgloop
+ bns dcpgloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2,d3,a2],12
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set the mask to cover everything
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge & invalidate
+ mov DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # set the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgivloop:
+ # area purge & invalidate
+ mov a2,d0
+ or DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge & invalidate of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgivloop
+ bns dcpgivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
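Both range routines above first widen the purge granule: alignsize starts at one cache line and is doubled until it is at least as large as the requested range, and ~(alignsize-1) is written to DCPGMR as the address mask before DCPGCR is stepped through the range. A C rendering of the computation spelled out in the assembler comments, with an assumed 16-byte line size:

    /*
     * C rendering of the alignsize/mask computation documented above.
     * Assumes end > start; constants are assumed, not taken from the
     * mn10300 headers.
     */
    #include <stdint.h>

    #define L1_CACHE_SHIFT 4
    #define L1_CACHE_BYTES (1u << L1_CACHE_SHIFT)

    uint32_t purge_mask_for_range(uint32_t start, uint32_t end)
    {
        uint32_t alignsize = L1_CACHE_BYTES;
        uint32_t i;

        /* double alignsize until it covers (end - start) */
        for (i = (end - start - 1) >> L1_CACHE_SHIFT; i > 0; i >>= 1)
            alignsize <<= 1;

        return ~(alignsize - 1);       /* value written to DCPGMR */
    }

    /* each purge request then covers one alignsize-sized block:
     *   for (addr = start & mask; addr < end; addr += alignsize)
     *       DCPGCR = addr | DCPGCR_DCP;   // then poll DCPGCR_DCPGBSY
     */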
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644
index 000000000000..5cd6a27dd63e
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-tag.S
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_local_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush
+1:
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_local_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2],4
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_local_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush_inv
+1:
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2],4
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 000000000000..fdb1a9db20f0
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start = page_to_phys(page);
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ mn10300_local_dcache_flush_page(start);
+ mn10300_local_icache_inv_page(start);
+
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given. The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_local_dcache_flush_range2(addr + off, size);
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_local_dcache_flush_range(start_page, end);
+ mn10300_local_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + 1 == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_dcache_flush();
+ mn10300_icache_inv();
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644
index c8ed1cbac107..000000000000
--- a/arch/mn10300/mm/cache-flush-mn10300.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
- .am33_2
- .globl mn10300_dcache_flush
- .globl mn10300_dcache_flush_page
- .globl mn10300_dcache_flush_range
- .globl mn10300_dcache_flush_range2
- .globl mn10300_dcache_flush_inv
- .globl mn10300_dcache_flush_inv_page
- .globl mn10300_dcache_flush_inv_range
- .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_end
-
- # read the addresses tagged in the cache's tag RAM and attempt to flush
- # those addresses specifically
- # - we rely on the hardware to filter out invalid tag entry addresses
- mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_loop:
- mov (a0),d0
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
- # cache
- mov d0,(a1) # conditional purge
-
-mn10300_dcache_flush_skip:
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
- add d0,d1
-mn10300_dcache_flush_range:
- movm [d2,d3],(sp)
-
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_range_end
-
- # round start addr down
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush all instances of an address from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
- # cache
-
-mn10300_dcache_flush_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
- ret [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_inv_end
-
- # hit each line in the dcache with an unconditional purge
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_inv_loop:
- mov (a1),d0 # unconditional purge
-
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
- add d0,d1
-mn10300_dcache_flush_inv_range:
- movm [d2,d3],(sp)
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush and invalidate all instances of an address
- # from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
-mn10300_dcache_flush_inv_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # in all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
- ret [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644
index 000000000000..c8950861ed77
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-reg.S
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+ # round the addresses out to be full cachelines, unless we're in
+ # writeback mode, in which case we would be in flush and invalidate by
+ # now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ mov L1_CACHE_BYTES-1,d2
+ add d2,d1
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+ sub d0,d1,d2 # calculate the total size
+ mov d0,a2 # A2 = start address
+ mov d1,a1 # A1 = end address
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov DCPGCR,a0 # make sure the purger isn't busy
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # skip initial address alignment calculation if address is zero
+ mov d2,d1
+ cmp 0,a2
+ beq 1f
+
+dcivloop:
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * while (! start & alignsize) {
+ * alignsize <<=1;
+ * }
+ * d1 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d1
+ lsr 1,d1
+ setlb
+ add d1,d1
+ mov d1,d0
+ and a2,d0
+ leq
+
+1:
+ /* calculate invsize
+ *
+ * if (totalsize > alignsize) {
+ * invsize = alignsize;
+ * } else {
+ * invsize = totalsize;
+ * tmp = 0x80000000;
+ * while (! invsize & tmp) {
+ * tmp >>= 1;
+ * }
+ * invsize = tmp;
+ * }
+ * d1 = invsize
+ */
+ cmp d2,d1
+ bns 2f
+ mov d2,d1
+
+ mov 0x80000000,d0 # start from 31bit=1
+ setlb
+ lsr 1,d0
+ mov d0,e0
+ and d1,e0
+ leq
+ mov d0,d1
+
+2:
+ /* set mask
+ *
+ * mask = ~(invsize-1);
+ * DCPGMR = mask;
+ */
+ mov d1,d0
+ add -1,d0
+ not d0
+ mov d0,(DCPGMR)
+
+ # invalidate area
+ mov a2,d0
+ or DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
+
+ setlb # wait for the purge to complete
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ sub d1,d2 # decrease size remaining
+ add d1,a2 # increase next start address
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + invsize
+ * if (a2 < end) {
+ * goto dcivloop;
+ * } */
+ cmp a1,a2
+ bns dcivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+ .type mn10300_local_icache_inv_page,@function
+ .type mn10300_local_icache_inv_range,@function
+ .type mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+ add d0,d1
+mn10300_local_icache_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_ICEN,d2
+ beq mn10300_local_icache_inv_range_reg_end
+
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
+ * alignsize <<= 1;
+ * }
+ * d2 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+
+ /* a1 = end */
+ mov d1,a1
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov ICIVCR,a0
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* set mask
+ *
+ * mask = ~(alignsize-1);
+ * ICIVMR = mask;
+ */
+ mov d2,d1
+ add -1,d1
+ not d1
+ mov d1,(ICIVMR)
+ /* a2 = mask & start */
+ and d1,d0,a2
+
+icivloop:
+ /* area invalidate
+ *
+ * ICIVCR = (mask & start) | ICIVCR_ICI
+ */
+ mov a2,d0
+ or ICIVCR_ICI,d0
+ mov d0,(a0)
+
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* check invalidating of end address
+ *
+ * a2 = a2 + alignsize
+ * if (a2 < end) {
+ * goto icivloop;
+ * } */
+ add d2,a2
+ cmp a1,a2
+ bns icivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+ .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+ .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
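Unlike the flush routines, the invalidate-by-register range loop cannot cover the whole range with one oversized block, since invalidation must not touch data outside [start, end). Each pass therefore picks the largest power-of-two block that is naturally aligned at the current start address and no larger than the bytes remaining, as spelled out in the pseudo-code comments in the loop. A C rendering of that per-pass block-size choice, with an assumed 16-byte line size:

    /*
     * C rendering of the per-iteration block-size choice documented in
     * the pseudo-code comments of mn10300_local_dcache_inv_range above.
     * 'start' is assumed to be cacheline-rounded already (as in the asm),
     * and 'remaining' to be non-zero; the caller loops while start < end.
     */
    #include <stdint.h>

    #define L1_CACHE_BYTES 16u             /* assumed line size */

    uint32_t inv_block_size(uint32_t start, uint32_t remaining)
    {
        uint32_t align = L1_CACHE_BYTES;
        uint32_t inv;

        if (start != 0)
            while (!(start & align))       /* natural alignment of 'start' */
                align <<= 1;
        else
            align = remaining;             /* address 0 is aligned to anything */

        if (remaining > align) {
            inv = align;
        } else {
            inv = 0x80000000u;             /* round 'remaining' down to a */
            while (!(remaining & inv))     /* power of two */
                inv >>= 1;
        }
        return inv;   /* caller writes ~(inv - 1) to DCPGMR, start|DCI to DCPGCR */
    }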
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644
index 000000000000..e9713b40c0ff
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-tag.S
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a2
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+ mov d0,a1
+
+ clr d2 # we're going to clear tag RAM
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+ LOCAL_CLI_SAVE(d3)
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_local_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0
+ add -1,d1
+ btst mn10300_local_dcache_inv_range_intr_interval,d1
+ bne mn10300_local_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ LOCAL_IRQ_RESTORE(d3)
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
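The tag-walking loop above is dense in assembly; the following is a loose C rendering, not part of the patch, added only to make the control flow easier to follow. The L1_CACHE_* constants and the interrupt-check interval are the ones the assembly uses; dcache_disable(), dcache_reenable() and dcache_tag_slot() are hypothetical helpers standing in for the CHCTR/setlb sequences and the DCACHE_TAG() addressing.

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/cache.h>

static void dcache_inv_range_by_tag(unsigned long start, unsigned long end)
{
	unsigned long line = start & (L1_CACHE_TAG_ADDRESS | L1_CACHE_TAG_ENTRY);
	unsigned long n = (end - line + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT;
	unsigned long flags;
	int way;

	while (n) {
		local_irq_save(flags);
		dcache_disable();	/* hypothetical: clear CHCTR_DCEN, wait for !DCBUSY */

		do {
			for (way = 0; way < 4; way++) {
				u32 *tag = dcache_tag_slot(line, way);	/* hypothetical: DCACHE_TAG() + way displacement */

				/* kill the tag if it is valid and covers this line */
				if ((*tag & L1_CACHE_TAG_VALID) &&
				    !((*tag ^ line) >> 12))
					*tag = 0;
			}
			line += L1_CACHE_BYTES;
			n--;
			/* stay in the inner loop until 2^INTERVAL lines are done */
		} while (n & ((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1));

		dcache_reenable();		/* hypothetical: set CHCTR_DCEN again */
		local_irq_restore(flags);	/* give pending interrupts a chance */
	}
}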
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 000000000000..a8933a60b2d4
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Invalidate the icache for part of a single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given. The page must be in the paged area. The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* invalidate the icache coverage on that region */
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + 1 == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
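As a usage illustration only (not part of the patch): the contract flush_icache_range() provides is that instructions written through the dcache become fetchable on every CPU. A minimal, hypothetical caller might look like the sketch below; install_stub() and its arguments are invented for the example.

#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);		/* the new instructions land in the dcache */
	flush_icache_range((unsigned long) dst,
			   (unsigned long) dst + len);	/* make them fetchable on all CPUs */
}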
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644
index e839d0aedd69..000000000000
--- a/arch/mn10300/mm/cache-mn10300.S
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
- +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
- .am33_2
-
- .globl mn10300_icache_inv
- .globl mn10300_dcache_inv
- .globl mn10300_dcache_inv_range
- .globl mn10300_dcache_inv_range2
- .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
- ALIGN
-mn10300_icache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_ICEN,d0
- beq mn10300_icache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the icache
- and ~CHCTR_ICEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # invalidate
- or CHCTR_ICINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_ICINV,d0
- or CHCTR_ICEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_icache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the dcache
- and ~CHCTR_DCEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # invalidate
- or CHCTR_DCINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_DCINV,d0
- or CHCTR_DCEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_dcache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
- add d0,d1
-mn10300_dcache_inv_range:
- movm [d2,d3,a2],(sp)
- mov CHCTR,a2
-
- movhu (a2),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- clr d2 # we're going to clear tag ram
- # entries
-
- # read the tags from the tag RAM, and if they indicate a valid dirty
- # cache line then invalidate that line
- mov DCACHE_TAG(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache tag RAM
- # access address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
- # disable interrupts
- mov epsw,d3
- and ~EPSW_IE,epsw
- nop # note that reading CHCTR and
- # AND'ing D0 occupy two delay
- # slots after disabling
- # interrupts
-
- # disable the dcache
- movhu (a2),d0
- and ~CHCTR_DCEN,d0
- movhu d0,(a2)
-
- # and wait for it to calm down
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
-mn10300_dcache_inv_range_loop:
-
- # process the way 0 slot
- mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
- # process the way 1 slot
- mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
- # process the way 2 slot
- mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
- # process the way 3 slot
- mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
- # approx every N steps we re-enable the cache and see if there are any
- # interrupts to be processed
- # we also break out if we've reached the end of the loop
- # (the bottom nibble of the count is zero in both cases)
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- btst mn10300_dcache_inv_range_intr_interval,d1
- bne mn10300_dcache_inv_range_loop
-
- # wait for the cache to finish what it's doing
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- or CHCTR_DCEN,d0
- movhu d0,(a2)
- movhu (a2),d0
-
- # re-enable interrupts
- # - we don't bother with delay NOPs as we'll have enough instructions
- # before we disable interrupts again to give the interrupts a chance
- # to happen
- mov d3,epsw
-
- # go around again if the counter hasn't yet reached zero
- add 0,d1
- bne mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
- ret [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 000000000000..fd51af5eaf70
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush();
+ smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv();
+ smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ * cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
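Every wrapper in this file (and in cache-smp-inv.c below) follows the same lock / local operation / IPI / unlock shape. Here is a hedged sketch of that shape as one helper; the helper itself is hypothetical, only smp_lock_cache(), smp_cache_call() and smp_unlock_cache() come from cache-smp.h. mn10300_dcache_flush_range(), for instance, is effectively smp_cache_range_op(mn10300_local_dcache_flush_range, SMP_DCACHE_FLUSH_RANGE, start, end).

#include "cache-smp.h"

static void smp_cache_range_op(void (*local_op)(unsigned long, unsigned long),
			       unsigned long smp_op,
			       unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();		/* serialise requesters, IRQs off */
	local_op(start, end);			/* this CPU does its share directly */
	smp_cache_call(smp_op, start, end);	/* the other CPUs do theirs via IPI */
	smp_unlock_cache(flags);
}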
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 000000000000..ff1787358c8e
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_page(start);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range(start, end);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range2(start, size);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv();
+ smp_cache_call(SMP_DCACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_page(start);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 000000000000..4a6e9a4b5b27
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map; /* CPUs yet to acknowledge the cache IPI */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches. The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+ unsigned long opr_mask = smp_cache_mask;
+
+ switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+ case SMP_DCACHE_NOP:
+ break;
+ case SMP_DCACHE_INV:
+ mn10300_local_dcache_inv();
+ break;
+ case SMP_DCACHE_INV_RANGE:
+ mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH:
+ mn10300_local_dcache_flush();
+ break;
+ case SMP_DCACHE_FLUSH_RANGE:
+ mn10300_local_dcache_flush_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH_INV:
+ mn10300_local_dcache_flush_inv();
+ break;
+ case SMP_DCACHE_FLUSH_INV_RANGE:
+ mn10300_local_dcache_flush_inv_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ }
+
+ switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+ case SMP_ICACHE_NOP:
+ break;
+ case SMP_ICACHE_INV:
+ mn10300_local_icache_inv();
+ break;
+ case SMP_ICACHE_INV_RANGE:
+ mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ }
+
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+ unsigned long start, unsigned long end)
+{
+ smp_cache_mask = opr_mask;
+ smp_cache_start = start;
+ smp_cache_end = end;
+ smp_cache_ipi_map = cpu_online_map;
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+ send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+ while (!cpus_empty(smp_cache_ipi_map))
+ /* nothing. lockup detection does not belong here */
+ mb();
+}
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 000000000000..cb52892aa66a
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+ SMP_ICACHE_NOP = 0x0000,
+ SMP_ICACHE_INV = 0x0001,
+ SMP_ICACHE_INV_RANGE = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK 0x0003
+
+enum smp_dcache_ops {
+ SMP_DCACHE_NOP = 0x0000,
+ SMP_DCACHE_INV = 0x0004,
+ SMP_DCACHE_INV_RANGE = 0x0008,
+ SMP_DCACHE_FLUSH = 0x000c,
+ SMP_DCACHE_FLUSH_RANGE = 0x0010,
+ SMP_DCACHE_FLUSH_INV = 0x0014,
+ SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK 0x001c
+
+#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+ __acquires(&smp_cache_lock)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&smp_cache_lock, flags);
+ return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+ __releases(&smp_cache_lock)
+{
+ spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
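The operation encodings above are laid out so that one icache request and one dcache request can be OR'd into a single IPI, which is what the SMP_IDCACHE_* combinations express. A hedged usage sketch follows, assuming the local cache primitives declared in <asm/cacheflush.h>; flush_code_region() itself is invented for the example.

#include <asm/cacheflush.h>
#include "cache-smp.h"

static void flush_code_region(unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_flush_range(start, end);	/* push new code to RAM locally */
	mn10300_local_icache_inv_range(start, end);	/* drop stale instructions locally */
	/* one IPI carries both requests:
	 * SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE */
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
	smp_unlock_cache(flags);
}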
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2c..0a1f0aa92ebc 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -37,96 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- mn10300_dcache_flush_page(page_to_phys(page));
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, base, off;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ppte, pte;
-
- if (end > 0x80000000UL) {
- /* addresses above 0xa0000000 do not go through the cache */
- if (end > 0xa0000000UL) {
- end = 0xa0000000UL;
- if (start >= end)
- return;
- }
-
- /* kernel addresses between 0x80000000 and 0x9fffffff do not
- * require page tables, so we just map such addresses directly */
- base = (start >= 0x80000000UL) ? start : 0x80000000UL;
- mn10300_dcache_flush_range(base, end);
- if (base == start)
- goto invalidate;
- end = base;
- }
-
- for (; start < end; start += size) {
- /* work out how much of the page to flush */
- off = start & (PAGE_SIZE - 1);
-
- size = end - start;
- if (size > PAGE_SIZE - off)
- size = PAGE_SIZE - off;
-
- /* get the physical address the page is mapped to from the page
- * tables */
- pgd = pgd_offset(current->mm, start);
- if (!pgd || !pgd_val(*pgd))
- continue;
-
- pud = pud_offset(pgd, start);
- if (!pud || !pud_val(*pud))
- continue;
-
- pmd = pmd_offset(pud, start);
- if (!pmd || !pmd_val(*pmd))
- continue;
-
- ppte = pte_offset_map(pmd, start);
- if (!ppte)
- continue;
- pte = *ppte;
- pte_unmap(ppte);
-
- if (pte_none(pte))
- continue;
-
- page = pte_page(pte);
- if (!page)
- continue;
-
- addr = page_to_phys(page);
-
- /* flush the dcache and invalidate the icache coverage on that
- * region */
- mn10300_dcache_flush_range2(addr + off, size);
- }
-#endif
-
-invalidate:
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 81f153fa51b4..59c3da49d9d9 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
-#ifdef CONFIG_SMP
- /* Many serial drivers do __global_cli() */
- global_irq_lock = 0;
-#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
}
#endif
-asmlinkage void monitor_signal(struct pt_regs *);
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
*/
bad_area:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
}
no_context:
- monitor_signal(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -338,14 +330,13 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
- goto no_context;
- pagefault_out_of_memory();
- return;
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e51521..48907cc3bdb7 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
/*
* set up paging
*/
@@ -73,7 +77,24 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ /* The Atomic Operation Unit registers need to be mapped to userspace
+ * for all processes. The following uses vm_area_register_early() to
+ * reserve the first page of the vmalloc area and sets the pte for that
+ * page.
+ *
+ * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+ * it from now on.
+ */
+ user_iomap_vm.flags = VM_USERMAP;
+ user_iomap_vm.size = 1 << PAGE_SHIFT;
+ vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+ ppte = kernel_vmalloc_ptes;
+ set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+ PAGE_USERIO));
+#endif
+
+ local_flush_tlb_all();
}
/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 6dffbf97ac26..eef989c1d0c1 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -449,8 +449,7 @@ found_opcode:
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
- if (tmp > noc)
- BUG(); /* match was less complete than it ought to have been */
+ BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d40..a4f7d3dcc6e6 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
- [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+ [0 ... NR_CPUS - 1] =
+ MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
};
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long pteu, cnx, flags;
-
- addr &= PAGE_MASK;
-
- /* make sure the context doesn't migrate and defend against
- * interference from vmalloc'd regions */
- local_irq_save(flags);
-
- cnx = mm_context(vma->vm_mm);
-
- if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
- IPTEU = pteu;
- DPTEU = pteu;
- if (IPTEL & xPTEL_V)
- IPTEL = 0;
- if (DPTEL & xPTEL_V)
- DPTEL = 0;
- }
-
- local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
/*
* preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
* interference from vmalloc'd regions */
local_irq_save(flags);
+ cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
cnx = mm_context(vma->vm_mm);
+#endif
if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e9..450f7ba3f8f2 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
index 7095147dcb8b..b9940177d81b 100644
--- a/arch/mn10300/mm/tlb-mn10300.S
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -27,7 +27,6 @@
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
itlb_miss_set:
- mov d2,(IPTEL) # change the TLB
+ mov d2,(IPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
dtlb_miss_set:
- mov d2,(DPTEL) # change the TLB
+ mov d2,(DPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
add -4,sp # need to pass three params
# calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
- mov (IPTEU),a2
- mov a2,d0
+ mov (IPTEU),d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
- mov d0,(IPTEL)
+ mov d0,(IPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
add -4,sp # need to pass three params
+ and ~EPSW_NMID,epsw
# calculate the fault code
movhu (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
mov d0,(12,sp)
clr d0
- mov d0,(DPTEL)
+ mov d0,(DPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 000000000000..0b6a5ad1960e
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For TLB flushing
+ */
+#define FLUSH_ALL 0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+ unsigned long cpu_id;
+
+ cpu_id = get_cpu();
+
+ if (!cpu_isset(cpu_id, flush_cpumask))
+ /* This was a BUG() but until someone can quote me the line
+ * from the intel manual that guarantees an IPI to multiple
+ * CPUs is retried _only_ on the erroring CPUs, it's staying as a
+ * return
+ *
+ * BUG();
+ */
+ goto out;
+
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ local_flush_tlb_page(flush_mm, flush_va);
+
+ smp_mb__before_clear_bit();
+ cpu_clear(cpu_id, flush_cpumask);
+ smp_mb__after_clear_bit();
+out:
+ put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+
+ * A few sanity checks (to be removed):
+ * - mask must not be empty
+ * - current CPU must not be in mask
+ * - we do not send IPIs to as-yet unbooted CPUs.
+ */
+ BUG_ON(!mm);
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(cpumask, tmp));
+
+ /* I'm not happy about this global shared spinlock in the MM hot path,
+ * but we'll see how contended it is.
+ *
+ * Temporarily this turns IRQs off, so that lockups are detected by the
+ * NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+ atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+ /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+ smp_call_function(smp_flush_tlb, NULL, 1);
+
+ while (!cpus_empty(flush_cpumask))
+ /* Lockup detection does not belong here */
+ smp_mb();
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb_page(mm, va);
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+ local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, 0, 1);
+}
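flush_tlb_all() above relies on the generic on_each_cpu() helper rather than the hand-rolled IPI path, and the same pattern works for any maintenance routine that takes no arguments. Below is a small illustrative sketch, assuming the local icache primitive added earlier in this patch; do_inv_icache() and inv_icache_all_cpus() are invented names.

#include <linux/smp.h>
#include <asm/cacheflush.h>

static void do_inv_icache(void *unused)
{
	mn10300_local_icache_inv();		/* runs on each CPU in turn */
}

static void inv_icache_all_cpus(void)
{
	on_each_cpu(do_inv_icache, NULL, 1);	/* 1 = wait for completion */
}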
diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
index bdc1f9a59b4c..c1528004163c 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
@@ -30,4 +30,13 @@
*/
#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/clock.h b/arch/mn10300/proc-mn103e010/include/proc/clock.h
index aa23e147d620..704a819f1f4b 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/clock.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/clock.h
@@ -13,6 +13,4 @@
#include <unit/clock.h>
-#define MN10300_WDCLK MN10300_IOCLK
-
#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
new file mode 100644
index 000000000000..d72d328d1f9c
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
@@ -0,0 +1,102 @@
+/* MN103E010 on-board DMA controller registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
+#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
+#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
+#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
+#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
+#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
+#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
+#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
+#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
+#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
+#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
+ * size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers - 1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
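To show how the bitfields above compose (purely illustrative, not from the patch): a software-requested batch transfer, 4 bytes per unit, with an incrementing source and a fixed destination. dma_soft_start() and its parameters are invented; only the register and flag names come from this header, and <asm/dmactl-regs.h> is assumed to pull it in.

#include <linux/types.h>
#include <asm/dmactl-regs.h>

static void dma_soft_start(int chan, const void *src, void *dst, size_t len)
{
	DMxSRC(chan) = (u32) src;		/* source address */
	DMxDST(chan) = (u32) dst;		/* destination address */
	DMxSIZ(chan) = len & DMxSIZ_CT;		/* byte count */
	DMxCTR(chan) = DMxCTR_BG_SOFT |		/* software-requested transfer */
		       DMxCTR_SAM_INCR |	/* source increments */
		       DMxCTR_DAM_FIXED |	/* destination stays fixed */
		       DMxCTR_TM_BATCH |	/* batch, not intermittent */
		       DMxCTR_UT_4 |		/* 4-byte transfer unit */
		       DMxCTR_TEN;		/* enable the channel */
}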
diff --git a/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
new file mode 100644
index 000000000000..f537801a44ba
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x00fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD __SYSREG(0xd4000200, u16)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
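SET_XIRQ_TRIGGER() above is a read-modify-write of the two-bit field for pin X in EXTMD. A hedged usage sketch follows; the trigger code 0x2 is a placeholder, as the real encodings live with the interrupt controller documentation rather than in this header, and <asm/intctl-regs.h> is assumed to include it.

#include <asm/intctl-regs.h>

static void xirq1_set_trigger(void)
{
	/* program external IRQ pin 1; 0x2 is a placeholder trigger code */
	SET_XIRQ_TRIGGER(1, 0x2);
}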
diff --git a/arch/mn10300/proc-mn103e010/include/proc/proc.h b/arch/mn10300/proc-mn103e010/include/proc/proc.h
index 22a2b93f70b7..39c4f8e7d2d3 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/proc.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/proc.h
@@ -12,7 +12,7 @@
#ifndef _ASM_PROC_PROC_H
#define _ASM_PROC_PROC_H
-#define PROCESSOR_VENDOR_NAME "Matsushita"
+#define PROCESSOR_VENDOR_NAME "Panasonic"
#define PROCESSOR_MODEL_NAME "mn103e010"
#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn103e010/proc-init.c b/arch/mn10300/proc-mn103e010/proc-init.c
index 9a482efafa82..27b97980dca4 100644
--- a/arch/mn10300/proc-mn103e010/proc-init.c
+++ b/arch/mn10300/proc-mn103e010/proc-init.c
@@ -9,7 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/kernel.h>
+#include <asm/fpu.h>
#include <asm/rtc.h>
+#include <asm/busctl-regs.h>
/*
* initialise the on-silicon processor peripherals
@@ -28,6 +30,7 @@ asmlinkage void __init processor_init(void)
__set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
__set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
__set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
__set_intr_stub(EXCEP_SYSCALL0, system_call);
__set_intr_stub(EXCEP_NMI, nmi_handler);
@@ -73,3 +76,37 @@ asmlinkage void __init processor_init(void)
calibrate_clock();
}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long base, size;
+
+ *mem_base = 0;
+ *mem_size = 0;
+
+ base = SDBASE(0);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[0]: %luMB @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ *mem_base = base;
+ }
+
+ base = SDBASE(1);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[1]: %luMB @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ if (*mem_base == 0)
+ *mem_base = base;
+ }
+}
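
The size computation above turns the chip-select address mask into a region size by shifting it up and negating it. A worked example, assuming SDBASE_CBAM/SDBASE_CBAM_SHIFT leave bits 31:24 set after the shift:

	size = 0xff000000;	/* (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT */
	size = ~size + 1;	/* 0x01000000, i.e. a 16MB window */
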
diff --git a/arch/mn10300/proc-mn2ws0050/Makefile b/arch/mn10300/proc-mn2ws0050/Makefile
new file mode 100644
index 000000000000..d4ca13309a85
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := proc-init.o
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
new file mode 100644
index 000000000000..cafd7b5b55b4
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
@@ -0,0 +1,48 @@
+/* Cache specification
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Add L1_CACHE_SHIFT_MAX definition.
+ * 29-Jul-2008 MEI Add define for MN10300_HAS_AREAPURGE_REG.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CACHE_H
+#define _ASM_PROC_CACHE_H
+
+/*
+ * L1 cache
+ */
+#define L1_CACHE_NWAYS 4 /* number of ways in caches */
+#define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+#define L1_CACHE_BYTES 32 /* bytes per entry */
+#define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
+#define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+#define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
+#define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
+#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
+
+/*
+ * specification of the interval between interrupt checks whilst managing the
+ * cache with interrupts disabled
+ */
+#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
+#endif /* _ASM_PROC_CACHE_H */
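
For reference, the geometry above amounts to 4 ways x 128 entries x 32 bytes = 16KB of data cache, which is exactly what MN10300_DCACHE_FLUSH_BORDER and MN10300_DCACHE_FLUSH_INV_BORDER evaluate to: a flush request covering at least that much is better served by flushing the whole cache.
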
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/clock.h b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
new file mode 100644
index 000000000000..fe4c0a4a53a2
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
@@ -0,0 +1,20 @@
+/* clock.h: proc-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Delete define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CLOCK_H
+#define _ASM_PROC_CLOCK_H
+
+#include <unit/clock.h>
+
+#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
new file mode 100644
index 000000000000..4c4319e241d1
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
@@ -0,0 +1,103 @@
+/* MN2WS0050 on-board DMA controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd4005000+(N*0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_RYBY 0x0000000d /* - NAND Flash RY/BY request source */
+#define DMxCTR_BG_RMC 0x0000000e /* - remote controller output */
+#define DMxCTR_BG_XIRQ12 0x00000011 /* - XIRQ12 pin interrupt source */
+#define DMxCTR_BG_XIRQ13 0x00000012 /* - XIRQ13 pin interrupt source */
+#define DMxCTR_BG_TCK 0x00000014 /* - tick timer underflow */
+#define DMxCTR_BG_SC4TX 0x00000019 /* - serial port4 transmission */
+#define DMxCTR_BG_SC4RX 0x0000001a /* - serial port4 reception */
+#define DMxCTR_BG_SC5TX 0x0000001b /* - serial port5 transmission */
+#define DMxCTR_BG_SC5RX 0x0000001c /* - serial port5 reception */
+#define DMxCTR_BG_SC6TX 0x0000001d /* - serial port6 transmission */
+#define DMxCTR_BG_SC6RX 0x0000001e /* - serial port6 reception */
+#define DMxCTR_BG_TMSUFLOW 0x0000001f /* - timestamp timer underflow */
+#define DMxCTR_SAM 0x00000060 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000300 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_RRE 0x00008000 /* DMA round robin enable */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_PERR 0x40000000 /* DMA transfer parameter error flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd4005004+(N*0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd4005008+(N*0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd400500c+(N*0x100), u32) /* size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd4005010+(N*0x100), u32) /* intermittent size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers -1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
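
For orientation only, a minimal sketch of a software-requested memory-to-memory copy on channel 0 using the definitions above; phys_src, phys_dst and len are hypothetical, cache maintenance and the DM0IRQ completion interrupt are omitted, and writing DMxCTR_RQF to kick a BG_SOFT transfer is an assumption rather than something this header guarantees:

	DMxSRC(0) = (u32) phys_src;			/* source address */
	DMxDST(0) = (u32) phys_dst;			/* destination address */
	DMxSIZ(0) = len & DMxSIZ_CT;			/* byte count */
	DMxCTR(0) = DMxCTR_BG_SOFT | DMxCTR_SAM_INCR | DMxCTR_DAM_INCR |
		    DMxCTR_UT_4 | DMxCTR_TEN | DMxCTR_RQF;	/* enable and request */
	while (!(DMxCTR(0) & DMxCTR_XEND))
		cpu_relax();				/* busy-wait for completion */
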
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
new file mode 100644
index 000000000000..a1e977273d19
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x003fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD0 __SYSREG(0xd4000200, u32)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD0)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD0)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/irq.h b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
new file mode 100644
index 000000000000..37777a85ab6f
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
@@ -0,0 +1,49 @@
+/* MN2WS0050 on-board interrupt controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define extended IRQ number for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROC_IRQ_H
+#define _PROC_IRQ_H
+
+#ifdef __KERNEL__
+
+#define GxICR_NUM_IRQS 163
+#ifdef CONFIG_SMP
+#define GxICR_NUM_EXT_IRQS 197
+#endif /* CONFIG_SMP */
+
+#define GxICR_NUM_XIRQS 16
+
+#define XIRQ0 34
+#define XIRQ1 35
+#define XIRQ2 36
+#define XIRQ3 37
+#define XIRQ4 38
+#define XIRQ5 39
+#define XIRQ6 40
+#define XIRQ7 41
+#define XIRQ8 42
+#define XIRQ9 43
+#define XIRQ10 44
+#define XIRQ11 45
+#define XIRQ12 46
+#define XIRQ13 47
+#define XIRQ14 48
+#define XIRQ15 49
+
+#define XIRQ2IRQ(num) (XIRQ0 + num)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PROC_IRQ_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
new file mode 100644
index 000000000000..84448f3828b3
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
@@ -0,0 +1,120 @@
+/* NAND flash interface register definitions
+ *
+ * Copyright (C) 2008-2009 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PROC_NAND_REGS_H_
+#define _PROC_NAND_REGS_H_
+
+/* command register */
+#define FCOMMAND_0 __SYSREG(0xd8f00000, u8) /* fcommand[24:31] */
+#define FCOMMAND_1 __SYSREG(0xd8f00001, u8) /* fcommand[16:23] */
+#define FCOMMAND_2 __SYSREG(0xd8f00002, u8) /* fcommand[8:15] */
+#define FCOMMAND_3 __SYSREG(0xd8f00003, u8) /* fcommand[0:7] */
+
+/* for dma 16 byte trans, use FCOMMAND2 register */
+#define FCOMMAND2_0 __SYSREG(0xd8f00110, u8) /* fcommand2[24:31] */
+#define FCOMMAND2_1 __SYSREG(0xd8f00111, u8) /* fcommand2[16:23] */
+#define FCOMMAND2_2 __SYSREG(0xd8f00112, u8) /* fcommand2[8:15] */
+#define FCOMMAND2_3 __SYSREG(0xd8f00113, u8) /* fcommand2[0:7] */
+
+#define FCOMMAND_FIEN 0x80 /* nand flash I/F enable */
+#define FCOMMAND_BW_8BIT 0x00 /* 8bit bus width */
+#define FCOMMAND_BW_16BIT 0x40 /* 16bit bus width */
+#define FCOMMAND_BLOCKSZ_SMALL 0x00 /* small block */
+#define FCOMMAND_BLOCKSZ_LARGE 0x20 /* large block */
+#define FCOMMAND_DMASTART 0x10 /* dma start */
+#define FCOMMAND_RYBY 0x08 /* ready/busy flag */
+#define FCOMMAND_RYBYINTMSK 0x04 /* mask ready/busy interrupt */
+#define FCOMMAND_XFWP 0x02 /* write protect enable */
+#define FCOMMAND_XFCE 0x01 /* flash device disable */
+#define FCOMMAND_SEQKILL 0x10 /* stop seq-read */
+#define FCOMMAND_ANUM 0x07 /* address cycle */
+#define FCOMMAND_ANUM_NONE 0x00 /* address cycle none */
+#define FCOMMAND_ANUM_1CYC 0x01 /* address cycle 1cycle */
+#define FCOMMAND_ANUM_2CYC 0x02 /* address cycle 2cycle */
+#define FCOMMAND_ANUM_3CYC 0x03 /* address cycle 3cycle */
+#define FCOMMAND_ANUM_4CYC 0x04 /* address cycle 4cycle */
+#define FCOMMAND_ANUM_5CYC 0x05 /* address cycle 5cycle */
+#define FCOMMAND_FCMD_READ0 0x00 /* read1 command */
+#define FCOMMAND_FCMD_SEQIN 0x80 /* page program 1st command */
+#define FCOMMAND_FCMD_PAGEPROG 0x10 /* page program 2nd command */
+#define FCOMMAND_FCMD_RESET 0xff /* reset command */
+#define FCOMMAND_FCMD_ERASE1 0x60 /* erase 1st command */
+#define FCOMMAND_FCMD_ERASE2 0xd0 /* erase 2nd command */
+#define FCOMMAND_FCMD_STATUS 0x70 /* read status command */
+#define FCOMMAND_FCMD_READID 0x90 /* read id command */
+#define FCOMMAND_FCMD_READOOB 0x50 /* read3 command */
+/* address register */
+#define FADD __SYSREG(0xd8f00004, u32)
+/* address register 2 */
+#define FADD2 __SYSREG(0xd8f00008, u32)
+/* error judgement register */
+#define FJUDGE __SYSREG(0xd8f0000c, u32)
+#define FJUDGE_NOERR 0x0 /* no error */
+#define FJUDGE_1BITERR 0x1 /* 1bit error in data area */
+#define FJUDGE_PARITYERR 0x2 /* parity error */
+#define FJUDGE_UNCORRECTABLE 0x3 /* uncorrectable error */
+#define FJUDGE_ERRJDG_MSK 0x3 /* mask of judgement result */
+/* 1st ECC store register */
+#define FECC11 __SYSREG(0xd8f00010, u32)
+/* 2nd ECC store register */
+#define FECC12 __SYSREG(0xd8f00014, u32)
+/* 3rd ECC store register */
+#define FECC21 __SYSREG(0xd8f00018, u32)
+/* 4th ECC store register */
+#define FECC22 __SYSREG(0xd8f0001c, u32)
+/* 5th ECC store register */
+#define FECC31 __SYSREG(0xd8f00020, u32)
+/* 6th ECC store register */
+#define FECC32 __SYSREG(0xd8f00024, u32)
+/* 7th ECC store register */
+#define FECC41 __SYSREG(0xd8f00028, u32)
+/* 8th ECC store register */
+#define FECC42 __SYSREG(0xd8f0002c, u32)
+/* data register */
+#define FDATA __SYSREG(0xd8f00030, u32)
+/* access pulse register */
+#define FPWS __SYSREG(0xd8f00100, u32)
+#define FPWS_PWS1W_2CLK 0x00000000 /* write pulse width 2clock */
+#define FPWS_PWS1W_3CLK 0x01000000 /* write pulse width 3clock */
+#define FPWS_PWS1W_4CLK 0x02000000 /* write pulse width 4clock */
+#define FPWS_PWS1W_5CLK 0x03000000 /* write pulse width 5clock */
+#define FPWS_PWS1W_6CLK 0x04000000 /* write pulse width 6clock */
+#define FPWS_PWS1W_7CLK 0x05000000 /* write pulse width 7clock */
+#define FPWS_PWS1W_8CLK 0x06000000 /* write pulse width 8clock */
+#define FPWS_PWS1R_3CLK 0x00010000 /* read pulse width 3clock */
+#define FPWS_PWS1R_4CLK 0x00020000 /* read pulse width 4clock */
+#define FPWS_PWS1R_5CLK 0x00030000 /* read pulse width 5clock */
+#define FPWS_PWS1R_6CLK 0x00040000 /* read pulse width 6clock */
+#define FPWS_PWS1R_7CLK 0x00050000 /* read pulse width 7clock */
+#define FPWS_PWS1R_8CLK 0x00060000 /* read pulse width 8clock */
+#define FPWS_PWS2W_2CLK 0x00000100 /* write pulse interval 2clock */
+#define FPWS_PWS2W_3CLK 0x00000200 /* write pulse interval 3clock */
+#define FPWS_PWS2W_4CLK 0x00000300 /* write pulse interval 4clock */
+#define FPWS_PWS2W_5CLK 0x00000400 /* write pulse interval 5clock */
+#define FPWS_PWS2W_6CLK 0x00000500 /* write pulse interval 6clock */
+#define FPWS_PWS2R_2CLK 0x00000001 /* read pulse interval 2clock */
+#define FPWS_PWS2R_3CLK 0x00000002 /* read pulse interval 3clock */
+#define FPWS_PWS2R_4CLK 0x00000003 /* read pulse interval 4clock */
+#define FPWS_PWS2R_5CLK 0x00000004 /* read pulse interval 5clock */
+#define FPWS_PWS2R_6CLK 0x00000005 /* read pulse interval 6clock */
+/* command register 2 */
+#define FCOMMAND2 __SYSREG(0xd8f00110, u32)
+/* transfer frequency register */
+#define FNUM __SYSREG(0xd8f00114, u32)
+#define FSDATA_ADDR 0xd8f00400
+/* active data register */
+#define FSDATA __SYSREG(FSDATA_ADDR, u32)
+
+#endif /* _PROC_NAND_REGS_H_ */
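
As a small, hedged illustration of the ECC judgement interface above (the command/address sequencing that precedes a page read is omitted and driver-specific; ret is a hypothetical local):

	switch (FJUDGE & FJUDGE_ERRJDG_MSK) {
	case FJUDGE_NOERR:
		break;					/* data is clean */
	case FJUDGE_1BITERR:
		break;					/* correctable; details in FECCnn */
	default:
		ret = -EBADMSG;				/* parity or uncorrectable error */
		break;
	}
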
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/proc.h b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
new file mode 100644
index 000000000000..90d5cadd05bd
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
@@ -0,0 +1,18 @@
+/* proc.h: MN2WS0050 processor description
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_PROC_H
+#define _ASM_PROC_PROC_H
+
+#define PROCESSOR_VENDOR_NAME "Panasonic"
+#define PROCESSOR_MODEL_NAME "mn2ws0050"
+
+#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
new file mode 100644
index 000000000000..22f277fbb4de
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
@@ -0,0 +1,51 @@
+/* MN10300/AM33v2 Microcontroller SMP registers
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ * Created:
+ * 13-Nov-2006 MEI Add extended cache and atomic operation register
+ * for SMP support.
+ * 23-Feb-2007 MEI Add define for gdbstub SMP.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_SMP_REGS_H
+#define _ASM_PROC_SMP_REGS_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/cpu-regs.h>
+
+/*
+ * Reference to the interrupt controllers of other CPUs
+ */
+#define CROSS_ICR_CPU_SHIFT 16
+
+#define CROSS_GxICR(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ ((X) >= 64 && (X) < 192) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u16)
+#define CROSS_GxICR_u8(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u8)
+
+/* CPU ID register */
+#define CPUID __SYSREGC(0xc0000054, u32)
+#define CPUID_MASK 0x00000007 /* CPU ID mask */
+
+/* extended cache control register */
+#define ECHCTR __SYSREG(0xc0000c20, u32)
+#define ECHCTR_IBCM 0x00000001 /* instruction cache broadcast mask */
+#define ECHCTR_DBCM 0x00000002 /* data cache broadcast mask */
+#define ECHCTR_ISPM 0x00000004 /* instruction cache snoop mask */
+#define ECHCTR_DSPM 0x00000008 /* data cache snoop mask */
+
+#define NMIAGR __SYSREG(0xd400013c, u16)
+#define NMIAGR_GN 0x03fc
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PROC_SMP_REGS_H */
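
A one-line sketch of how SMP code can identify the executing core with the register above:

	unsigned int this_cpu = CPUID & CPUID_MASK;	/* 0..7 */
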
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
new file mode 100644
index 000000000000..c58249b9525a
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/proc-init.c
@@ -0,0 +1,134 @@
+/* MN2WS0050 processor initialisation
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/busctl-regs.h>
+#include <unit/timex.h>
+#include <asm/fpu.h>
+#include <asm/rtc.h>
+
+#define MEMCONF __SYSREGC(0xdf800400, u32)
+
+/*
+ * initialise the on-silicon processor peripherals
+ */
+asmlinkage void __init processor_init(void)
+{
+ int loop;
+
+ /* set up the exception table first */
+ for (loop = 0x000; loop < 0x400; loop += 8)
+ __set_intr_stub(loop, __common_exception);
+
+ __set_intr_stub(EXCEP_ITLBMISS, itlb_miss);
+ __set_intr_stub(EXCEP_DTLBMISS, dtlb_miss);
+ __set_intr_stub(EXCEP_IAERROR, itlb_aerror);
+ __set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
+ __set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
+ __set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
+ __set_intr_stub(EXCEP_SYSCALL0, system_call);
+
+ __set_intr_stub(EXCEP_NMI, nmi_handler);
+ __set_intr_stub(EXCEP_WDT, nmi_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL0, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL1, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL2, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL3, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL4, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL5, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL6, irq_handler);
+
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+#ifndef CONFIG_MN10300_HAS_CACHE_SNOOP
+ mn10300_dcache_flush_inv();
+ mn10300_icache_inv();
+#endif
+
+ /* disable all interrupts and set to priority 6 (lowest) */
+#ifdef CONFIG_SMP
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#else /* !CONFIG_SMP */
+ for (loop = 0; loop < NR_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#endif /* !CONFIG_SMP */
+
+ /* clear the timers */
+ TM0MD = 0;
+ TM1MD = 0;
+ TM2MD = 0;
+ TM3MD = 0;
+ TM4MD = 0;
+ TM5MD = 0;
+ TM6MD = 0;
+ TM6MDA = 0;
+ TM6MDB = 0;
+ TM7MD = 0;
+ TM8MD = 0;
+ TM9MD = 0;
+ TM10MD = 0;
+ TM11MD = 0;
+ TM12MD = 0;
+ TM13MD = 0;
+ TM14MD = 0;
+ TM15MD = 0;
+
+ calibrate_clock();
+}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long memconf = MEMCONF;
+ unsigned long size = 0; /* order: MByte */
+
+ *mem_base = 0x90000000; /* fixed address */
+
+ switch (memconf & 0x00000003) {
+ case 0x01:
+ size = 256 / 8; /* 256 Mbit per chip */
+ break;
+ case 0x02:
+ size = 512 / 8; /* 512 Mbit per chip */
+ break;
+ case 0x03:
+ size = 1024 / 8; /* 1 Gbit per chip */
+ break;
+ default:
+ panic("Invalid SDRAM size");
+ break;
+ }
+
+ printk(KERN_INFO "DDR2-SDRAM: %luMB x 2 @%08lx\n", size, *mem_base);
+
+ *mem_size = (size * 2) << 20;
+}
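
Worked example of the decode above: if MEMCONF reports 0x02 in its low two bits, each chip is 512Mbit = 64MB, the banner reads "DDR2-SDRAM: 64MB x 2", and *mem_size becomes (64 * 2) << 20 = 128MB at the fixed base 0x90000000.
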
diff --git a/arch/mn10300/unit-asb2303/include/unit/clock.h b/arch/mn10300/unit-asb2303/include/unit/clock.h
index 2a0bf79ab968..0316907a012e 100644
--- a/arch/mn10300/unit-asb2303/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2303/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2303/include/unit/serial.h b/arch/mn10300/unit-asb2303/include/unit/serial.h
index 047566cd2e36..991e356bac5f 100644
--- a/arch/mn10300/unit-asb2303/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2303/include/unit/serial.h
@@ -22,6 +22,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2303 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index f206b63c95b4..cc18fe7d8b90 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2303-specific timer specifcations
+/* ASB2303-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t)TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
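
Worked example of the prescaler selection above, assuming the ASB2303's 33333333 Hz IOCLK and HZ = 100: undivided, (33333333 + 50) / 100 - 1 = 333332, which exceeds TMJCBR_MAX (65535); divided by 8 it is 41666, which fits, so IOCLK_PRESCALE is 8 and the jiffies counter runs from IOCLK/8. Note also that the timestamp timers count down, which is why read_timestamp_counter() now returns the bitwise complement of TMTSCBC, turning it into an increasing cycle count.
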
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 70e8cb4ea266..834a76aa551a 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -31,6 +31,14 @@ asmlinkage void __init unit_init(void)
SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL);
SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL);
SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL);
+
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#endif
+
+#ifdef CONFIG_ETHERNET_IRQ_LEVEL
+ set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
}
/*
@@ -51,7 +59,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2305/include/unit/clock.h b/arch/mn10300/unit-asb2305/include/unit/clock.h
index 67be3f2eb18e..29e3425431cf 100644
--- a/arch/mn10300/unit-asb2305/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2305/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2305/include/unit/serial.h b/arch/mn10300/unit-asb2305/include/unit/serial.h
index 8086cc092cec..88c08219315f 100644
--- a/arch/mn10300/unit-asb2305/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2305/include/unit/serial.h
@@ -21,6 +21,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2305 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 serial port
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index d1c72d59fa9f..758af30d1a16 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2305 timer specifcations
+/* ASB2305-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t) TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index 45b40ac6c464..8e6763e6f250 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -93,7 +93,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
struct pci_bus *bus;
struct pci_dev *dev;
int idx;
- struct resource *r, *pr;
+ struct resource *r;
/* Depth-First Search on bus tree */
list_for_each_entry(bus, bus_list, node) {
@@ -105,10 +105,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->flags)
continue;
- pr = pci_find_parent_resource(dev, r);
if (!r->start ||
- !pr ||
- request_resource(pr, r) < 0) {
+ pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
@@ -131,7 +129,7 @@ static void __init pcibios_allocate_resources(int pass)
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
- struct resource *r, *pr;
+ struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -150,8 +148,7 @@ static void __init pcibios_allocate_resources(int pass)
" (f=%lx, d=%d, p=%d)\n",
pci_name(dev), r->start, r->end, r->flags,
disabled, pass);
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of device %s\n",
@@ -184,7 +181,7 @@ static void __init pcibios_allocate_resources(int pass)
static int __init pcibios_assign_resources(void)
{
struct pci_dev *dev = NULL;
- struct resource *r, *pr;
+ struct resource *r;
if (!(pci_probe & PCI_ASSIGN_ROMS)) {
/* Try to use BIOS settings for ROMs, otherwise let
@@ -194,8 +191,7 @@ static int __init pcibios_assign_resources(void)
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
continue;
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
r->end -= r->start;
r->start = 0;
}
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6d8720a0a599..a4954fe82094 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -503,7 +503,7 @@ asmlinkage void __init unit_pci_init(void)
struct pci_ops *o = &pci_direct_ampci;
u32 x;
- set_intr_level(XIRQ1, GxICR_LEVEL_3);
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL));
memset(&bus, 0, sizeof(bus));
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index a76c8e0ab90f..e1becd6b7571 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -26,8 +26,10 @@ asmlinkage void __init unit_init(void)
{
#ifndef CONFIG_GDBSTUB_ON_TTYSx
/* set the 16550 interrupt line to level 3 if not being used for GDB */
- set_intr_level(XIRQ0, GxICR_LEVEL_3);
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
#endif
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
}
/*
@@ -51,7 +53,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2364/Makefile b/arch/mn10300/unit-asb2364/Makefile
new file mode 100644
index 000000000000..b3263ecfc4ff
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+obj-y := unit-init.o leds.o irq-fpga.o
+
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/arch/mn10300/unit-asb2364/include/unit/clock.h b/arch/mn10300/unit-asb2364/include/unit/clock.h
new file mode 100644
index 000000000000..d34ac9a7508b
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/clock.h
@@ -0,0 +1,29 @@
+/* clock.h: unit-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Add define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_CLOCK_H
+#define _ASM_UNIT_CLOCK_H
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_IOCLK 100000000UL /* for DDR800 */
+/*#define MN10300_IOCLK 83333333UL */ /* for DDR667 */
+#define MN10300_IOBCLK MN10300_IOCLK /* IOBCLK is equal to IOCLK */
+
+#endif /* !__ASSEMBLY__ */
+
+#define MN10300_WDCLK 27000000UL
+
+#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
new file mode 100644
index 000000000000..7cf12054db65
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
@@ -0,0 +1,52 @@
+/* ASB2364 FPGA registers
+ */
+
+#ifndef _ASM_UNIT_FPGA_REGS_H
+#define _ASM_UNIT_FPGA_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+#define ASB2364_FPGA_REG_RESET_LAN __SYSREG(0xa9001300, u16)
+#define ASB2364_FPGA_REG_RESET_UART __SYSREG(0xa9001304, u16)
+#define ASB2364_FPGA_REG_RESET_I2C __SYSREG(0xa9001308, u16)
+#define ASB2364_FPGA_REG_RESET_USB __SYSREG(0xa900130c, u16)
+#define ASB2364_FPGA_REG_RESET_AV __SYSREG(0xa9001310, u16)
+
+#define ASB2364_FPGA_REG_IRQ(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_IRQ_LAN ASB2364_FPGA_REG_IRQ(0)
+#define ASB2364_FPGA_REG_IRQ_UART ASB2364_FPGA_REG_IRQ(1)
+#define ASB2364_FPGA_REG_IRQ_I2C ASB2364_FPGA_REG_IRQ(2)
+#define ASB2364_FPGA_REG_IRQ_USB ASB2364_FPGA_REG_IRQ(3)
+#define ASB2364_FPGA_REG_IRQ_FPGA ASB2364_FPGA_REG_IRQ(5)
+
+#define ASB2364_FPGA_REG_MASK(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_MASK_LAN ASB2364_FPGA_REG_MASK(0)
+#define ASB2364_FPGA_REG_MASK_UART ASB2364_FPGA_REG_MASK(1)
+#define ASB2364_FPGA_REG_MASK_I2C ASB2364_FPGA_REG_MASK(2)
+#define ASB2364_FPGA_REG_MASK_USB ASB2364_FPGA_REG_MASK(3)
+#define ASB2364_FPGA_REG_MASK_FPGA ASB2364_FPGA_REG_MASK(5)
+
+#define ASB2364_FPGA_REG_CPLD5_SET1 __SYSREG(0xa9002500, u16)
+#define ASB2364_FPGA_REG_CPLD5_SET2 __SYSREG(0xa9002504, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET1 __SYSREG(0xa9002600, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET2 __SYSREG(0xa9002604, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET1 __SYSREG(0xa9002700, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET2 __SYSREG(0xa9002704, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET1 __SYSREG(0xa9002800, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET2 __SYSREG(0xa9002804, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET1 __SYSREG(0xa9002900, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET2 __SYSREG(0xa9002904, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET1 __SYSREG(0xa9002a00, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET2 __SYSREG(0xa9002a04, u16)
+
+#define SyncExBus() \
+ do { \
+ unsigned short w; \
+ w = *(volatile short *)0xa9000000; \
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_UNIT_FPGA_REGS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/irq.h b/arch/mn10300/unit-asb2364/include/unit/irq.h
new file mode 100644
index 000000000000..786148e46565
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/irq.h
@@ -0,0 +1,35 @@
+/* ASB2364 FPGA irq numbers
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _UNIT_IRQ_H
+#define _UNIT_IRQ_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#define NR_CPU_IRQS GxICR_NUM_EXT_IRQS
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#endif
+
+enum {
+ FPGA_LAN_IRQ = NR_CPU_IRQS,
+ FPGA_UART_IRQ,
+ FPGA_I2C_IRQ,
+ FPGA_USB_IRQ,
+ FPGA_RESERVED_IRQ,
+ FPGA_FPGA_IRQ,
+ NR_IRQS
+};
+
+extern void __init irq_fpga_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _UNIT_IRQ_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/leds.h b/arch/mn10300/unit-asb2364/include/unit/leds.h
new file mode 100644
index 000000000000..03a3933ad323
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/leds.h
@@ -0,0 +1,54 @@
+/* Unit-specific leds
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_LEDS_H
+#define _ASM_UNIT_LEDS_H
+
+#include <asm/pio-regs.h>
+#include <asm/cpu-regs.h>
+#include <asm/exceptions.h>
+
+#define MN10300_USE_7SEGLEDS 0
+
+#define ASB2364_7SEGLEDS __SYSREG(0xA9001630, u32)
+
+/*
+ * use the 7-segment LEDs to indicate states
+ */
+
+#if MN10300_USE_7SEGLEDS
+/* flip the 7-segment LEDs between "Gdb-" and "----" */
+#define mn10300_set_gdbleds(ONOFF) \
+ do { \
+ ASB2364_7SEGLEDS = (ONOFF) ? 0x8543077f : 0x7f7f7f7f; \
+ } while (0)
+#else
+#define mn10300_set_gdbleds(ONOFF) do {} while (0)
+#endif
+
+#if MN10300_USE_7SEGLEDS
+/* indicate double-fault by displaying "db-f" on the LEDs */
+#define mn10300_set_dbfleds \
+ mov 0x43077f1d,d0 ; \
+ mov d0,(ASB2364_7SEGLEDS)
+#else
+#define mn10300_set_dbfleds
+#endif
+
+#ifndef __ASSEMBLY__
+extern void peripheral_leds_display_exception(enum exception_code);
+extern void peripheral_leds_led_chase(void);
+extern void peripheral_leds7x4_display_dec(unsigned int, unsigned int);
+extern void peripheral_leds7x4_display_hex(unsigned int, unsigned int);
+extern void debug_to_serial(const char *, int);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_LEDS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/serial.h b/arch/mn10300/unit-asb2364/include/unit/serial.h
new file mode 100644
index 000000000000..7f048bbfdfd7
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/serial.h
@@ -0,0 +1,151 @@
+/* Unit-specific 8250 serial ports
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_SERIAL_H
+#define _ASM_UNIT_SERIAL_H
+
+#include <asm/cpu-regs.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+#include <linux/serial_reg.h>
+
+#define SERIAL_PORT0_BASE_ADDRESS 0xA8200000
+
+#define SERIAL_IRQ XIRQ1 /* single serial (TL16C550C) (Lo) */
+
+/*
+ * The ASB2364 has a 12.288 MHz clock for the UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD (12288000 / 16)
+
+/*
+ * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
+ */
+#ifndef CONFIG_GDBSTUB_ON_TTYSx
+
+#define SERIAL_PORT_DFNS \
+ { \
+ .baud_base = BASE_BAUD, \
+ .irq = SERIAL_IRQ, \
+ .flags = STD_COM_FLAGS, \
+ .iomem_base = (u8 *) SERIAL_PORT0_BASE_ADDRESS, \
+ .iomem_reg_shift = 1, \
+ .io_type = SERIAL_IO_MEM, \
+ },
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_PORT_DFNS /* stolen by gdb-stub */
+
+#if defined(CONFIG_GDBSTUB_ON_TTYS0)
+#define GDBPORT_SERIAL_RX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_RX * 4, u8)
+#define GDBPORT_SERIAL_TX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_TX * 4, u8)
+#define GDBPORT_SERIAL_DLL __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLL * 4, u8)
+#define GDBPORT_SERIAL_DLM __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLM * 4, u8)
+#define GDBPORT_SERIAL_IER __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 4, u8)
+#define GDBPORT_SERIAL_IIR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IIR * 4, u8)
+#define GDBPORT_SERIAL_FCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_FCR * 4, u8)
+#define GDBPORT_SERIAL_LCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LCR * 4, u8)
+#define GDBPORT_SERIAL_MCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MCR * 4, u8)
+#define GDBPORT_SERIAL_LSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LSR * 4, u8)
+#define GDBPORT_SERIAL_MSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MSR * 4, u8)
+#define GDBPORT_SERIAL_SCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_SCR * 4, u8)
+#define GDBPORT_SERIAL_IRQ SERIAL_IRQ
+
+#elif defined(CONFIG_GDBSTUB_ON_TTYS1)
+#error The ASB2364 does not have a /dev/ttyS1
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+ char ch;
+
+#define LSR_WAIT_FOR(STATE) \
+ do {} while (!(GDBPORT_SERIAL_LSR & UART_LSR_##STATE))
+#define FLOWCTL_QUERY(LINE) \
+ ({ GDBPORT_SERIAL_MSR & UART_MSR_##LINE; })
+#define FLOWCTL_WAIT_FOR(LINE) \
+ do {} while (!(GDBPORT_SERIAL_MSR & UART_MSR_##LINE))
+#define FLOWCTL_CLEAR(LINE) \
+ do { GDBPORT_SERIAL_MCR &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET(LINE) \
+ do { GDBPORT_SERIAL_MCR |= UART_MCR_##LINE; } while (0)
+
+ FLOWCTL_SET(DTR);
+
+ for (; n > 0; n--) {
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+
+ ch = *p++;
+ if (ch == 0x0a) {
+ GDBPORT_SERIAL_TX = 0x0d;
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+ }
+ GDBPORT_SERIAL_TX = ch;
+ }
+
+ FLOWCTL_CLEAR(DTR);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_INITIALIZE \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CHECK_INTERRUPT \
+do { \
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) == 0x0001) { \
+ return IRQ_NONE; \
+ } \
+} while (0)
+
+#define SERIAL_CLEAR_INTERRUPT \
+do { \
+ ASB2364_FPGA_REG_IRQ_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_SET_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CLEAR_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0000; \
+ SyncExBus(); \
+} while (0)
+
+#endif /* _ASM_UNIT_SERIAL_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/smsc911x.h b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
new file mode 100644
index 000000000000..4c1ede535fa9
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
@@ -0,0 +1,171 @@
+/* Support for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_SMSC911X_H
+#define _ASM_UNIT_SMSC911X_H
+
+#include <linux/netdevice.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+
+#define MN10300_USE_EXT_EEPROM
+
+
+#define SMSC911X_BASE 0xA8000000UL
+#define SMSC911X_BASE_END 0xA8000100UL
+#define SMSC911X_IRQ FPGA_LAN_IRQ
+
+/*
+ * Allow the FPGA to be initialised by the SMSC911x driver
+ */
+#undef SMSC_INITIALIZE
+#define SMSC_INITIALIZE() \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_LAN = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#ifdef MN10300_USE_EXT_EEPROM
+#include <linux/delay.h>
+#include <unit/clock.h>
+
+#define EEPROM_ADDRESS 0xA0
+#define MAC_OFFSET 0x0008
+#define USE_IIC_CH 0 /* 0 or 1 */
+#define IIC_OFFSET (0x80000 * USE_IIC_CH)
+#define IIC_DTRM __SYSREG(0xd8400000 + IIC_OFFSET, u32)
+#define IIC_DREC __SYSREG(0xd8400004 + IIC_OFFSET, u32)
+#define IIC_MYADD __SYSREG(0xd8400008 + IIC_OFFSET, u32)
+#define IIC_CLK __SYSREG(0xd840000c + IIC_OFFSET, u32)
+#define IIC_BRST __SYSREG(0xd8400010 + IIC_OFFSET, u32)
+#define IIC_HOLD __SYSREG(0xd8400014 + IIC_OFFSET, u32)
+#define IIC_BSTS __SYSREG(0xd8400018 + IIC_OFFSET, u32)
+#define IIC_ICR __SYSREG(0xd4000080 + 4 * USE_IIC_CH, u16)
+
+#define IIC_CLK_PLS ((unsigned short)(MN10300_IOCLK / 100000 - 1))
+#define IIC_CLK_LOW ((unsigned short)(IIC_CLK_PLS / 2))
+
+#define SYS_IIC_DTRM_Bit_STA ((unsigned short)0x0400)
+#define SYS_IIC_DTRM_Bit_STO ((unsigned short)0x0200)
+#define SYS_IIC_DTRM_Bit_ACK ((unsigned short)0x0100)
+#define SYS_IIC_DTRM_Bit_DATA ((unsigned short)0x00FF)
+
+static inline void POLL_INT_REQ(volatile u16 *icr)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ while (!(*icr & GxICR_REQUEST))
+ ;
+ flags = arch_local_cli_save();
+ tmp = *icr;
+ *icr = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = *icr;
+ arch_local_irq_restore(flags);
+}
+
+/*
+ * Implement the SMSC911x hook for MAC address retrieval
+ */
+#undef smsc_get_mac
+static inline int smsc_get_mac(struct net_device *dev)
+{
+ unsigned char *mac_buf = dev->dev_addr;
+ int i;
+ unsigned short value;
+ unsigned int data;
+ int mac_length = 6;
+ int check;
+ u16 orig_gicr, tmp;
+ unsigned long flags;
+
+ /* save original GnICR and clear GnICR.IE */
+ flags = arch_local_cli_save();
+ orig_gicr = IIC_ICR;
+ IIC_ICR = orig_gicr & GxICR_LEVEL;
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ IIC_MYADD = 0x00000008;
+ IIC_CLK = (IIC_CLK_LOW << 16) + (IIC_CLK_PLS);
+ /* bus hung recovery */
+
+ while (1) {
+ check = 0;
+ for (i = 0; i < 3; i++) {
+ if ((IIC_BSTS & 0x00000003) == 0x00000003)
+ check++;
+ udelay(3);
+ }
+
+ if (check == 3) {
+ IIC_BRST = 0x00000003;
+ break;
+ } else {
+ for (i = 0; i < 3; i++) {
+ IIC_BRST = 0x00000002;
+ udelay(8);
+ IIC_BRST = 0x00000003;
+ udelay(8);
+ }
+ }
+ }
+
+ IIC_BRST = 0x00000002;
+ IIC_BRST = 0x00000003;
+
+ value = SYS_IIC_DTRM_Bit_STA | SYS_IIC_DTRM_Bit_ACK;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0000);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ /** send offset of MAC address in EEPROM **/
+ IIC_DTRM = (unsigned char)((MAC_OFFSET & 0xFF00) >> 8);
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = (unsigned char)(MAC_OFFSET & 0x00FF);
+ POLL_INT_REQ(&IIC_ICR);
+
+ udelay(1000);
+
+ value = SYS_IIC_DTRM_Bit_STA;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0001);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = 0x00000000;
+ while (mac_length > 0) {
+ POLL_INT_REQ(&IIC_ICR);
+
+ data = IIC_DREC;
+ mac_length--;
+ if (mac_length == 0)
+ value = 0x00000300; /* stop IIC bus */
+ else if (mac_length == 1)
+ value = 0x00000100; /* no ack */
+ else
+ value = 0x00000000; /* ack */
+ IIC_DTRM = value;
+ *mac_buf++ = (unsigned char)(data & 0xff);
+ }
+
+ /* restore GnICR.LV and GnICR.IE */
+ flags = arch_local_cli_save();
+ IIC_ICR = (orig_gicr & (GxICR_LEVEL | GxICR_ENABLE));
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ return 0;
+}
+#endif /* MN10300_USE_EXT_EEPROM */
+#endif /* _ASM_UNIT_SMSC911X_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
new file mode 100644
index 000000000000..ddb7ed010706
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -0,0 +1,159 @@
+/* timex.h: MN2WS0038 architecture timer specifications
+ *
+ * Copyright (C) 2002, 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_TIMEX_H
+#define _ASM_UNIT_TIMEX_H
+
+#ifndef __ASSEMBLY__
+#include <linux/irq.h>
+#endif /* __ASSEMBLY__ */
+
+#include <asm/timer-regs.h>
+#include <unit/clock.h>
+#include <asm/param.h>
+
+/*
+ * jiffies counter specifications
+ */
+
+#define TMJCBR_MAX 0xffffff /* 24bit */
+#define TMJCIRQ TMTIRQ
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_SRC_IOBCLK MN10300_IOBCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+
+#define MN10300_JCCLK (MN10300_SRC_IOBCLK)
+#define MN10300_TSCCLK (MN10300_SRC_IOBCLK)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+/* Check bit width of MTM interval value that sets base register */
+#if (MN10300_JC_PER_HZ - 1) > TMJCBR_MAX
+# error MTM tick timer interval value overflows.
+#endif
+
+static inline void stop_jiffies_counter(void)
+{
+ u16 tmp;
+ TMTMD = 0;
+ tmp = TMTMD;
+}
+
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
+
+ TMTBR = cnt;
+ tmp = TMTBR;
+
+ TMTMD = TMTMD_TMTLDE;
+ TMTMD = TMTMD_TMTCNE;
+ tmp = TMTMD;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS) && \
+ !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/*
+ * If we aren't using broadcasting, each core needs its own event timer.
+ * Since CPU0 uses the tick timer which is 24-bits, we use timer 4 & 5
+ * cascaded to 32-bits for CPU1 (but only really use 24-bits to match
+ * CPU0).
+ */
+
+#define TMJC1IRQ TM5IRQ
+
+static inline void stop_jiffies_counter1(void)
+{
+ u8 tmp;
+ TM4MD = 0;
+ TM5MD = 0;
+ tmp = TM4MD;
+ tmp = TM5MD;
+}
+
+static inline void reload_jiffies_counter1(u32 cnt)
+{
+ u32 tmp;
+
+ TM45BR = cnt;
+ tmp = TM45BR;
+
+ TM4MD = TM4MD_INIT_COUNTER;
+ tmp = TM4MD;
+
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_INIT_COUNTER;
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_COUNT_ENABLE;
+ tmp = TM5MD;
+
+ TM4MD = TM4MD_COUNT_ENABLE;
+ tmp = TM4MD;
+}
+#endif /* CONFIG_SMP&GENERIC_CLOCKEVENTS&!GENERIC_CLOCKEVENTS_BROADCAST */
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * timestamp counter specifications
+ */
+#define TMTSCBR_MAX 0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/* Use 32-bit timestamp counter */
+#define TMTSCMD TMSMD
+#define TMTSCBR TMSBR
+#define TMTSCBC TMSBC
+#define TMTSCICR TMSICR
+
+static inline void startup_timestamp_counter(void)
+{
+ u32 sync;
+
+ /* set up TMS(Timestamp) 32bit timer register to count real time
+ * - count down from 4Gig-1 to 0 and wrap at IOBCLK rate
+ */
+
+ TMTSCBR = TMTSCBR_MAX;
+ sync = TMTSCBR;
+
+ TMTSCICR = 0;
+ sync = TMTSCICR;
+
+ TMTSCMD = TMTMD_TMTLDE;
+ TMTSCMD = TMTMD_TMTCNE;
+ sync = TMTSCMD;
+}
+
+static inline void shutdown_timestamp_counter(void)
+{
+ TMTSCMD = 0;
+}
+
+/*
+ * we use a cascaded pair of 16-bit down-counting timers to count I/O
+ * clock cycles for the purposes of time keeping
+ */
+typedef unsigned long cycles_t;
+
+static inline cycles_t read_timestamp_counter(void)
+{
+ return (cycles_t)~TMTSCBC;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_TIMEX_H */
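
[Editor's note] As a worked example of the reload arithmetic above (assuming, purely for illustration, an IOBCLK of 33 MHz and HZ = 100): MN10300_JC_PER_HZ = (33,000,000 + 50) / 100 = 330,000 ticks per jiffy, and the base-register value 330,000 - 1 fits comfortably under TMJCBR_MAX (0xffffff = 16,777,215), so the #if guard does not trip. The "+ HZ / 2" term simply rounds the division to the nearest tick rather than truncating it.
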
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c
new file mode 100644
index 000000000000..fcf29754e4d1
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/irq-fpga.c
@@ -0,0 +1,96 @@
+/* ASB2364 FPGA interrupt multiplexing
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * FPGA PIC operations
+ */
+static void asb2364_fpga_mask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_mask_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_unmask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0000;
+ SyncExBus();
+}
+
+static struct irq_chip asb2364_fpga_pic = {
+ .name = "fpga",
+ .ack = asb2364_fpga_ack,
+ .mask = asb2364_fpga_mask,
+ .mask_ack = asb2364_fpga_mask_ack,
+ .unmask = asb2364_fpga_unmask,
+};
+
+/*
+ * FPGA PIC interrupt handler
+ */
+static irqreturn_t fpga_interrupt(int irq, void *_mask)
+{
+ if ((ASB2364_FPGA_REG_IRQ_LAN & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_LAN_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_UART_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_I2C & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_I2C_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_USB & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_USB_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_FPGA_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define an interrupt action for each FPGA PIC output
+ */
+static struct irqaction fpga_irq[] = {
+ [0] = {
+ .handler = fpga_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED,
+ .name = "fpga",
+ },
+};
+
+/*
+ * Initialise the FPGA's PIC
+ */
+void __init irq_fpga_init(void)
+{
+ int irq;
+
+ for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
+ set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+
+ /* the FPGA drives the XIRQ1 input on the CPU PIC */
+ setup_irq(XIRQ1, &fpga_irq[0]);
+}
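
[Editor's note] This file is a classic cascaded-interrupt demultiplexer: the FPGA's five sources share the CPU's XIRQ1 line (registered via setup_irq() in irq_fpga_init()), a source is pending when bit 0 of its status register reads 0 (hence the != 0x0001 tests), and each pending source is forwarded to its own virtual IRQ with generic_handle_irq(), so drivers behind the mux request FPGA_LAN_IRQ, FPGA_UART_IRQ and so on like any other interrupt.
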
diff --git a/arch/mn10300/unit-asb2364/leds.c b/arch/mn10300/unit-asb2364/leds.c
new file mode 100644
index 000000000000..1ff830c372b3
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/leds.c
@@ -0,0 +1,98 @@
+/* leds.c: ASB2364 peripheral 7seg LEDs x4 support
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
+#include <unit/leds.h>
+
+#if MN10300_USE_7SEGLEDS
+static const u8 asb2364_led_hex_tbl[16] = {
+ 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
+ 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
+};
+
+static const u32 asb2364_led_chase_tbl[6] = {
+ ~0x02020202, /* top - segA */
+ ~0x04040404, /* right top - segB */
+ ~0x08080808, /* right bottom - segC */
+ ~0x10101010, /* bottom - segD */
+ ~0x20202020, /* left bottom - segE */
+ ~0x40404040, /* left top - segF */
+};
+
+static unsigned asb2364_led_chase;
+
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val/1000) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/100) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/10) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val % 10];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val/0x1000) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/0x100) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/0x10) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val % 0x10];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+/* display triple horizontal bar and exception code */
+void peripheral_leds_display_exception(enum exception_code code)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[code % 0x10];
+ leds |= 0x6d010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds_led_chase(void)
+{
+ ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
+ asb2364_led_chase++;
+ if (asb2364_led_chase >= 6)
+ asb2364_led_chase = 0;
+}
+#else /* MN10300_USE_7SEGLEDS */
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
+void peripheral_leds_display_exception(enum exception_code code) { }
+void peripheral_leds_led_chase(void) { }
+#endif /* MN10300_USE_7SEGLEDS */
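
[Editor's note] The display encoding, inferred from the tables above: each byte of the 32-bit ASB2364_7SEGLEDS word drives one digit and the segments appear to be active-low (the chase table inverts its single-segment masks with ~, with bits 1-6 mapping to segments A-F). Bit 7 is then the middle bar, which is why 0x80, the entry for '0', lights everything except it, and bit 0 is the decimal point, which is why the caller's points mask is XOR-ed with 0x01010101 before being OR-ed in.
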
diff --git a/arch/mn10300/unit-asb2364/smsc911x.c b/arch/mn10300/unit-asb2364/smsc911x.c
new file mode 100644
index 000000000000..544a73e94c81
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/smsc911x.c
@@ -0,0 +1,58 @@
+/* Specification for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/smsc911x.h>
+#include <unit/smsc911x.h>
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+ [0] = {
+ .start = SMSC911X_BASE,
+ .end = SMSC911X_BASE_END,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = SMSC911X_IRQ,
+ .end = SMSC911X_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smsc911x_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smsc911x_resources),
+ .resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ }
+};
+
+/*
+ * add platform devices
+ */
+static int __init unit_device_init(void)
+{
+ platform_device_register(&smsc911x_device);
+ return 0;
+}
+
+device_initcall(unit_device_init);
diff --git a/arch/mn10300/unit-asb2364/unit-init.c b/arch/mn10300/unit-asb2364/unit-init.c
new file mode 100644
index 000000000000..11440803db10
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/unit-init.c
@@ -0,0 +1,88 @@
+/* ASB2364 initialisation
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/intctl-regs.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * initialise some of the unit hardware before gdbstub is set up
+ */
+asmlinkage void __init unit_init(void)
+{
+ /* set up the external interrupts */
+
+ /* XIRQ[0]: NAND RXBY */
+ /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */
+
+ /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */
+ SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL);
+
+ /* XIRQ[2]: Extend Slot 1-9 */
+ /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) && \
+ defined(CONFIG_ETHERNET_IRQ_LEVEL) && \
+ (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL)
+# error CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL
+#endif
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#elif defined(CONFIG_ETHERNET_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
+}
+
+/*
+ * initialise the rest of the unit hardware after gdbstub is ready
+ */
+asmlinkage void __init unit_setup(void)
+{
+
+}
+
+/*
+ * initialise the external interrupts used by a unit of this type
+ */
+void __init unit_init_IRQ(void)
+{
+ unsigned int extnum;
+
+ for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) {
+ switch (GET_XIRQ_TRIGGER(extnum)) {
+ /* LEVEL triggered interrupts should be made
+ * post-ACK'able as they hold their lines until
+ * serviced
+ */
+ case XIRQ_TRIGGER_HILEVEL:
+ case XIRQ_TRIGGER_LOWLEVEL:
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
+ break;
+ default:
+ break;
+ }
+ }
+
+#define IRQCTL __SYSREG(0xd5000090, u32)
+ IRQCTL |= 0x02;
+
+ irq_fpga_init();
+}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 79a04a9394d5..abde955b1c21 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -19,6 +19,7 @@ config PARISC
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
@@ -85,6 +86,9 @@ config IRQ_PER_CPU
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ def_bool y
+
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 039880e7d2c9..47f11c707b65 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,8 +24,6 @@
#ifndef __ASSEMBLY__
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index dba11aedce1b..f388a85bba11 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -126,20 +126,20 @@ static inline void *kmap(struct page *page)
#define kunmap(page) kunmap_parisc(page_address(page))
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
{
kunmap_parisc(addr);
pagefault_enable();
}
-#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
-#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index dfa26b67f919..c67dccf2e31f 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -40,7 +40,7 @@ struct irq_chip;
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
-void cpu_end_irq(unsigned int irq);
+void cpu_eoi_irq(unsigned int irq);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 01c15035e783..865f37a8a881 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 1ce7d2851d90..3eb82c2a5ec3 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -813,8 +813,9 @@
#define __NR_perf_event_open (__NR_Linux + 318)
#define __NR_recvmmsg (__NR_Linux + 319)
#define __NR_accept4 (__NR_Linux + 320)
+#define __NR_prlimit64 (__NR_Linux + 321)
-#define __NR_Linux_syscalls (__NR_accept4 + 1)
+#define __NR_Linux_syscalls (__NR_prlimit64 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index efbcee5d2220..5024f643b3b1 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -52,7 +52,7 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
-static void cpu_disable_irq(unsigned int irq)
+static void cpu_mask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -63,7 +63,7 @@ static void cpu_disable_irq(unsigned int irq)
* then gets disabled */
}
-static void cpu_enable_irq(unsigned int irq)
+static void cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,12 +75,6 @@ static void cpu_enable_irq(unsigned int irq)
smp_send_all_nop();
}
-static unsigned int cpu_startup_irq(unsigned int irq)
-{
- cpu_enable_irq(irq);
- return 0;
-}
-
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
@@ -99,7 +93,7 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
-void cpu_end_irq(unsigned int irq)
+void cpu_eoi_irq(unsigned int irq)
{
unsigned long mask = EIEM_MASK(irq);
int cpu = smp_processor_id();
@@ -146,12 +140,10 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
- .startup = cpu_startup_irq,
- .shutdown = cpu_disable_irq,
- .enable = cpu_enable_irq,
- .disable = cpu_disable_irq,
+ .mask = cpu_mask_irq,
+ .unmask = cpu_unmask_irq,
.ack = cpu_ack_irq,
- .end = cpu_end_irq,
+ .eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
@@ -247,10 +239,11 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
if (irq_desc[irq].chip != &cpu_interrupt_type)
return -EBUSY;
+ /* for iosapic interrupts */
if (type) {
- irq_desc[irq].chip = type;
- irq_desc[irq].chip_data = data;
- cpu_interrupt_type.enable(irq);
+ set_irq_chip_and_handler(irq, type, handle_level_irq);
+ set_irq_chip_data(irq, data);
+ cpu_unmask_irq(irq);
}
return 0;
}
@@ -368,7 +361,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
}
#endif
- __do_IRQ(irq);
+ generic_handle_irq(irq);
out:
irq_exit();
@@ -398,14 +391,15 @@ static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- irq_desc[i].chip = &cpu_interrupt_type;
+ set_irq_chip_and_handler(i, &cpu_interrupt_type,
+ handle_level_irq);
}
- irq_desc[TIMER_IRQ].action = &timer_action;
- irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(TIMER_IRQ, handle_percpu_irq);
+ setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
- irq_desc[IPI_IRQ].action = &ipi_action;
- irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(IPI_IRQ, handle_percpu_irq);
+ setup_irq(IPI_IRQ, &ipi_action);
#endif
}
@@ -423,3 +417,4 @@ void __init init_IRQ(void)
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
+
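
[Editor's note] In short, this file drops the legacy __do_IRQ() flow in favour of the generic IRQ layer: every CPU interrupt is bound to cpu_interrupt_type with handle_level_irq via set_irq_chip_and_handler(), the timer and IPI lines switch to handle_percpu_irq, and the chip exposes mask/unmask/ack/eoi callbacks instead of startup/shutdown/enable/disable/end, which is what selecting GENERIC_HARDIRQS_NO__DO_IRQ in the earlier Kconfig hunk declares.
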
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 1ff366cb9685..66d1f17fdb94 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -12,6 +12,7 @@
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -31,12 +32,11 @@
/*
* The PDC console is a simple console, which can be used for debugging
- * boot related problems on HP PA-RISC machines.
+ * boot related problems on HP PA-RISC machines. It is also useful when no
+ * other console works.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
- * Since all character read from that path must be polled, this code never
- * can or will be a fully functional linux console.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
@@ -53,6 +53,7 @@
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
@@ -85,12 +86,138 @@ static int pdc_console_setup(struct console *co, char *options)
#if defined(CONFIG_PDC_CONSOLE)
#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static struct timer_list pdc_console_timer;
+
+extern struct console * console_drivers;
+
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+ return 0;
+}
+
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ if (!tty->count)
+ del_timer(&pdc_console_timer);
+}
+
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ pdc_console_write(NULL, buf, count);
+ return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+ return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* no buffer */
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+static const struct tty_operations pdc_console_tty_ops = {
+ .open = pdc_console_tty_open,
+ .close = pdc_console_tty_close,
+ .write = pdc_console_tty_write,
+ .write_room = pdc_console_tty_write_room,
+ .chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+static void pdc_console_poll(unsigned long unused)
+{
+
+ int data, count = 0;
+
+ struct tty_struct *tty = pdc_console_tty_driver->ttys[0];
+
+ if (!tty)
+ return;
+
+ while (1) {
+ data = pdc_console_poll_key(NULL);
+ if (data == -1)
+ break;
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ count++;
+ }
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ if (tty->count && (pdc_cons.flags & CON_ENABLED))
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static int __init pdc_console_tty_driver_init(void)
+{
+
+ int err;
+ struct tty_driver *drv;
+
+ /* Check if the console driver is still registered.
+ * It is unregistered if the pdc console was not selected as the
+ * primary console. */
+
+ struct console *tmp = console_drivers;
+
+ for (tmp = console_drivers; tmp; tmp = tmp->next)
+ if (tmp == &pdc_cons)
+ break;
+
+ if (!tmp) {
+ printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+ pdc_cons.flags &= ~CON_BOOT;
+
+ drv = alloc_tty_driver(1);
+
+ if (!drv)
+ return -ENOMEM;
+
+ drv->driver_name = "pdc_cons";
+ drv->name = "ttyB";
+ drv->major = MUX_MAJOR;
+ drv->minor_start = 0;
+ drv->type = TTY_DRIVER_TYPE_SYSTEM;
+ drv->init_termios = tty_std_termios;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(drv, &pdc_console_tty_ops);
+
+ err = tty_register_driver(drv);
+ if (err) {
+ printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+ return err;
+ }
+
+ pdc_console_tty_driver = drv;
+
+ /* No need to initialize the pdc_console_timer if tty isn't allocated */
+ init_timer(&pdc_console_timer);
+ pdc_console_timer.function = pdc_console_poll;
+
+ return 0;
+}
+
+module_init(pdc_console_tty_driver_init);
static struct tty_driver * pdc_console_device (struct console *c, int *index)
{
- extern struct tty_driver console_driver;
- *index = c->index ? c->index-1 : fg_console;
- return &console_driver;
+ *index = c->index;
+ return pdc_console_tty_driver;
}
#else
#define pdc_console_device NULL
@@ -101,7 +228,7 @@ static struct console pdc_cons = {
.write = pdc_console_write,
.device = pdc_console_device,
.setup = pdc_console_setup,
- .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
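
[Editor's note] Since PDC input can only be polled, the new TTY driver simply re-arms a timer: PDC_CONS_POLL_DELAY is 30 * HZ / 1000 jiffies, i.e. roughly 30 ms between polls (with HZ = 250, for example, that is 7 jiffies, about 28 ms), and each expiry drains pdc_console_poll_key() into the flip buffer until it returns -1, then pushes the buffer to the line discipline.
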
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index c4f49e45129d..2905b1f52d30 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -110,7 +110,8 @@ void user_enable_block_step(struct task_struct *task)
pa_psw(task)->l = 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
long ret = -EIO;
@@ -120,11 +121,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR:
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, (unsigned long __user *) data);
break;
/* Write the word at location addr in the USER area. This will need
@@ -151,8 +152,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3d52c978738f..74867dfdabe5 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -419,6 +419,7 @@
ENTRY_SAME(perf_event_open)
ENTRY_COMP(recvmmsg)
ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
/* Nothing yet */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 92d977bb5ea8..234e3682cf09 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -619,15 +619,12 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
-
-#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
-#endif
}
#endif
switch (regs->iir & OPCODE3_MASK)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index d58eac1a8288..76ed62ed785b 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -80,8 +80,11 @@ find_unwind_entry(unsigned long addr)
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
- if (e)
+ if (e) {
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
break;
+ }
}
return e;
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index 1f3f225897f5..0bd63b08a79a 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -3,7 +3,7 @@
#
# See arch/parisc/math-emu/README
-EXTRA_CFLAGS += -Wno-parentheses -Wno-implicit-function-declaration \
+ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
-Wno-implicit-int
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 8bdc6a9e5773..1cf20bdfbeca 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
}
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
extern u64 __cputime_msec_factor;
-static inline unsigned long cputime_to_msecs(const cputime_t ct)
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor);
+ return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
}
-static inline cputime_t msecs_to_cputime(const unsigned long ms)
+static inline cputime_t usecs_to_cputime(const unsigned long us)
{
cputime_t ct;
unsigned long sec;
/* have to be a little careful about overflow */
- ct = ms % 1000;
- sec = ms / 1000;
+ ct = us % 1000000;
+ sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
do_div(ct, 1000);
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
deleted file mode 100644
index debc5ed96d6e..000000000000
--- a/arch/powerpc/include/asm/fsldma.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implemention
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
- struct list_head entry;
-
- dma_addr_t address;
- size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
- /* List of hardware address/length pairs */
- struct list_head addresses;
-
- /* Support for extra controller features */
- unsigned int request_count;
- unsigned int src_loop_size;
- unsigned int dst_loop_size;
- bool external_start;
- bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
- dma_addr_t address, size_t length)
-{
- struct fsl_dma_hw_addr *addr;
-
- addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
- if (!addr)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&addr->entry);
- addr->address = address;
- addr->length = length;
-
- list_add_tail(&addr->entry, &slave->addresses);
- return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
- struct fsl_dma_hw_addr *addr, *tmp;
-
- if (slave) {
- list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
- list_del(&addr->entry);
- kfree(addr);
- }
-
- kfree(slave);
- }
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
- struct fsl_dma_slave *slave;
-
- slave = kzalloc(sizeof(*slave), gfp);
- if (!slave)
- return NULL;
-
- INIT_LIST_HEAD(&slave->addresses);
- return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index d10d64a4be38..dbc264010d0b 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
- pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
static inline void *kmap(struct page *page)
{
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
{
- return kmap_atomic_prot(page, type, kmap_prot);
+ return kmap_atomic_prot(page, kmap_prot);
}
static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a7db96f2b5c3..47edde8c3556 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+ ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte) kunmap_atomic(pte)
/*
* Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 49865045d56f..2b09cd522d33 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -193,9 +193,7 @@
(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte) do { } while(0)
-#define pte_unmap_nested(pte) do { } while(0)
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 286d9783d93f..a9b32967cff6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
- long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
+ void __user *datavp = (void __user *) data;
+
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (const void __user *) data);
+ datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
}
return -EPERM;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = ((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)];
}
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
- if (!access_ok(VERIFY_WRITE, data,
+ if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
- ret = __copy_to_user((struct ppc_debug_info __user *)data,
- &dbginfo, sizeof(struct ppc_debug_info)) ?
+ ret = __copy_to_user(datavp, &dbginfo,
+ sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
- if (!access_ok(VERIFY_READ, data,
+ if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
- ret = __copy_from_user(&bp_info,
- (struct ppc_hw_breakpoint __user *)data,
+ ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.dac1,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dac1, datalp);
#else
- ret = put_user(child->thread.dabr,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (void __user *) data);
+ datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (const void __user *) data);
+ datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (void __user *) data);
+ datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (const void __user *) data);
+ datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (void __user *) data);
+ datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (void __user *) data);
+ datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (const void __user *) data);
+ datavp);
#endif
/* Old reverse args ptrace calls */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index d692989a4318..441d2a722f06 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
* memory in this pool does not change.
*/
if (spare_needed && reserve_freed) {
- tmp = min(spare_needed, min(reserve_freed,
- (viodev->cmo.entitled -
- VIO_CMO_MIN_ENT)));
+ tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 857d4173f9c6..e7450bdbe83a 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -29,17 +29,17 @@
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- unsigned int idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ type = kmap_atomic_idx();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ unsigned int idx;
+
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+ }
#endif
+
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
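
[Editor's note] For orientation, the conversion running through these architecture files is to the stack-based atomic kmap scheme: callers no longer pass a km_type slot, the slot index is pushed and popped internally via kmap_atomic_idx_push()/kmap_atomic_idx_pop(), and the generic header wraps __kmap_atomic()/__kunmap_atomic(). A minimal usage sketch under that assumption — not code from this patch — might look like:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* sketch: fill a (possibly highmem) page with the typeless API */
	static void fill_page(struct page *page, int c)
	{
		void *p = kmap_atomic(page);	/* no KM_USERx/KM_PTEx argument */

		memset(p, c, PAGE_SIZE);
		kunmap_atomic(p);		/* pops the per-CPU slot internally */
	}

The kernel's own helpers such as clear_highpage() follow the same pattern.
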
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 412763672d23..9725369d432a 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -50,6 +50,7 @@
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
+#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
#define RIO_CCSR 0x15c
#define RIO_LTLEDCSR 0x0608
@@ -87,6 +88,9 @@
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004
+#define RIO_EPWISR_PINT 0x80000000
+#define RIO_EPWISR_PW 0x00000001
+
#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096
#define RIO_MIN_TX_RING_SIZE 2
@@ -1082,18 +1086,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
struct rio_priv *priv = port->priv;
u32 epwisr, tmp;
- ipwmr = in_be32(&priv->msg_regs->pwmr);
- ipwsr = in_be32(&priv->msg_regs->pwsr);
-
epwisr = in_be32(priv->regs_win + RIO_EPWISR);
- if (epwisr & 0x80000000) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
- out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
- }
+ if (!(epwisr & RIO_EPWISR_PW))
+ goto pw_done;
- if (!(epwisr & 0x00000001))
- return IRQ_HANDLED;
+ ipwmr = in_be32(&priv->msg_regs->pwmr);
+ ipwsr = in_be32(&priv->msg_regs->pwsr);
#ifdef DEBUG_PW
pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1109,20 +1107,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
pr_debug(" PWB");
pr_debug(" )\n");
#endif
- out_be32(&priv->msg_regs->pwsr,
- ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
- if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
- priv->port_write_msg.err_count++;
- pr_info("RIO: Port-Write Transaction Err (%d)\n",
- priv->port_write_msg.err_count);
- }
- if (ipwsr & RIO_IPWSR_PWD) {
- priv->port_write_msg.discard_count++;
- pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
- priv->port_write_msg.discard_count);
- }
-
/* Schedule deferred processing if PW was received */
if (ipwsr & RIO_IPWSR_QFI) {
/* Save PW message (if there is room in FIFO),
@@ -1134,16 +1118,43 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
RIO_PW_MSG_SIZE);
} else {
priv->port_write_msg.discard_count++;
- pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+ pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
priv->port_write_msg.discard_count);
}
+ /* Clear interrupt and issue Clear Queue command. This allows
+ * another port-write to be received.
+ */
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
+ out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
schedule_work(&priv->pw_work);
}
- /* Issue Clear Queue command. This allows another
- * port-write to be received.
- */
- out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+ if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+ priv->port_write_msg.err_count++;
+ pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+ priv->port_write_msg.err_count);
+ /* Clear Transaction Error: port-write controller should be
+ * disabled when clearing this error
+ */
+ out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
+ out_be32(&priv->msg_regs->pwmr, ipwmr);
+ }
+
+ if (ipwsr & RIO_IPWSR_PWD) {
+ priv->port_write_msg.discard_count++;
+ pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+ priv->port_write_msg.discard_count);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
+ }
+
+pw_done:
+ if (epwisr & RIO_EPWISR_PINT) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+ }
return IRQ_HANDLED;
}
@@ -1461,6 +1472,7 @@ int fsl_rio_setup(struct platform_device *dev)
port->host_deviceid = fsl_rio_get_hdid(port->id);
port->priv = priv;
+ port->phys_efptr = 0x100;
rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
@@ -1508,6 +1520,12 @@ int fsl_rio_setup(struct platform_device *dev)
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
+ if (port->host_deviceid >= 0)
+ out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+ RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+ else
+ out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+
priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+ RIO_ATMU_REGS_OFFSET);
priv->maint_atmu_regs = priv->atmu_regs + 1;
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 8b1a52a137c5..40e2ab0fa3f0 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
}
/*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
*/
static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096000);
+ return cputime_div(cputime, 4096);
}
static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096000;
+ return (cputime_t) m * 4096;
}
/*
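
[Editor's note] The s390 constants follow from the CPU-timer format, in which bit 51 ticks once per microsecond, so one microsecond equals 2^12 = 4096 cputime units; the old conversions divided and multiplied by 4,096,000 to obtain milliseconds, the new ones by 4096 to obtain microseconds, with no change in the underlying resolution.
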
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 986dc9476c21..02ace3491c51 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/*
* 31 bit swap entry format:
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 83339d33c4b1..019bb714db49 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -343,7 +343,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
return __poke_user(child, addr, data);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
ptrace_area parea;
int copied, ret;
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index ccf38f06c57d..2fd469807683 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
/*
* Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 174c6422b096..55836188b217 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -325,7 +325,8 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (void __user *)data;
@@ -335,14 +336,14 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = copy_regset_to_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)datap);
+ datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)datap);
+ datap);
break;
default:
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 3da116f47f01..881a3a5f5647 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -481,7 +481,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &ap325rxa_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 71a3368ab1fc..ddc7e4e4d2a0 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -620,7 +620,6 @@ static struct soc_camera_link tw9910_link = {
.bus_id = 1,
.power = tw9910_power,
.board_info = &i2c_camera[0],
- .module_name = "tw9910",
.priv = &tw9910_info,
};
@@ -644,7 +643,6 @@ static struct soc_camera_link mt9t112_link1 = {
.power = mt9t112_power1,
.bus_id = 0,
.board_info = &i2c_camera[1],
- .module_name = "mt9t112",
.priv = &mt9t112_info1,
};
@@ -667,7 +665,6 @@ static struct soc_camera_link mt9t112_link2 = {
.power = mt9t112_power2,
.bus_id = 1,
.board_info = &i2c_camera[2],
- .module_name = "mt9t112",
.priv = &mt9t112_info2,
};
@@ -793,7 +790,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 68994a163f6c..1742849db648 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -333,7 +333,6 @@ static struct soc_camera_link rj54n1_link = {
.power = camera_power,
.board_info = &kfr2r09_i2c_camera,
.i2c_adapter_id = 1,
- .module_name = "rj54n1cb0c",
.priv = &rj54n1_priv,
};
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 662debe4ead2..03af84842559 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -450,7 +450,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &migor_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
@@ -463,7 +462,6 @@ static struct soc_camera_link tw9910_link = {
.power = tw9910_power,
.board_info = &migor_i2c_camera[1],
.i2c_adapter_id = 0,
- .module_name = "tw9910",
.priv = &tw9910_info,
};
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 552ebd9ba82b..8cc1d7295d85 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -550,7 +550,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index ae1a6ef71f37..43528ec656ba 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -427,10 +427,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
-
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 22113f7fc83c..42cb9dd52161 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#ifndef __ASSEMBLY__
#define IOBASE_VADDR 0xff000000
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 2cd42b58cb20..90a15d29feeb 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -365,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
- struct user * dummy = NULL;
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
@@ -383,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
- if (addr == (long)&dummy->fpu.fpscr)
+ if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
- } else
- tmp = ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2];
- } else if (addr == (long) &dummy->u_fpvalid)
+ } else {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = ((unsigned long *)child->thread.xstate)
+ [index >> 2];
+ }
+ } else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
@@ -417,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
- ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2] = data;
+ ((unsigned long *)child->thread.xstate)
+ [index >> 2] = data;
ret = 0;
- } else if (addr == (long) &dummy->u_fpvalid) {
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
@@ -433,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (void __user *)data);
+ datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
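The ptrace_32.c hunks above replace the old trick of deriving register-area offsets from a NULL "struct user *dummy" pointer with plain offsetof(), and widen addr/data to unsigned long. A minimal stand-alone sketch of the equivalence; the struct layout below is a made-up stand-in for asm/user.h, only the idiom itself is taken from the patch:

	#include <stddef.h>
	#include <stdio.h>

	struct user_example {		/* hypothetical stand-in for struct user */
		long regs[16];
		long fpu[34];
		long u_fpvalid;
	};

	int main(void)
	{
		struct user_example *dummy = NULL;

		/* Old idiom: member offset computed from a null-based pointer. */
		unsigned long old_off = (unsigned long) &dummy->fpu;

		/* New idiom: the same offset, with no bogus pointer involved. */
		unsigned long new_off = offsetof(struct user_example, fpu);

		printf("%lu == %lu\n", old_off, new_off);
		return 0;
	}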
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index e0fb065914aa..4436eacddb15 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -383,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh64_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -400,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
@@ -437,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ ret = put_fpu_long(child, index, data);
}
break;
@@ -445,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
@@ -471,7 +477,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+ unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
static unsigned long first_call;
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ec23b0a87b98..3d7afbb7f4bb 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 2889574608db..c2ced21c9dc1 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -208,6 +208,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -222,6 +237,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9517d063c79c..9c8965415f0a 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -419,6 +419,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -433,6 +448,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
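The io_32.h and io_64.h hunks above add sbus_memcpy_fromio() and sbus_memcpy_toio(), byte-at-a-time copies built on sbus_readb()/sbus_writeb(). A hedged usage sketch follows; the device window, buffer and 64-byte length are hypothetical, only the two helpers come from this patch:

	static void example_copy(void __iomem *dev_window, void *buf)
	{
		/* Pull 64 bytes of SBUS device memory into a kernel buffer... */
		sbus_memcpy_fromio(buf, dev_window, 64);

		/* ...and write the (possibly modified) buffer back. */
		sbus_memcpy_toio(dev_window, buf, 64);
	}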
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 5312782f0b5e..948b686ec089 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -38,7 +38,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
* types on sparc64. However, it requires that the device
* can drive enough of the 64 bits.
*/
-#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
+#define PCI64_REQUIRED_MASK (~(u64)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
#ifdef CONFIG_PCI
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0ece77f47753..303bd4dc8292 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
* and sun4c is guaranteed to have no highmem anyway.
*/
#define pte_offset_map(d, a) pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
-
#define pte_unmap(pte) do{}while(0)
-#define pte_unmap_nested(pte) do{}while(0)
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f5b5fa76c02d..f8dddb7045bb 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel pte_index
#define pte_offset_map pte_index
-#define pte_offset_map_nested pte_index
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/* Actual page table PTE updates. */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index e608f397e11f..27b9e93d0121 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sparc32_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+struct fps {
+ unsigned long regs[32];
+ unsigned long fsr;
+ unsigned long flags;
+ unsigned long extra;
+ unsigned long fpqd;
+ struct fq {
+ unsigned long *insnaddr;
+ unsigned long insn;
+ } fpq[16];
+};
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+ void __user *addr2p;
const struct user_regset_view *view;
+ struct pt_regs __user *pregs;
+ struct fps __user *fps;
int ret;
view = task_user_regset_view(current);
+ addr2p = (void __user *) addr2;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
switch(request) {
case PTRACE_GETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (void __user *) addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (void __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index aa90da08bf61..9ccc812bc09e 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -969,16 +969,19 @@ struct fps {
unsigned long fsr;
};
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
const struct user_regset_view *view = task_user_regset_view(current);
unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs __user *pregs;
struct fps __user *fps;
+ void __user *addr2p;
int ret;
- pregs = (struct pt_regs __user *) (unsigned long) addr;
- fps = (struct fps __user *) (unsigned long) addr;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
+ addr2p = (void __user *) addr2;
switch (request) {
case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (char __user *)addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (char __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f7..4730eac0747b 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
- unsigned long idx;
unsigned long vaddr;
+ long idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -63,44 +63,52 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+ int type;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
return;
}
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+ type = kmap_atomic_idx();
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+ {
+ unsigned long idx;
+
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+ /* XXX Fix - Anton */
#if 0
- __flush_cache_one(vaddr);
+ __flush_cache_one(vaddr);
#else
- flush_cache_all();
+ flush_cache_all();
#endif
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ /* XXX Fix - Anton */
#if 0
- __flush_tlb_one(vaddr);
+ __flush_tlb_one(vaddr);
#else
- flush_tlb_all();
+ flush_tlb_all();
#endif
+ }
#endif
+ kmap_atomic_idx_pop();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
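The highmem.c conversion above drops the caller-supplied km_type slot in favour of a per-CPU index stack driven by kmap_atomic_idx_push()/kmap_atomic_idx_pop(). Assuming the generic kmap_atomic()/kunmap_atomic() wrappers in linux/highmem.h that this series routes through the new __kmap_atomic()/__kunmap_atomic() entry points, callers change roughly as in this sketch (the memcpy is purely illustrative):

	/* Old style: the caller picked a fixed km_type slot. */
	void *src = kmap_atomic(page, KM_USER0);
	memcpy(buf, src, PAGE_SIZE);
	kunmap_atomic_notypecheck(src, KM_USER0);

	/* New style: the slot is pushed and popped internally. */
	void *p = kmap_atomic(page);
	memcpy(buf, p, PAGE_SIZE);
	kunmap_atomic(p);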
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 1eb308cb711a..7e8c2844e093 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -58,6 +58,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
config LOCKDEP_SUPPORT
def_bool y
@@ -96,6 +99,7 @@ config HVC_TILE
config TILE
def_bool y
+ select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
select GENERIC_FIND_NEXT_BIT
select USE_GENERIC_SMP_HELPERS
@@ -236,9 +240,9 @@ choice
If you are not absolutely sure what you are doing, leave this
option alone!
- config VMSPLIT_375G
+ config VMSPLIT_3_75G
bool "3.75G/0.25G user/kernel split (no kernel networking)"
- config VMSPLIT_35G
+ config VMSPLIT_3_5G
bool "3.5G/0.5G user/kernel split"
config VMSPLIT_3G
bool "3G/1G user/kernel split"
@@ -252,8 +256,8 @@ endchoice
config PAGE_OFFSET
hex
- default 0xF0000000 if VMSPLIT_375G
- default 0xE0000000 if VMSPLIT_35G
+ default 0xF0000000 if VMSPLIT_3_75G
+ default 0xE0000000 if VMSPLIT_3_5G
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
default 0x40000000 if VMSPLIT_1G
@@ -314,6 +318,15 @@ config HARDWALL
bool "Hardwall support to allow access to user dynamic network"
default y
+config KERNEL_PL
+ int "Processor protection level for kernel"
+ range 1 2
+ default "1"
+ ---help---
+ This setting determines the processor protection level the
+ kernel will be built to run at. Generally you should use
+ the default value here.
+
endmenu # Tilera-specific configuration
menu "Bus options"
@@ -354,3 +367,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index fd8f6bb5face..17acce70569b 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,8 +26,9 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
endif
endif
-
+ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
+endif
LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -49,6 +50,20 @@ head-y := arch/tile/kernel/head_$(BITS).o
libs-y += arch/tile/lib/
libs-y += $(LIBGCC_PATH)
-
# See arch/tile/Kbuild for content of core part of the kernel
core-y += arch/tile/
+
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
+ifdef TILERA_ROOT
+INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
+endif
+
+install:
+ install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+ install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+ install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+ echo ' install - install kernel into $(INSTALL_PATH)'
+endef
diff --git a/arch/tile/include/arch/sim.h b/arch/tile/include/arch/sim.h
new file mode 100644
index 000000000000..74b7c1624d34
--- /dev/null
+++ b/arch/tile/include/arch/sim.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Provides an API for controlling the simulator at runtime.
+ */
+
+/**
+ * @addtogroup arch_sim
+ * @{
+ *
+ * An API for controlling the simulator at runtime.
+ *
+ * The simulator's behavior can be modified while it is running.
+ * For example, human-readable trace output can be enabled and disabled
+ * around code of interest.
+ *
+ * There are two ways to modify simulator behavior:
+ * programmatically, by calling various sim_* functions, and
+ * interactively, by entering commands like "sim set functional true"
+ * at the tile-monitor prompt. Typing "sim help" at that prompt provides
+ * a list of interactive commands.
+ *
+ * All interactive commands can also be executed programmatically by
+ * passing a string to the sim_command function.
+ */
+
+#ifndef __ARCH_SIM_H__
+#define __ARCH_SIM_H__
+
+#include <arch/sim_def.h>
+#include <arch/abi.h>
+
+#ifndef __ASSEMBLER__
+
+#include <arch/spr_def.h>
+
+
+/**
+ * Return true if the current program is running under a simulator,
+ * rather than on real hardware. If running on hardware, other "sim_xxx()"
+ * calls have no useful effect.
+ */
+static inline int
+sim_is_simulator(void)
+{
+ return __insn_mfspr(SPR_SIM_CONTROL) != 0;
+}
+
+
+/**
+ * Checkpoint the simulator state to a checkpoint file.
+ *
+ * The checkpoint file name is either the default or the name specified
+ * on the command line with "--checkpoint-file".
+ */
+static __inline void
+sim_checkpoint(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
+}
+
+
+/**
+ * Report whether or not various kinds of simulator tracing are enabled.
+ *
+ * @return The bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline unsigned int
+sim_get_tracing(void)
+{
+ return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
+}
+
+
+/**
+ * Turn on or off different kinds of simulator tracing.
+ *
+ * @param mask Either one of these special values:
+ *
+ * SIM_TRACE_NONE (turns off tracing),
+ * SIM_TRACE_ALL (turns on all possible tracing).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline void
+sim_set_tracing(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
+}
+
+
+/**
+ * Request dumping of different kinds of simulator state.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_DUMP_ALL (dump all known state)
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_DUMP_REGS (the register file),
+ * SIM_DUMP_SPRS (the SPRs),
+ * SIM_DUMP_ITLB (the iTLB),
+ * SIM_DUMP_DTLB (the dTLB),
+ * SIM_DUMP_L1I (the L1 I-cache),
+ * SIM_DUMP_L1D (the L1 D-cache),
+ * SIM_DUMP_L2 (the L2 cache),
+ * SIM_DUMP_SNREGS (the switch register file),
+ * SIM_DUMP_SNITLB (the switch iTLB),
+ * SIM_DUMP_SNL1I (the switch L1 I-cache),
+ * SIM_DUMP_BACKTRACE (the current backtrace)
+ */
+static __inline void
+sim_dump(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
+}
+
+
+/**
+ * Print a string to the simulator stdout.
+ *
+ * @param str The string to be written; a newline is automatically added.
+ */
+static __inline void
+sim_print_string(const char* str)
+{
+ int i;
+ for (i = 0; str[i] != 0; i++)
+ {
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+ (str[i] << _SIM_CONTROL_OPERATOR_BITS));
+ }
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+ (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+
+/**
+ * Execute a simulator command string.
+ *
+ * Type 'sim help' at the tile-monitor prompt to learn what commands
+ * are available. Note the use of the tile-monitor "sim" command to
+ * pass commands to the simulator.
+ *
+ * The argument to sim_command() does not include the leading "sim"
+ * prefix used at the tile-monitor prompt; for example, you might call
+ * sim_command("trace disasm").
+ */
+static __inline void
+sim_command(const char* str)
+{
+ int c;
+ do
+ {
+ c = *str++;
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
+ (c << _SIM_CONTROL_OPERATOR_BITS));
+ }
+ while (c);
+}
+
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * The underlying implementation of "_sim_syscall()".
+ *
+ * We use extra "and" instructions to ensure that all the values
+ * we are passing to the simulator are actually valid in the registers
+ * (i.e. returned from memory) prior to the SIM_CONTROL spr.
+ */
+static __inline int _sim_syscall0(int val)
+{
+ long result;
+ __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
+ : "=R00" (result) : "R00" (val));
+ return result;
+}
+
+static __inline int _sim_syscall1(int val, long arg1)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result) : "R00" (val), "R01" (arg1));
+ return result;
+}
+
+static __inline int _sim_syscall2(int val, long arg1, long arg2)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2));
+ return result;
+}
+
+/* Note that _sim_syscall3() and higher are technically at risk of
+ receiving an interrupt right before the mtspr bundle, in which case
+ the register values for arguments 3 and up may still be in flight
+ to the core from a stack frame reload. */
+
+static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r3 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3));
+ return result;
+}
+
+static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
+ long arg4)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r4 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3), "R04" (arg4));
+ return result;
+}
+
+static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
+ long arg4, long arg5)
+{
+ long result;
+ __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
+ "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (result)
+ : "R00" (val), "R01" (arg1), "R02" (arg2),
+ "R03" (arg3), "R04" (arg4), "R05" (arg5));
+ return result;
+}
+
+
+/**
+ * Make a special syscall to the simulator itself, if running under
+ * simulation. This is used as the implementation of other functions
+ * and should not be used outside this file.
+ *
+ * @param syscall_num The simulator syscall number.
+ * @param nr The number of additional arguments provided.
+ *
+ * @return Varies by syscall.
+ */
+#define _sim_syscall(syscall_num, nr, args...) \
+ _sim_syscall##nr( \
+ ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
+
+
+/* Values for the "access_mask" parameters below. */
+#define SIM_WATCHPOINT_READ 1
+#define SIM_WATCHPOINT_WRITE 2
+#define SIM_WATCHPOINT_EXECUTE 4
+
+
+static __inline int
+sim_add_watchpoint(unsigned int process_id,
+ unsigned long address,
+ unsigned long size,
+ unsigned int access_mask,
+ unsigned long user_data)
+{
+ return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
+ address, size, access_mask, user_data);
+}
+
+
+static __inline int
+sim_remove_watchpoint(unsigned int process_id,
+ unsigned long address,
+ unsigned long size,
+ unsigned int access_mask,
+ unsigned long user_data)
+{
+ return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
+ address, size, access_mask, user_data);
+}
+
+
+/**
+ * Return value from sim_query_watchpoint.
+ */
+struct SimQueryWatchpointStatus
+{
+ /**
+ * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
+ * error (meaning a bad process_id).
+ */
+ int syscall_status;
+
+ /**
+ * The address of the watchpoint that fired (this is the address
+ * passed to sim_add_watchpoint, not an address within that range
+ * that actually triggered the watchpoint).
+ */
+ unsigned long address;
+
+ /** The arbitrary user_data installed by sim_add_watchpoint. */
+ unsigned long user_data;
+};
+
+
+static __inline struct SimQueryWatchpointStatus
+sim_query_watchpoint(unsigned int process_id)
+{
+ struct SimQueryWatchpointStatus status;
+ long val = SIM_CONTROL_SYSCALL |
+ (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
+ __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+ : "=R00" (status.syscall_status),
+ "=R01" (status.address),
+ "=R02" (status.user_data)
+ : "R00" (val), "R01" (process_id));
+ return status;
+}
+
+
+/* On the simulator, confirm lines have been evicted everywhere. */
+static __inline void
+sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
+{
+#ifdef __LP64__
+ _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
+#else
+ _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
+ 0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
+#endif
+}
+
+
+#endif /* !__DOXYGEN__ */
+
+
+
+
+/**
+ * Modify the shaping parameters of a shim.
+ *
+ * @param shim The shim to modify. One of:
+ * SIM_CONTROL_SHAPING_GBE_0
+ * SIM_CONTROL_SHAPING_GBE_1
+ * SIM_CONTROL_SHAPING_GBE_2
+ * SIM_CONTROL_SHAPING_GBE_3
+ * SIM_CONTROL_SHAPING_XGBE_0
+ * SIM_CONTROL_SHAPING_XGBE_1
+ *
+ * @param type The type of shaping. This should be the same type of
+ * shaping that is already in place on the shim. One of:
+ * SIM_CONTROL_SHAPING_MULTIPLIER
+ * SIM_CONTROL_SHAPING_PPS
+ * SIM_CONTROL_SHAPING_BPS
+ *
+ * @param units The magnitude of the rate. One of:
+ * SIM_CONTROL_SHAPING_UNITS_SINGLE
+ * SIM_CONTROL_SHAPING_UNITS_KILO
+ * SIM_CONTROL_SHAPING_UNITS_MEGA
+ * SIM_CONTROL_SHAPING_UNITS_GIGA
+ *
+ * @param rate The rate to which to change it. This must fit in
+ * SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
+ * the shaping is not changed.
+ *
+ * @return 0 if no problems were detected in the arguments to sim_set_shaping
+ * or 1 if problems were detected (for example, rate does not fit in 17 bits).
+ */
+static __inline int
+sim_set_shaping(unsigned shim,
+ unsigned type,
+ unsigned units,
+ unsigned rate)
+{
+ if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
+ return 1;
+
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
+ return 0;
+}
+
+#ifdef __tilegx__
+
+/** Enable a set of mPIPE links. Pass a -1 link_mask to enable all links. */
+static __inline void
+sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL,
+ (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+ (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+/** Disable a set of mPIPE links. Pass a -1 link_mask to disable all links. */
+static __inline void
+sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL,
+ (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+ (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+#endif /* __tilegx__ */
+
+
+/*
+ * An API for changing "functional" mode.
+ */
+
+#ifndef __DOXYGEN__
+
+#define sim_enable_functional() \
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)
+
+#define sim_disable_functional() \
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)
+
+#endif /* __DOXYGEN__ */
+
+
+/*
+ * Profiler support.
+ */
+
+/**
+ * Turn profiling on for the current task.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_enable(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
+}
+
+
+/** Turn profiling off for the current task. */
+static __inline void
+sim_profiler_disable(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
+}
+
+
+/**
+ * Turn profiling on or off for the current task.
+ *
+ * @param enabled If true, turns on profiling. If false, turns it off.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_set_enabled(int enabled)
+{
+ int val =
+ enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
+ __insn_mtspr(SPR_SIM_CONTROL, val);
+}
+
+
+/**
+ * Return true if and only if profiling is currently enabled
+ * for the current task.
+ *
+ * This returns false even if sim_profiler_enable() was called
+ * if the current execution environment does not support profiling.
+ */
+static __inline int
+sim_profiler_is_enabled(void)
+{
+ return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
+}
+
+
+/**
+ * Reset profiling counters to zero for the current task.
+ *
+ * Resetting can be done while profiling is enabled. It does not affect
+ * the chip-wide profiling counters.
+ */
+static __inline void
+sim_profiler_clear(void)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
+}
+
+
+/**
+ * Enable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (enables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (enable all memory controllers)
+ * SIM_CHIP_XAUI (enable all XAUI controllers)
+ * SIM_CHIP_MPIPE (enable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_enable(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Disable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (disables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (disable all memory controllers)
+ * SIM_CHIP_XAUI (disable all XAUI controllers)
+ * SIM_CHIP_MPIPE (disable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_disable(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Reset specified chip-level profiling counters to zero.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (clears all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (clear all memory controllers)
+ * SIM_CHIP_XAUI (clear all XAUI controllers)
+ * SIM_CHIP_MPIPE (clear all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_clear(unsigned int mask)
+{
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
+}
+
+
+/*
+ * Event support.
+ */
+
+#ifndef __DOXYGEN__
+
+static __inline void
+sim_event_begin(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+ __insn_mtspr(SPR_EVENT_BEGIN, x);
+#endif
+}
+
+static __inline void
+sim_event_end(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+ __insn_mtspr(SPR_EVENT_END, x);
+#endif
+}
+
+#endif /* !__DOXYGEN__ */
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* !__ARCH_SIM_H__ */
+
+/** @} */
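Since the new header above documents a runtime control API, here is a short hedged sketch of how a caller might wrap a region of interest with it; traced_workload() and the particular trace flags are illustrative, everything else is declared in arch/sim.h above:

	#include <arch/sim.h>

	static void profile_region(void)
	{
		if (!sim_is_simulator())
			return;		/* sim_* calls have no useful effect on real hardware */

		sim_print_string("tracing traced_workload()");
		sim_set_tracing(SIM_TRACE_CYCLES | SIM_TRACE_DISASM);

		traced_workload();	/* hypothetical code under inspection */

		sim_set_tracing(SIM_TRACE_NONE);
		sim_dump(SIM_DUMP_BACKTRACE);
	}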
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h
index 6418fbde063e..7a17082c3773 100644
--- a/arch/tile/include/arch/sim_def.h
+++ b/arch/tile/include/arch/sim_def.h
@@ -1,477 +1,461 @@
-// Copyright 2010 Tilera Corporation. All Rights Reserved.
-//
-// This program is free software; you can redistribute it and/or
-// modify it under the terms of the GNU General Public License
-// as published by the Free Software Foundation, version 2.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-// NON INFRINGEMENT. See the GNU General Public License for
-// more details.
-
-//! @file
-//!
-//! Some low-level simulator definitions.
-//!
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Some low-level simulator definitions.
+ */
#ifndef __ARCH_SIM_DEF_H__
#define __ARCH_SIM_DEF_H__
-//! Internal: the low bits of the SIM_CONTROL_* SPR values specify
-//! the operation to perform, and the remaining bits are
-//! an operation-specific parameter (often unused).
-//!
+/**
+ * Internal: the low bits of the SIM_CONTROL_* SPR values specify
+ * the operation to perform, and the remaining bits are
+ * an operation-specific parameter (often unused).
+ */
#define _SIM_CONTROL_OPERATOR_BITS 8
-//== Values which can be written to SPR_SIM_CONTROL.
+/*
+ * Values which can be written to SPR_SIM_CONTROL.
+ */
-//! If written to SPR_SIM_CONTROL, stops profiling.
-//!
+/** If written to SPR_SIM_CONTROL, stops profiling. */
#define SIM_CONTROL_PROFILER_DISABLE 0
-//! If written to SPR_SIM_CONTROL, starts profiling.
-//!
+/** If written to SPR_SIM_CONTROL, starts profiling. */
#define SIM_CONTROL_PROFILER_ENABLE 1
-//! If written to SPR_SIM_CONTROL, clears profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears profiling counters. */
#define SIM_CONTROL_PROFILER_CLEAR 2
-//! If written to SPR_SIM_CONTROL, checkpoints the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
#define SIM_CONTROL_CHECKPOINT 3
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! sets the tracing mask to the given mask. See "sim_set_tracing()".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * sets the tracing mask to the given mask. See "sim_set_tracing()".
+ */
#define SIM_CONTROL_SET_TRACING 4
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! dumps the requested items of machine state to the log.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * dumps the requested items of machine state to the log.
+ */
#define SIM_CONTROL_DUMP 5
-//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
-//! If written to SPR_SIM_CONTROL, disables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
-//! If written to SPR_SIM_CONTROL, enables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level functional mode */
#define SIM_CONTROL_ENABLE_FUNCTIONAL 9
-//! If written to SPR_SIM_CONTROL, disables chip-level functional mode.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
#define SIM_CONTROL_DISABLE_FUNCTIONAL 10
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
-//! All tiles must perform this write for functional mode to be enabled.
-//! Ignored in naked boot mode unless --functional is specified.
-//! WARNING: Only the hypervisor startup code should use this!
-//!
+/**
+ * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
+ * All tiles must perform this write for functional mode to be enabled.
+ * Ignored in naked boot mode unless --functional is specified.
+ * WARNING: Only the hypervisor startup code should use this!
+ */
#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! writes a string directly to the simulator output. Written to once for
-//! each character in the string, plus a final NUL. Instead of NUL,
-//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
-//!
-// ISSUE: Document the meaning of "newline", and the handling of NUL.
-//
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * writes a string directly to the simulator output. Written to once for
+ * each character in the string, plus a final NUL. Instead of NUL,
+ * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
+ */
+/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
#define SIM_CONTROL_PUTC 12
-//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
-//! this core. This is intended to be used before a loop that will
-//! invalidate the cache by loading new data and evicting all current data.
-//! Generally speaking, this API should only be used by system code.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
+ * this core. This is intended to be used before a loop that will
+ * invalidate the cache by loading new data and evicting all current data.
+ * Generally speaking, this API should only be used by system code.
+ */
#define SIM_CONTROL_GRINDER_CLEAR 13
-//! If written to SPR_SIM_CONTROL, shuts down the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
#define SIM_CONTROL_SHUTDOWN 14
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that a fork syscall just created the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that a fork syscall just created the given process.
+ */
#define SIM_CONTROL_OS_FORK 15
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that an exit syscall was just executed by the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that an exit syscall was just executed by the given process.
+ */
#define SIM_CONTROL_OS_EXIT 16
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that the OS just switched to the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that the OS just switched to the given process.
+ */
#define SIM_CONTROL_OS_SWITCH 17
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an exec syscall was just executed. Written to once for
-//! each character in the executable name, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an exec syscall was just executed. Written to once for
+ * each character in the executable name, plus a final NUL.
+ */
#define SIM_CONTROL_OS_EXEC 18
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an interpreter (PT_INTERP) was loaded. Written to once
-//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
-//! hex load address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an interpreter (PT_INTERP) was loaded. Written to once
+ * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
+ * hex load address starting with "0x", and "PATH" is the executable name.
+ */
#define SIM_CONTROL_OS_INTERP 19
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was loaded. Written to once for each character
-//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was loaded. Written to once for each character
+ * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x", and "PATH" is the executable name.
+ */
#define SIM_CONTROL_DLOPEN 20
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was unloaded. Written to once for each character
-//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was unloaded. Written to once for each character
+ * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x".
+ */
#define SIM_CONTROL_DLCLOSE 21
-//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
-//! indicates whether to allow data reads to remotely-cached
-//! dirty cache lines to be cached locally without grinder warnings or
-//! assertions (used by Linux kernel fast memcpy).
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
+ * indicates whether to allow data reads to remotely-cached
+ * dirty cache lines to be cached locally without grinder warnings or
+ * assertions (used by Linux kernel fast memcpy).
+ */
#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
-//! If written to SPR_SIM_CONTROL, enables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, enables memory tracing. */
#define SIM_CONTROL_ENABLE_MEM_LOGGING 23
-//! If written to SPR_SIM_CONTROL, disables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, disables memory tracing. */
#define SIM_CONTROL_DISABLE_MEM_LOGGING 24
-//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
-//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and
-//! the rate, as defined in SIM_SHAPING_SPR_ARG.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
+ * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
+ * the rate, as defined in SIM_SHAPING_SPR_ARG.
+ */
#define SIM_CONTROL_SHAPING 25
-//! If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
-//! requests that a simulator command be executed. Written to once for each
-//! character in the command, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
+ * requests that a simulator command be executed. Written to once for each
+ * character in the command, plus a final NUL.
+ */
#define SIM_CONTROL_COMMAND 26
-//! If written to SPR_SIM_CONTROL, indicates that the simulated system
-//! is panicking, to allow debugging via --debug-on-panic.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, indicates that the simulated system
+ * is panicking, to allow debugging via --debug-on-panic.
+ */
#define SIM_CONTROL_PANIC 27
-//! If written to SPR_SIM_CONTROL, triggers a simulator syscall.
-//! See "sim_syscall()" for more info.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
+ * See "sim_syscall()" for more info.
+ */
#define SIM_CONTROL_SYSCALL 32
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
-//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
+ * use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
+ */
#define SIM_CONTROL_OS_FORK_PARENT 33
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), clears the pending magic data section. The cleared
-//! pending magic data section and any subsequently appended magic bytes
-//! will only take effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), clears the pending magic data section. The cleared
+ * pending magic data section and any subsequently appended magic bytes
+ * will only take effect when the classifier blast programmer is run.
+ */
#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8) and a byte of data (shifted by 16), appends that byte
-//! to the shim's pending magic data section. The pending magic data
-//! section takes effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8) and a byte of data (shifted by 16), appends that byte
+ * to the shim's pending magic data section. The pending magic data
+ * section takes effect when the classifier blast programmer is run.
+ */
#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
-//! mask of links (shifted by 32), enable or disable the corresponding
-//! mPIPE links.
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
+ * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
+ * mask of links (shifted by 32), enable or disable the corresponding
+ * mPIPE links.
+ */
#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
-//== Syscall numbers for use with "sim_syscall()".
-//! Syscall number for sim_add_watchpoint().
-//!
+/*
+ * Syscall numbers for use with "sim_syscall()".
+ */
+
+/** Syscall number for sim_add_watchpoint(). */
#define SIM_SYSCALL_ADD_WATCHPOINT 2
-//! Syscall number for sim_remove_watchpoint().
-//!
+/** Syscall number for sim_remove_watchpoint(). */
#define SIM_SYSCALL_REMOVE_WATCHPOINT 3
-//! Syscall number for sim_query_watchpoint().
-//!
+/** Syscall number for sim_query_watchpoint(). */
#define SIM_SYSCALL_QUERY_WATCHPOINT 4
-//! Syscall number that asserts that the cache lines whose 64-bit PA
-//! is passed as the second argument to sim_syscall(), and over a
-//! range passed as the third argument, are no longer in cache.
-//! The simulator raises an error if this is not the case.
-//!
+/**
+ * Syscall number that asserts that the cache lines whose 64-bit PA
+ * is passed as the second argument to sim_syscall(), and over a
+ * range passed as the third argument, are no longer in cache.
+ * The simulator raises an error if this is not the case.
+ */
#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Enable --trace-cycle when passed to simulator_set_tracing().
-//!
+/** Enable --trace-cycle when passed to simulator_set_tracing(). */
#define SIM_TRACE_CYCLES 0x01
-//! Enable --trace-router when passed to simulator_set_tracing().
-//!
+/** Enable --trace-router when passed to simulator_set_tracing(). */
#define SIM_TRACE_ROUTER 0x02
-//! Enable --trace-register-writes when passed to simulator_set_tracing().
-//!
+/** Enable --trace-register-writes when passed to simulator_set_tracing(). */
#define SIM_TRACE_REGISTER_WRITES 0x04
-//! Enable --trace-disasm when passed to simulator_set_tracing().
-//!
+/** Enable --trace-disasm when passed to simulator_set_tracing(). */
#define SIM_TRACE_DISASM 0x08
-//! Enable --trace-stall-info when passed to simulator_set_tracing().
-//!
+/** Enable --trace-stall-info when passed to simulator_set_tracing(). */
#define SIM_TRACE_STALL_INFO 0x10
-//! Enable --trace-memory-controller when passed to simulator_set_tracing().
-//!
+/** Enable --trace-memory-controller when passed to simulator_set_tracing(). */
#define SIM_TRACE_MEMORY_CONTROLLER 0x20
-//! Enable --trace-l2 when passed to simulator_set_tracing().
-//!
+/** Enable --trace-l2 when passed to simulator_set_tracing(). */
#define SIM_TRACE_L2_CACHE 0x40
-//! Enable --trace-lines when passed to simulator_set_tracing().
-//!
+/** Enable --trace-lines when passed to simulator_set_tracing(). */
#define SIM_TRACE_LINES 0x80
-//! Turn off all tracing when passed to simulator_set_tracing().
-//!
+/** Turn off all tracing when passed to simulator_set_tracing(). */
#define SIM_TRACE_NONE 0
-//! Turn on all tracing when passed to simulator_set_tracing().
-//!
+/** Turn on all tracing when passed to simulator_set_tracing(). */
#define SIM_TRACE_ALL (-1)
-//! @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
#define SIM_TRACE_SPR_ARG(mask) \
(SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Dump the general-purpose registers.
-//!
+/** Dump the general-purpose registers. */
#define SIM_DUMP_REGS 0x001
-//! Dump the SPRs.
-//!
+/** Dump the SPRs. */
#define SIM_DUMP_SPRS 0x002
-//! Dump the ITLB.
-//!
+/** Dump the ITLB. */
#define SIM_DUMP_ITLB 0x004
-//! Dump the DTLB.
-//!
+/** Dump the DTLB. */
#define SIM_DUMP_DTLB 0x008
-//! Dump the L1 I-cache.
-//!
+/** Dump the L1 I-cache. */
#define SIM_DUMP_L1I 0x010
-//! Dump the L1 D-cache.
-//!
+/** Dump the L1 D-cache. */
#define SIM_DUMP_L1D 0x020
-//! Dump the L2 cache.
-//!
+/** Dump the L2 cache. */
#define SIM_DUMP_L2 0x040
-//! Dump the switch registers.
-//!
+/** Dump the switch registers. */
#define SIM_DUMP_SNREGS 0x080
-//! Dump the switch ITLB.
-//!
+/** Dump the switch ITLB. */
#define SIM_DUMP_SNITLB 0x100
-//! Dump the switch L1 I-cache.
-//!
+/** Dump the switch L1 I-cache. */
#define SIM_DUMP_SNL1I 0x200
-//! Dump the current backtrace.
-//!
+/** Dump the current backtrace. */
#define SIM_DUMP_BACKTRACE 0x400
-//! Only dump valid lines in caches.
-//!
+/** Only dump valid lines in caches. */
#define SIM_DUMP_VALID_LINES 0x800
-//! Dump everything that is dumpable.
-//!
+/** Dump everything that is dumpable. */
#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
-// @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to dump machine state.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
#define SIM_DUMP_SPR_ARG(mask) \
(SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+ */
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
#define SIM_CHIP_MEMCTL 0x001
-//! Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
#define SIM_CHIP_XAUI 0x002
-//! Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
#define SIM_CHIP_PCIE 0x004
-//! Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
#define SIM_CHIP_MPIPE 0x008
-//! Reference all chip devices.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+#define SIM_CHIP_TRIO 0x010
+
+/** Reference all chip devices. */
#define SIM_CHIP_ALL (-1)
-//! @}
+/** @} */
-//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.*/
#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
(SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
-// Shim bitrate controls.
+/* Shim bitrate controls. */
-//! The number of bits used to store the shim id.
-//!
+/** The number of bits used to store the shim id. */
#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
-//! Change the gbe 0 bitrate.
-//!
+/** Change the gbe 0 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_0 0x0
-//! Change the gbe 1 bitrate.
-//!
+/** Change the gbe 1 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_1 0x1
-//! Change the gbe 2 bitrate.
-//!
+/** Change the gbe 2 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_2 0x2
-//! Change the gbe 3 bitrate.
-//!
+/** Change the gbe 3 bitrate. */
#define SIM_CONTROL_SHAPING_GBE_3 0x3
-//! Change the xgbe 0 bitrate.
-//!
+/** Change the xgbe 0 bitrate. */
#define SIM_CONTROL_SHAPING_XGBE_0 0x4
-//! Change the xgbe 1 bitrate.
-//!
+/** Change the xgbe 1 bitrate. */
#define SIM_CONTROL_SHAPING_XGBE_1 0x5
-//! The type of shaping to do.
-//!
+/** The type of shaping to do. */
#define SIM_CONTROL_SHAPING_TYPE_BITS 2
-//! Control the multiplier.
-//!
+/** Control the multiplier. */
#define SIM_CONTROL_SHAPING_MULTIPLIER 0
-//! Control the PPS.
-//!
+/** Control the PPS. */
#define SIM_CONTROL_SHAPING_PPS 1
-//! Control the BPS.
-//!
+/** Control the BPS. */
#define SIM_CONTROL_SHAPING_BPS 2
-//! The number of bits for the units for the shaping parameter.
-//!
+/** The number of bits for the units for the shaping parameter. */
#define SIM_CONTROL_SHAPING_UNITS_BITS 2
-//! Provide a number in single units.
-//!
+/** Provide a number in single units. */
#define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
-//! Provide a number in kilo units.
-//!
+/** Provide a number in kilo units. */
#define SIM_CONTROL_SHAPING_UNITS_KILO 1
-//! Provide a number in mega units.
-//!
+/** Provide a number in mega units. */
#define SIM_CONTROL_SHAPING_UNITS_MEGA 2
-//! Provide a number in giga units.
-//!
+/** Provide a number in giga units. */
#define SIM_CONTROL_SHAPING_UNITS_GIGA 3
-// @}
+/** @} */
-//! How many bits are available for the rate.
-//!
+/** How many bits are available for the rate. */
#define SIM_CONTROL_SHAPING_RATE_BITS \
(32 - (_SIM_CONTROL_OPERATOR_BITS + \
SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
SIM_CONTROL_SHAPING_TYPE_BITS + \
SIM_CONTROL_SHAPING_UNITS_BITS))
-//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
#define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
(SIM_CONTROL_SHAPING | \
((shim) | \
@@ -483,30 +467,36 @@
SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
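SIM_SHAPING_SPR_ARG() packs the shim id, shaping type, units and rate into adjacent bit fields whose widths are the *_BITS constants defined above. An illustrative call, with a hypothetical helper name and an arbitrary example rate:

static inline void sim_shape_gbe0_100mbps(void)
{
	/* Ask the simulator to shape gbe 0 to roughly 100 megabits per second. */
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_SHAPING_SPR_ARG(SIM_CONTROL_SHAPING_GBE_0,
					 SIM_CONTROL_SHAPING_BPS,
					 SIM_CONTROL_SHAPING_UNITS_MEGA,
					 100));
}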
-//== Values returned when reading SPR_SIM_CONTROL.
-// ISSUE: These names should share a longer common prefix.
+/*
+ * Values returned when reading SPR_SIM_CONTROL.
+ * ISSUE: These names should share a longer common prefix.
+ */
-//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
-//! (SIM_TRACE_xxx values).
-//!
+/**
+ * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
+ * (SIM_TRACE_xxx values).
+ */
#define SIM_TRACE_FLAG_MASK 0xFFFF
-//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
-//!
+/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
#define SIM_PROFILER_ENABLED_MASK 0x10000
-//== Special arguments for "SIM_CONTROL_PUTC".
+/*
+ * Special arguments for "SIM_CONTROL_PUTC".
+ */
-//! Flag value for forcing a PUTC string-flush, including
-//! coordinate/cycle prefix and newline.
-//!
+/**
+ * Flag value for forcing a PUTC string-flush, including
+ * coordinate/cycle prefix and newline.
+ */
#define SIM_PUTC_FLUSH_STRING 0x100
-//! Flag value for forcing a PUTC binary-data-flush, which skips the
-//! prefix and does not append a newline.
-//!
+/**
+ * Flag value for forcing a PUTC binary-data-flush, which skips the
+ * prefix and does not append a newline.
+ */
#define SIM_PUTC_FLUSH_BINARY 0x101
-#endif //__ARCH_SIM_DEF_H__
+#endif /* __ARCH_SIM_DEF_H__ */
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index c8fdbd9a45e6..442fcba0d122 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,8 +12,93 @@
* more details.
*/
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at. In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d) __concat4(a, b, c, d)
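The two-level pasting is the usual trick for concatenating the value of a macro rather than its name: _concat4() lets CONFIG_KERNEL_PL expand first and only then hands the tokens to __concat4(). A worked expansion, assuming CONFIG_KERNEL_PL is 1 and using the generic SPR_SYSTEM_SAVE_K_0 defined further down:

/*
 * SPR_SYSTEM_SAVE_K_0
 *   -> _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
 *   -> __concat4(SPR_SYSTEM_SAVE_, 1, _0,)   (the PL expands before pasting)
 *   -> SPR_SYSTEM_SAVE_1_0
 *
 * With CONFIG_KERNEL_PL == 2 the same name resolves to SPR_SYSTEM_SAVE_2_0.
 */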
+
#ifdef __tilegx__
#include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+ _concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+ _concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+ _concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+ _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+ _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+ _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+ _concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+ _concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+ _concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+ _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
#else
#include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+ _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
#endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+ _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+ _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+ _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+ _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+ _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index b4fc06864df6..bbc1f4c924ee 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -56,58 +56,93 @@
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_2_0 0x4605
+#define SPR_EX_CONTEXT_2_1 0x4606
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
+#define SPR_INTCTRL_2_STATUS 0x4607
#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
#define SPR_INTERRUPT_MASK_0_0 0x4a08
#define SPR_INTERRUPT_MASK_0_1 0x4a09
#define SPR_INTERRUPT_MASK_1_0 0x4809
#define SPR_INTERRUPT_MASK_1_1 0x480a
+#define SPR_INTERRUPT_MASK_2_0 0x4608
+#define SPR_INTERRUPT_MASK_2_1 0x4609
#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
+#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
+#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
+#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
+#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
+#define SPR_MPL_DMA_CPL_SET_2 0x5802
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
+#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
+#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
#define SPR_MPL_INTCTRL_1_SET_0 0x4800
#define SPR_MPL_INTCTRL_1_SET_1 0x4801
+#define SPR_MPL_INTCTRL_1_SET_2 0x4802
+#define SPR_MPL_INTCTRL_2_SET_0 0x4600
+#define SPR_MPL_INTCTRL_2_SET_1 0x4601
+#define SPR_MPL_INTCTRL_2_SET_2 0x4602
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
+#define SPR_MPL_SN_ACCESS_SET_2 0x0802
#define SPR_MPL_SN_CPL_SET_0 0x5a00
#define SPR_MPL_SN_CPL_SET_1 0x5a01
+#define SPR_MPL_SN_CPL_SET_2 0x5a02
#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
+#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
+#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
+#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
#define SPR_MPL_UDN_CA_SET_0 0x3c00
#define SPR_MPL_UDN_CA_SET_1 0x3c01
+#define SPR_MPL_UDN_CA_SET_2 0x3c02
#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
#define SPR_MPL_UDN_REFILL_SET_0 0x1000
#define SPR_MPL_UDN_REFILL_SET_1 0x1001
+#define SPR_MPL_UDN_REFILL_SET_2 0x1002
#define SPR_MPL_UDN_TIMER_SET_0 0x3600
#define SPR_MPL_UDN_TIMER_SET_1 0x3601
+#define SPR_MPL_UDN_TIMER_SET_2 0x3602
#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
#define SPR_PASS 0x4e0b
#define SPR_PERF_COUNT_0 0x4205
#define SPR_PERF_COUNT_1 0x4206
#define SPR_PERF_COUNT_CTL 0x4207
+#define SPR_PERF_COUNT_DN_CTL 0x4210
#define SPR_PERF_COUNT_STS 0x4208
#define SPR_PROC_STATUS 0x4f00
#define SPR_SIM_CONTROL 0x4e0c
@@ -124,6 +159,10 @@
#define SPR_SYSTEM_SAVE_1_1 0x4901
#define SPR_SYSTEM_SAVE_1_2 0x4902
#define SPR_SYSTEM_SAVE_1_3 0x4903
+#define SPR_SYSTEM_SAVE_2_0 0x4700
+#define SPR_SYSTEM_SAVE_2_1 0x4701
+#define SPR_SYSTEM_SAVE_2_2 0x4702
+#define SPR_SYSTEM_SAVE_2_3 0x4703
#define SPR_TILE_COORD 0x4c17
#define SPR_TILE_RTF_HWM 0x4e10
#define SPR_TILE_TIMER_CONTROL 0x3205
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 758ca4619d50..f18887d82399 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -146,7 +146,10 @@ enum {
CALLER_SP_IN_R52_BASE = 4,
- CALLER_SP_OFFSET_BASE = 8
+ CALLER_SP_OFFSET_BASE = 8,
+
+ /* Marks the entry point of certain functions. */
+ ENTRY_POINT_INFO_OP = 16
};
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 8b60ec8b2d19..c3ae570c0a5d 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -216,15 +216,16 @@ struct compat_siginfo;
struct compat_sigaltstack;
long compat_sys_execve(const char __user *path,
const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
+ const compat_uptr_t __user *envp, struct pt_regs *);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
long compat_sys_rt_sigqueueinfo(int pid, int sig,
struct compat_siginfo __user *uinfo);
-long compat_sys_rt_sigreturn(void);
+long compat_sys_rt_sigreturn(struct pt_regs *);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr);
+ struct compat_sigaltstack __user *uoss_ptr,
+ struct pt_regs *);
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
@@ -255,4 +256,12 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
/* Tilera Linux syscalls that don't have "compat" versions. */
#define compat_sys_flush_cache sys_flush_cache
+/* These are the intvec_64.S trampolines. */
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_rt_sigreturn(void);
+
#endif /* _ASM_TILE_COMPAT_H */
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6fa9bd..e0f7ee186721 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
#define flush_cache_kmaps() do { } while (0)
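The atomic-kmap entry points lose their enum km_type slot argument here; the slot is chosen implicitly (pushed and popped in stack order) rather than picked by the caller. A hedged sketch of the resulting calling pattern, assuming the usual <linux/highmem.h> and <linux/string.h> declarations (the helper name is invented):

/* Copy the first len bytes out of a possibly-highmem page. */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	char *vaddr = __kmap_atomic(page);	/* push a temporary mapping */

	memcpy(dst, vaddr, len);
	__kunmap_atomic(vaddr);			/* pop it again */
}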
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index a11d4837ee4d..641e4ff3d805 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -47,53 +47,53 @@
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
else \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
int __n = (n); \
int __mask = 1 << (__n & 0x1f); \
if (__n < 32) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
else \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
int __n = (n); \
(((__n < 32) ? \
- __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
- __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
>> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
unsigned long long __m = (mask); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
- ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+ ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+ __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
- __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+ __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#endif
/*
* The set of interrupts we want active if irqs are enabled.
* Note that in particular, the tile timer interrupt comes and goes
* from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
* We use a low bit (MEM_ERROR) as our sentinel value and make sure it
* is always claimed as an "active interrupt" so we can query that bit
@@ -170,14 +170,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
- mfspr tmp, INTERRUPT_MASK_1; \
+ mfspr tmp, SPR_INTERRUPT_MASK_K; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, hw2_last(interrupts_enabled_mask); \
- shl16insli reg, reg, hw1(interrupts_enabled_mask); \
- shl16insli reg, reg, hw0(interrupts_enabled_mask); \
+ moveli reg, hw2_last(interrupts_enabled_mask); \
+ shl16insli reg, reg, hw1(interrupts_enabled_mask); \
+ shl16insli reg, reg, hw0(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
@@ -185,18 +185,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
- mtspr INTERRUPT_MASK_SET_1, tmp0
+ mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
- mtspr INTERRUPT_MASK_SET_1, tmp
+ mtspr SPR_INTERRUPT_MASK_SET_K, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
ld tmp0, tmp0; \
- mtspr INTERRUPT_MASK_RESET_1, tmp0
+ mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
#else /* !__tilegx__ */
@@ -210,14 +210,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
* (making the original code's write of the "high" mask word idempotent).
*/
#define IRQS_DISABLED(tmp) \
- mfspr tmp, INTERRUPT_MASK_1_0; \
+ mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
shri tmp, tmp, INT_MEM_ERROR; \
andi tmp, tmp, 1
/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
- moveli reg, lo16(interrupts_enabled_mask); \
- auli reg, reg, ha16(interrupts_enabled_mask);\
+ moveli reg, lo16(interrupts_enabled_mask); \
+ auli reg, reg, ha16(interrupts_enabled_mask); \
add reg, reg, tp
/* Disable interrupts. */
@@ -227,16 +227,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
}; \
{ \
- mtspr INTERRUPT_MASK_SET_1_0, tmp0; \
+ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
}; \
- mtspr INTERRUPT_MASK_SET_1_1, tmp1
+ mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
movei tmp, -1; \
- mtspr INTERRUPT_MASK_SET_1_0, tmp; \
- mtspr INTERRUPT_MASK_SET_1_1, tmp
+ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
+ mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
@@ -246,8 +246,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
addi tmp1, tmp0, 4 \
}; \
lw tmp1, tmp1; \
- mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \
- mtspr INTERRUPT_MASK_RESET_1_1, tmp1
+ mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
+ mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
/*
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h
index 4c6811e3e8dc..81b8fc348d63 100644
--- a/arch/tile/include/asm/mman.h
+++ b/arch/tile/include/asm/mman.h
@@ -23,6 +23,7 @@
#define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x0080 /* do not block on IO */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_STACK MAP_GROWSDOWN /* provide convenience alias */
#define MAP_LOCKED 0x0200 /* pages are locked */
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 7d90641cf18d..7979a45430d3 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
* If you want more physical memory than this then see the CONFIG_HIGHMEM
* option in the kernel configuration.
*
- * The top two 16MB chunks in the table below (VIRT and HV) are
- * unavailable to Linux. Since the kernel interrupt vectors must live
- * at 0xfd000000, we map all of the bottom of RAM at this address with
- * a huge page table entry to minimize its ITLB footprint (as well as
- * at PAGE_OFFSET). The last architected requirement is that user
- * interrupt vectors live at 0xfc000000, so we make that range of
- * memory available to user processes. The remaining regions are sized
- * as shown; after the first four addresses, we show "typical" values,
- * since the actual addresses depend on kernel #defines.
+ * The top 16MB chunk in the table below is unavailable to Linux. Since
+ * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
+ * (depending on whether the kernel is at PL2 or PL1), we map all of the
+ * bottom of RAM at this address with a huge page table entry to minimize
+ * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
+ * requirement is that user interrupt vectors live at 0xfc000000, so we
+ * make that range of memory available to user processes. The remaining
+ * regions are sized as shown; the first four addresses use the PL 1
+ * values, and after that, we show "typical" values, since the actual
+ * addresses depend on kernel #defines.
*
- * MEM_VIRT_INTRPT 0xff000000
* MEM_HV_INTRPT 0xfe000000
* MEM_SV_INTRPT (kernel code) 0xfd000000
* MEM_USER_INTRPT (user vector) 0xfc000000
@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
*/
#define MEM_USER_INTRPT _AC(0xfc000000, UL)
+#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT _AC(0xfd000000, UL)
#define MEM_HV_INTRPT _AC(0xfe000000, UL)
-#define MEM_VIRT_INTRPT _AC(0xff000000, UL)
+#else
+#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
+#define MEM_SV_INTRPT _AC(0xfe000000, UL)
+#define MEM_HV_INTRPT _AC(0xff000000, UL)
+#endif
#define INTRPT_SIZE 0x4000
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index b3367379d537..dc4ccdd855bc 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
#define pte_offset_map(dir, address) \
_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
- _pte_offset_map(dir, address, KM_PTE1)
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index ccd5f8425688..1747ff3946b2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -328,18 +328,21 @@ extern int kdata_huge;
* Note that assembly code assumes that USER_PL is zero.
*/
#define USER_PL 0
-#define KERNEL_PL 1
+#if CONFIG_KERNEL_PL == 2
+#define GUEST_PL 1
+#endif
+#define KERNEL_PL CONFIG_KERNEL_PL
-/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
+/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
- ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
+ ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
- (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
+ (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
unsigned long __ksp0 = task_ksp0(task); \
int __cpu = raw_smp_processor_id(); \
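Because ksp0 is chosen so that its low CPU_LOG_MASK_VALUE bits are clear, those bits are free to carry the cpu number, which is what the macros above rely on. A worked example with an arbitrary illustrative SPR value:

/*
 * If SPR_SYSTEM_SAVE_K_0 reads back as 0xfd08b007:
 *
 *   raw_smp_processor_id() = 0xfd08b007 &  CPU_MASK_VALUE  = 7
 *   get_current_ksp0()     = 0xfd08b007 & ~CPU_MASK_VALUE  = 0xfd08b000
 */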
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4a02bb073979..ac6d343129d3 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -62,8 +62,8 @@ struct pt_regs {
pt_reg_t lr; /* aliases regs[TREG_LR] */
/* Saved special registers. */
- pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */
- pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
+ pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */
+ pt_reg_t ex1; /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */
pt_reg_t orig_r0; /* r0 at syscall entry, else zero */
pt_reg_t flags; /* flags (see below) */
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index ce99ffefeacf..3b5507c31eae 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -32,8 +32,9 @@ extern void *compat_sys_call_table[];
/*
* Note that by convention, any syscall which requires the current
- * register set takes an additional "struct pt_regs *" pointer; the
- * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ * register set takes an additional "struct pt_regs *" pointer; a
+ * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
+ * jumps to sys_xxx().
*/
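A hypothetical example of that convention (sys_foo() and its argument are invented for illustration): the C handler declares the extra pt_regs pointer as its final parameter, and the matching _sys_foo() trampoline in intvec*.S supplies it before jumping here.

long sys_foo(unsigned long arg, struct pt_regs *regs)
{
	/* The trampoline filled in regs; the handler can inspect it freely. */
	return arg ? (long)regs->pc : 0;
}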
/* kernel/sys.c */
@@ -43,66 +44,17 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
-long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-#ifdef __tilegx__
-long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
+#define sys_mmap sys_mmap
#endif
-/* kernel/process.c */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid);
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid,
- struct pt_regs *regs);
-long sys_fork(void);
-long _sys_fork(struct pt_regs *regs);
-long sys_vfork(void);
-long _sys_vfork(struct pt_regs *regs);
-long sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp);
-long _sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs);
-
-/* kernel/signal.c */
-long sys_sigaltstack(const stack_t __user *, stack_t __user *);
-long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-long sys_rt_sigreturn(void);
-long _sys_rt_sigreturn(struct pt_regs *regs);
-
-/* platform-independent functions */
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-long sys_rt_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
-
#ifndef __tilegx__
/* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address);
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long _sys_cmpxchg_badaddr(unsigned long address);
#endif
#ifdef CONFIG_COMPAT
-long compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp,
- struct pt_regs *regs);
-long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr);
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr,
- struct pt_regs *regs);
-long compat_sys_rt_sigreturn(void);
-long _compat_sys_rt_sigreturn(struct pt_regs *regs);
-
/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
@@ -110,4 +62,15 @@ long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
+/* These are the intvec*.S trampolines. */
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_rt_sigreturn(void);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid);
+long _sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp);
+
+#include <asm-generic/syscalls.h>
+
#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index f749be327ce0..5388850deeb2 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -89,6 +89,10 @@
#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */
#endif
+#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
+int __mb_incoherent(void); /* Helper routine for mb_incoherent(). */
+#endif
+
/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
@@ -97,7 +101,6 @@ mb_incoherent(void)
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
{
- int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
const unsigned long WRITE_TIMEOUT_CYCLES = 400;
unsigned long start = get_cycles_low();
@@ -161,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next,
- unsigned long new_system_save_1_0);
+ unsigned long new_system_save_k_0);
/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);
@@ -214,13 +217,6 @@ int hardwall_deactivate(struct task_struct *task);
} while (0)
#endif
-/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
-extern int _sim_syscall(int syscall_num, ...);
-#define sim_syscall(syscall_num, ...) \
- _sim_syscall(SIM_CONTROL_SYSCALL + \
- ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
- ## __VA_ARGS__)
-
/*
* Kernel threads can check to see if they need to migrate their
* stack whenever they return from a context switch; for user
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 432a9c15c8a2..d06e35f57201 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
void do_breakpoint(struct pt_regs *, int fault_num);
+#ifdef __tilegx__
+void gx_singlestep_handle(struct pt_regs *, int fault_num);
+#endif
+
#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 9bd303a141b2..f672544cd4f9 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
* when these occur in a client's interrupt critical section, they must
* be delivered through the downcall mechanism.
*
- * A downcall is initially delivered to the client as an INTCTRL_1
- * interrupt. Upon entry to the INTCTRL_1 vector, the client must
- * immediately invoke the hv_downcall_dispatch service. This service
- * will not return; instead it will cause one of the client's actual
- * downcall-handling interrupt vectors to be entered. The EX_CONTEXT
- * registers in the client will be set so that when the client irets,
- * it will return to the code which was interrupted by the INTCTRL_1
- * interrupt.
- *
- * Under some circumstances, the firing of INTCTRL_1 can race with
+ * A downcall is initially delivered to the client as an INTCTRL_CL
+ * interrupt, where CL is the client's PL. Upon entry to the INTCTRL_CL
+ * vector, the client must immediately invoke the hv_downcall_dispatch
+ * service. This service will not return; instead it will cause one of
+ * the client's actual downcall-handling interrupt vectors to be entered.
+ * The EX_CONTEXT registers in the client will be set so that when the
+ * client irets, it will return to the code which was interrupted by the
+ * INTCTRL_CL interrupt.
+ *
+ * Under some circumstances, the firing of INTCTRL_CL can race with
* the lowering of a device interrupt. In such a case, the
* hv_downcall_dispatch service may issue an iret instruction instead
* of entering one of the client's actual downcall-handling interrupt
* vectors. This will return execution to the location that was
- * interrupted by INTCTRL_1.
+ * interrupted by INTCTRL_CL.
*
* Any saving of registers should be done by the actual handling
- * vectors; no registers should be changed by the INTCTRL_1 handler.
+ * vectors; no registers should be changed by the INTCTRL_CL handler.
* In particular, the client should not use a jal instruction to invoke
* the hv_downcall_dispatch service, as that would overwrite the client's
* lr register. Note that the hv_downcall_dispatch service may overwrite
* one or more of the client's system save registers.
*
- * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor
+ * The client must not modify the INTCTRL_CL_STATUS SPR. The hypervisor
* will set this register to cause a downcall to happen, and will clear
* it when no further downcalls are pending.
*
- * When a downcall vector is entered, the INTCTRL_1 interrupt will be
+ * When a downcall vector is entered, the INTCTRL_CL interrupt will be
* masked. When the client is done processing a downcall, and is ready
* to accept another, it must unmask this interrupt; if more downcalls
- * are pending, this will cause the INTCTRL_1 vector to be reentered.
+ * are pending, this will cause the INTCTRL_CL vector to be reentered.
* Currently the following interrupt vectors can be entered through a
* downcall:
*
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index d3c41c1ff6bd..55a6a74974b4 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
/* Weird; reserved value, ignore it. */
continue;
}
+ if (info_operand & ENTRY_POINT_INFO_OP) {
+ /* This info op is ignored by the backtracer. */
+ continue;
+ }
/* Skip info ops which are not in the
* "one_ago" mode we want right now.
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index b1e06d041555..77739cdd9462 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -154,8 +154,14 @@ long tile_compat_sys_msgrcv(int msqid,
#define compat_sys_fstat64 sys_newfstat
#define compat_sys_fstatat64 sys_newfstatat
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
/*
* Note that we can't include <linux/unistd.h> here since the header
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9c710db43f13..fb64b99959d4 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -256,9 +256,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
return err;
}
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
- struct compat_sigaltstack __user *uoss_ptr,
- struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr,
+ struct pt_regs *regs)
{
stack_t uss, uoss;
int ret;
@@ -291,7 +291,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
return ret;
}
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame =
(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
@@ -312,7 +312,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
- if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+ if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
goto badframe;
return r0;
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
+#include <asm/processor.h>
#include <arch/abi.h>
+#include <arch/spr_def.h>
#ifdef __tilegx__
#define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
{ move r0, lr; jrp lr }
STD_ENDPROC(current_text_addr)
-STD_ENTRY(_sim_syscall)
- /*
- * Wait for r0-r9 to be ready (and lr on the off chance we
- * want the syscall to locate its caller), then make a magic
- * simulator syscall.
- *
- * We carefully stall until the registers are readable in case they
- * are the target of a slow load, etc. so that tile-sim will
- * definitely be able to read all of them inside the magic syscall.
- *
- * Technically this is wrong for r3-r9 and lr, since an interrupt
- * could come in and restore the registers with a slow load right
- * before executing the mtspr. We may need to modify tile-sim to
- * explicitly stall for this case, but we do not yet have
- * a way to implement such a stall.
- */
- { and zero, lr, r9 ; and zero, r8, r7 }
- { and zero, r6, r5 ; and zero, r4, r3 }
- { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
- { jrp lr }
- STD_ENDPROC(_sim_syscall)
-
/*
* Implement execve(). The i386 code has a note that forking from kernel
* space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
STD_ENTRY(cpu_idle_on_new_stack)
{
move sp, r1
- mtspr SYSTEM_SAVE_1_0, r2
+ mtspr SPR_SYSTEM_SAVE_K_0, r2
}
jal free_thread_info
j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
STD_ENTRY(_cpu_idle)
{
lnk r0
- movei r1, 1
+ movei r1, KERNEL_PL
}
{
addli r0, r0, _cpu_idle_nap - .
mtspr INTERRUPT_CRITICAL_SECTION, r1
}
- IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
- mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */
- mtspr EX_CONTEXT_1_0, r0
+ IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
+ mtspr SPR_EX_CONTEXT_K_1, r1 /* Kernel PL, ICS clear */
+ mtspr SPR_EX_CONTEXT_K_0, r0
iret
.global _cpu_idle_nap
_cpu_idle_nap:
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
+#include <arch/spr_def.h>
/*
* This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
}
1:
- /* Get our processor number and save it away in SAVE_1_0. */
+ /* Get our processor number and save it away in SAVE_K_0. */
jal hv_inquire_topology
mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
lw r0, r0
lw sp, r1
or r4, sp, r4
- mtspr SYSTEM_SAVE_1_0, r4 /* save ksp0 + cpu */
+ mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
addi sp, sp, -STACK_TOP_DELTA
{
move lr, zero /* stop backtraces in the called function */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 8f58bdff20d7..f5821626247f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
# error "No support for kernel preemption currently"
#endif
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
/* Temporarily save a register so we have somewhere to work. */
- mtspr SYSTEM_SAVE_1_1, r0
- mfspr r0, EX_CONTEXT_1_1
+ mtspr SPR_SYSTEM_SAVE_K_1, r0
+ mfspr r0, SPR_EX_CONTEXT_K_1
/* The cmpxchg code clears sp to force us to reset it here on fault. */
{
@@ -167,18 +167,18 @@ intvec_\vecname:
* The page_fault handler may be downcalled directly by the
* hypervisor even when Linux is running and has ICS set.
*
- * In this case the contents of EX_CONTEXT_1_1 reflect the
+ * In this case the contents of EX_CONTEXT_K_1 reflect the
* previous fault and can't be relied on to choose whether or
* not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+ * to see whether SYSTEM_SAVE_K_2 has the high bit set,
* and if so we don't reinitialize sp, since we must be coming
* from Linux. (In fact the precise case is !(val & ~1),
* but any Linux PC has to have the high bit set.)
*
- * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+ * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
* any path that turns into a downcall to one of our TLB handlers.
*/
- mfspr r0, SYSTEM_SAVE_1_2
+ mfspr r0, SPR_SYSTEM_SAVE_K_2
{
blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
move r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
2:
/*
- * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+ * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
* the current stack top in the higher bits. So we recover
* our stack top by just masking off the low bits, then
* point sp at the top aligned address on the actual stack page.
*/
- mfspr r0, SYSTEM_SAVE_1_0
+ mfspr r0, SPR_SYSTEM_SAVE_K_0
mm r0, r0, zero, LOG2_THREAD_SIZE, 31
0:
@@ -254,7 +254,7 @@ intvec_\vecname:
sw sp, r3
addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
}
- mfspr r0, EX_CONTEXT_1_0
+ mfspr r0, SPR_EX_CONTEXT_K_0
.ifc \processing,handle_syscall
/*
* Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
sw sp, r0
addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
}
- mfspr r0, EX_CONTEXT_1_1
+ mfspr r0, SPR_EX_CONTEXT_K_1
{
sw sp, r0
addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
.endif
addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
}
- mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */
+ mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
{
sw sp, r0
addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
* See discussion below at "finish_interrupt_save".
*/
.ifc \c_routine, do_page_fault
- mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */
- mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */
+ mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
+ mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
.else
.ifc \vecnum, INT_DOUBLE_FAULT
{
- mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */
+ mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
movei r3, 0
}
.else
@@ -467,7 +467,7 @@ intvec_\vecname:
/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
{
- mfspr r20, SYSTEM_SAVE_1_0
+ mfspr r20, SPR_SYSTEM_SAVE_K_0
moveli r21, lo16(__per_cpu_offset)
}
{
@@ -487,7 +487,7 @@ intvec_\vecname:
* We load flags in r32 here so we can jump to .Lrestore_regs
* directly after do_page_fault_ics() if necessary.
*/
- mfspr r32, EX_CONTEXT_1_1
+ mfspr r32, SPR_EX_CONTEXT_K_1
{
andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
{
- mtspr EX_CONTEXT_1_0, r21
+ mtspr SPR_EX_CONTEXT_K_0, r21
move r5, zero
}
{
- mtspr EX_CONTEXT_1_1, lr
+ mtspr SPR_EX_CONTEXT_K_1, lr
andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
}
@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)
/* Set r1 to errno if we are returning an error, otherwise zero. */
{
- moveli r29, 1024
+ moveli r29, 4096
sub r1, zero, r0
}
slt_u r29, r1, r29
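In C terms, the three instructions above test whether the return value falls in the kernel's errno window; bumping the constant from 1024 to 4096 lets every errno up to MAX_ERRNO (4095) be recognized. A sketch of the equivalent check:

static inline int in_errno_window(long r0)
{
	/* "sub r1, zero, r0" followed by an unsigned compare against 4096. */
	return (unsigned long)(-r0) < 4096;	/* true for r0 in [-4095, 0] */
}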
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
STD_ENDPROC(interrupt_return)
/*
- * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+ * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
* before returning, so we can properly get more downcalls.
*/
.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:
- /* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+ /* Clear INTCTRL_K from the set of interrupts we ever enable. */
GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
{
addi r30, r30, 4
- movei r31, INT_MASK(INT_INTCTRL_1)
+ movei r31, INT_MASK(INT_INTCTRL_K)
}
{
lw r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
}
FEEDBACK_REENTER(handle_interrupt_downcall)
- /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+ /* Allow INTCTRL_K to be enabled next time we enable interrupts. */
lw r20, r30
or r20, r20, r31
sw r30, r20
@@ -1472,7 +1472,12 @@ handle_ill:
lw r26, r24
sw r28, r26
- /* Clear TIF_SINGLESTEP */
+ /*
+ * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+ * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+ * need to clear it here and can't really impose on all other arches.
+ * So what's another write between friends?
+ */
GET_THREAD_INFO(r0)
addi r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1509,7 +1514,7 @@ handle_ill:
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, EX_CONTEXT_1_0
+ mfspr r1, SPR_EX_CONTEXT_K_0
move r2, lr
move r3, sp
move r4, r52
@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
STD_ENDPROC(_kernel_double_fault)
STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, EX_CONTEXT_1_0
+ mfspr r2, SPR_EX_CONTEXT_K_0
panic "Unhandled interrupt %#x: PC %#lx"
STD_ENDPROC(bad_intr)
/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg) \
- STD_ENTRY(x); \
+ STD_ENTRY(_##x); \
{ \
PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j _##x \
+ j x \
}; \
- STD_ENDPROC(x)
+ STD_ENDPROC(_##x)
PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
-/* Save additional callee-saves to pt_regs, put address in reg and jump. */
-#define PTREGS_SYSCALL_ALL_REGS(x, reg) \
- STD_ENTRY(x); \
- push_extra_callee_saves reg; \
- j _##x; \
- STD_ENDPROC(x)
-
-PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
-PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
+/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+STD_ENTRY(_sys_clone)
+ push_extra_callee_saves r4
+ j sys_clone
+ STD_ENDPROC(_sys_clone)
/*
* This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
* to be available to it on entry. It does not modify any callee-save
* registers (including "lr"). It does not check what PL it is being
* called at, so you'd better not call it other than at PL0.
+ * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
+ * it ever is necessary to use more registers, be aware.
*
* It does not use the stack, but since it might be re-interrupted by
* a page fault which would assume the stack was valid, it does
* save/restore the stack pointer and zero it out to make sure it gets reset.
* Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
* (other than to advance the PC on return).
*
* We have to manually validate the user vs kernel address range
@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
/* Do slow mtspr here so the following "mf" waits less. */
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
mf
@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
}
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
iret
@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
#endif
/* Issue the slow SPR here while the tns result is in flight. */
- mfspr r28, EX_CONTEXT_1_0
+ mfspr r28, SPR_EX_CONTEXT_K_0
{
addi r28, r28, 8 /* return to the instruction after the swint1 */
@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
.Lcmpxchg64_mismatch:
{
move sp, r27
- mtspr EX_CONTEXT_1_0, r28
+ mtspr SPR_EX_CONTEXT_K_0, r28
}
mf
{
@@ -1985,8 +1987,13 @@ int_unalign:
int_hand INT_PERF_COUNT, PERF_COUNT, \
op_handle_perf_interrupt, handle_nmi
int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+ dc_dispatch INT_INTCTRL_2, INTCTRL_2
+ int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
dc_dispatch INT_INTCTRL_1, INTCTRL_1
+#endif
int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
hv_message_intr, handle_interrupt_downcall
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 9a27d563fc30..e63917687e99 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* masked by a previous interrupt. Then, mask out the ones
* we're going to handle.
*/
- unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
- original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
- __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+ unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+ original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+ __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
/*
* Hypervisor performs the equivalent of the Gx code above and
* then puts the pending interrupt mask into a system save reg
* for us to find.
*/
- original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+ original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
remaining_irqs = original_irqs;
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
/* Enable interrupt delivery. */
unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
- raw_local_irq_unmask(INT_IPI_1);
+ raw_local_irq_unmask(INT_IPI_K);
#endif
}
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..997e3933f726 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
panic("hv_register_message_state: error %d", rc);
/* Make sure downcall interrupts will be enabled. */
- raw_local_irq_unmask(INT_INTCTRL_1);
+ raw_local_irq_unmask(INT_INTCTRL_K);
}
void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 84c29111756c..8430f45daea6 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
/*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
- * We have to restore the callee-saved registers since we may
- * be cloning a userspace task with userspace register state,
- * and we won't be unwinding the same kernel frames to restore them.
+ * This way when we start the interrupt-return sequence, the
+ * callee-save registers will be correctly in registers, which
+ * is how we assume the compiler leaves them as we start doing
+ * the normal return-from-interrupt path after calling C code.
* Zero out the C ABI save area to mark the top of the stack.
*/
ksp = (unsigned long) childregs;
@@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
+#if CONFIG_KERNEL_PL == 2
+ __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+ __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
}
/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
+#if CONFIG_KERNEL_PL == 2
+ __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+ __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
}
/* Pause the DMA engine, then save off its state registers. */
@@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
* Switch kernel SP, PC, and callee-saved registers.
* In the context of the new task, return the old task pointer
* (i.e. the task that actually called __switch_to).
- * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+ * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
*/
return __switch_to(prev, next, next_current_ksp0(next));
}
-long _sys_fork(struct pt_regs *regs)
-{
- return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tidptr, void __user *child_tidptr,
- struct pt_regs *regs)
+SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+ void __user *, parent_tidptr, void __user *, child_tidptr,
+ struct pt_regs *, regs)
{
if (!newsp)
newsp = regs->sp;
@@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
parent_tidptr, child_tidptr);
}
-long _sys_vfork(struct pt_regs *regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
- regs, 0, NULL, NULL);
-}
-
/*
* sys_execve() executes a new program.
*/
-long _sys_execve(const char __user *path,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs)
+SYSCALL_DEFINE4(execve, const char __user *, path,
+ const char __user *const __user *, argv,
+ const char __user *const __user *, envp,
+ struct pt_regs *, regs)
{
long error;
char *filename;
@@ -570,9 +571,10 @@ out:
}
#ifdef CONFIG_COMPAT
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, struct pt_regs *regs)
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp,
+ struct pt_regs *regs)
{
long error;
char *filename;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 7161bd03d2fd..9cd29884c09f 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -32,25 +32,6 @@ void user_disable_single_step(struct task_struct *child)
}
/*
- * This routine will put a word on the process's privileged stack.
- */
-static void putreg(struct task_struct *task,
- unsigned long addr, unsigned long value)
-{
- unsigned int regno = addr / sizeof(unsigned long);
- struct pt_regs *childregs = task_pt_regs(task);
- childregs->regs[regno] = value;
- childregs->flags |= PT_FLAGS_RESTORE_REGS;
-}
-
-static unsigned long getreg(struct task_struct *task, unsigned long addr)
-{
- unsigned int regno = addr / sizeof(unsigned long);
- struct pt_regs *childregs = task_pt_regs(task);
- return childregs->regs[regno];
-}
-
-/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
@@ -64,61 +45,77 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
- unsigned long __user *datap;
+ unsigned long __user *datap = (long __user __force *)data;
unsigned long tmp;
int i;
long ret = -EIO;
-
-#ifdef CONFIG_COMPAT
- if (task_thread_info(current)->status & TS_COMPAT)
- data = (u32)data;
- if (task_thread_info(child)->status & TS_COMPAT)
- addr = (u32)addr;
-#endif
- datap = (unsigned long __user __force *)data;
+ unsigned long *childregs;
+ char *childreg;
switch (request) {
case PTRACE_PEEKUSR: /* Read register from pt_regs. */
- if (addr & (sizeof(data)-1))
- break;
- if (addr < 0 || addr >= PTREGS_SIZE)
+ if (addr >= PTREGS_SIZE)
break;
- tmp = getreg(child, addr); /* Read register */
- ret = put_user(tmp, datap);
+ childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ if (addr & (sizeof(compat_long_t)-1))
+ break;
+ ret = put_user(*(compat_long_t *)childreg,
+ (compat_long_t __user *)datap);
+ } else
+#endif
+ {
+ if (addr & (sizeof(long)-1))
+ break;
+ ret = put_user(*(long *)childreg, datap);
+ }
break;
case PTRACE_POKEUSR: /* Write register in pt_regs. */
- if (addr & (sizeof(data)-1))
- break;
- if (addr < 0 || addr >= PTREGS_SIZE)
+ if (addr >= PTREGS_SIZE)
break;
- putreg(child, addr, data); /* Write register */
+ childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ if (addr & (sizeof(compat_long_t)-1))
+ break;
+ *(compat_long_t *)childreg = data;
+ } else
+#endif
+ {
+ if (addr & (sizeof(long)-1))
+ break;
+ *(long *)childreg = data;
+ }
ret = 0;
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
break;
- for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
- ret = __put_user(getreg(child, i), datap);
+ childregs = (long *)task_pt_regs(child);
+ for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+ ++i) {
+ ret = __put_user(childregs[i], &datap[i]);
if (ret != 0)
break;
- datap++;
}
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
break;
- for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
- ret = __get_user(tmp, datap);
+ childregs = (long *)task_pt_regs(child);
+ for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
+ ++i) {
+ ret = __get_user(childregs[i], &datap[i]);
if (ret != 0)
break;
- putreg(child, i, tmp);
- datap++;
}
break;
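Since arch_ptrace now treats addr as a byte offset into struct pt_regs and insists on word alignment, a tracer reads one register slot roughly as below. This is a userspace sketch, not part of the patch; offset 0 is assumed to be the first saved register.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);                 /* wait for the tracer */
		_exit(0);
	}

	waitpid(child, NULL, 0);                /* child is now stopped */

	/* Offset must be long-aligned or arch_ptrace returns -EIO. */
	errno = 0;
	long val = ptrace(PTRACE_PEEKUSER, child, (void *)0L, NULL);
	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("pt_regs[0] = %#lx\n", val);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}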
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
{
/* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
move sp, r13
- mtspr SYSTEM_SAVE_1_0, r2
+ mtspr SPR_SYSTEM_SAVE_K_0, r2
}
FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
.L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index e7d54c73d5c1..ae51cad12da0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -30,8 +30,6 @@
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -187,11 +185,11 @@ early_param("vmalloc", parse_vmalloc);
#ifdef CONFIG_HIGHMEM
/*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there. On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there. On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
*
* One question is whether, on systems with more than 768 Mb and
* controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +309,7 @@ static void __init setup_memory(void)
#endif
/* We are using a char to hold the cpu_2_node[] mapping */
- BUG_ON(MAX_NUMNODES > 127);
+ BUILD_BUG_ON(MAX_NUMNODES > 127);
/* Discover the ranges of memory available to us */
for (i = 0; ; ++i) {
@@ -876,6 +874,9 @@ void __cpuinit setup_cpu(int boot)
#if CHIP_HAS_SN_PROC()
raw_local_irq_unmask(INT_SNITLB_MISS);
#endif
+#ifdef __tilegx__
+ raw_local_irq_unmask(INT_SINGLE_STEP_K);
+#endif
/*
* Allow user access to many generic SPRs, like the cycle
@@ -893,11 +894,12 @@ void __cpuinit setup_cpu(int boot)
#endif
/*
- * Set the MPL for interrupt control 0 to user level.
- * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
- * as well as the PL 0 interrupt mask.
+ * Set the MPL for interrupt control 0 & 1 to the corresponding
+ * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
+ * SPRs, as well as the interrupt mask.
*/
__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+ __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
/* Initialize IRQ support for this cpu. */
setup_irq_regs();
@@ -1033,7 +1035,7 @@ static void __init validate_va(void)
* In addition, make sure we CAN'T use the end of memory, since
* we use the last chunk of each pgd for the pgd_list.
*/
- int i, fc_fd_ok = 0;
+ int i, user_kernel_ok = 0;
unsigned long max_va = 0;
unsigned long list_va =
((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1046,13 @@ static void __init validate_va(void)
break;
if (range.start <= MEM_USER_INTRPT &&
range.start + range.size >= MEM_HV_INTRPT)
- fc_fd_ok = 1;
+ user_kernel_ok = 1;
if (range.start == 0)
max_va = range.size;
BUG_ON(range.start + range.size > list_va);
}
- if (!fc_fd_ok)
- early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+ if (!user_kernel_ok)
+ early_panic("Hypervisor not configured for user/kernel VAs\n");
if (max_va == 0)
early_panic("Hypervisor not configured for low VAs\n");
if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1336,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
pte_t *pte;
BUG_ON(pgd_addr_invalid(addr));
+ if (addr < VMALLOC_START || addr >= VMALLOC_END)
+ panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
+ " try increasing CONFIG_VMALLOC_RESERVE\n",
+ addr, VMALLOC_START, VMALLOC_END);
pgd = swapper_pg_dir + pgd_index(addr);
pud = pud_offset(pgd, addr);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index ce183aa1492c..fb28e85ae3ae 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -41,8 +41,8 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-long _sys_sigaltstack(const stack_t __user *uss,
- stack_t __user *uoss, struct pt_regs *regs)
+SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
+ stack_t __user *, uoss, struct pt_regs *, regs)
{
return do_sigaltstack(uss, uoss, regs->sp);
}
@@ -78,7 +78,7 @@ int restore_sigcontext(struct pt_regs *regs,
}
/* sigreturn() returns long since it restores r0 in the interrupted code. */
-long _sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->sp);
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
* Derived from iLib's single-stepping code.
*/
-#ifndef __tilegx__ /* No support for single-step yet. */
+#ifndef __tilegx__ /* Hardware support for single step unavailable. */
/* These functions are only used on the TILE platform */
#include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
regs->pc += 8;
}
+#else
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <arch/spr_def.h>
+
+static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
+
+
+/*
+ * Called directly on the occasion of an interrupt.
+ *
+ * If the process doesn't have single step set, then we use this as an
+ * opportunity to turn single step off.
+ *
+ * It has been mentioned that we could conditionally turn off single stepping
+ * on each entry into the kernel and rely on single_step_once to turn it
+ * on for the processes that matter (as we already do), but this
+ * implementation is somewhat more efficient in that we muck with registers
+ * once on a bum interrupt rather than on every entry into the kernel.
+ *
+ * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
+ * so we have to run through this process again before we can say that an
+ * instruction has executed.
+ *
+ * swint will set CANCELED, but it's a legitimate instruction. Fortunately
+ * it changes the PC. If it hasn't changed, then we know that the interrupt
+ * wasn't generated by swint and we'll need to run this process again before
+ * we can say an instruction has executed.
+ *
+ * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
+ * on with our lives.
+ */
+
+void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
+{
+ unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ struct thread_info *info = (void *)current_thread_info();
+ int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
+ unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+ if (is_single_step == 0) {
+ __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
+
+ } else if ((*ss_pc != regs->pc) ||
+ (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
+
+ ptrace_notify(SIGTRAP);
+ control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+ control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+ __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+ }
+}
+
+
+/*
+ * Called from need_singlestep. Set up the control registers and the enable
+ * register, then return back.
+ */
+
+void single_step_once(struct pt_regs *regs)
+{
+ unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+ *ss_pc = regs->pc;
+ control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+ control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+ __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+ __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
+}
+
#endif /* !__tilegx__ */
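From the tracer's side none of this is visible beyond the usual ptrace contract: PTRACE_SINGLESTEP arms the mechanism that gx_singlestep_handle() later services, and the tracee stops with SIGTRAP after one instruction retires. A small helper, sketched for illustration with minimal error handling:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Step an already-stopped tracee by one instruction; returns 0 on the
 * expected SIGTRAP stop, -1 otherwise. */
static int step_once(pid_t child)
{
	int status;

	if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0)
		return -1;
	if (waitpid(child, &status, 0) < 0)
		return -1;
	return (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) ? 0 : -1;
}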
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..75255d90aff3 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -212,7 +212,7 @@ void __init ipi_init(void)
tile.x = cpu_x(cpu);
tile.y = cpu_y(cpu);
- if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+ if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
panic("Failed to initialize IPI for cpu %d\n", cpu);
offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ea2e0ce28380..0d54106be3d6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,6 +30,10 @@
#include <arch/abi.h>
#include <arch/interrupts.h>
+#define KBT_ONGOING 0 /* Backtrace still ongoing */
+#define KBT_DONE 1 /* Backtrace cleanly completed */
+#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
+#define KBT_LOOP 3 /* Backtrace entered a loop */
/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
for (;;) {
do {
if (!KBacktraceIterator_is_sigreturn(kbt))
- return 1;
+ return KBT_ONGOING;
} while (backtrace_next(&kbt->it));
if (!KBacktraceIterator_restart(kbt))
- return 0;
+ return KBT_DONE;
}
}
@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
kbt->pgtable = NULL;
kbt->verbose = 0; /* override in caller if desired */
kbt->profile = 0; /* override in caller if desired */
- kbt->end = 0;
+ kbt->end = KBT_ONGOING;
kbt->new_context = 0;
if (is_current) {
HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
if (regs == NULL) {
if (is_current || t->state == TASK_RUNNING) {
/* Can't do this; we need registers */
- kbt->end = 1;
+ kbt->end = KBT_RUNNING;
return;
}
pc = get_switch_to_pc();
@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
}
backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
- kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+ kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
- return kbt->end;
+ return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
+ VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
kbt->new_context = 0;
- if (!backtrace_next(&kbt->it) &&
- !KBacktraceIterator_restart(kbt)) {
- kbt->end = 1;
- return;
- }
-
- kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+ if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
+ kbt->end = KBT_DONE;
+ return;
+ }
+ kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
+ if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
+ /* Trapped in a loop; give up. */
+ kbt->end = KBT_LOOP;
+ }
}
EXPORT_SYMBOL(KBacktraceIterator_next);
@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
break;
}
}
+ if (kbt->end == KBT_LOOP)
+ pr_err("Stack dump stopped; next frame identical to this one\n");
if (headers)
pr_err("Stack dump complete\n");
}
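With the end field now carrying a reason code rather than a boolean, a caller's loop is unchanged and only the post-mortem check differs. A simplified kernel-side sketch (the init arguments mirror how tile_show_stack's callers are assumed to set up the iterator; task and regs are placeholders):

struct KBacktraceIterator kbt;

KBacktraceIterator_init(&kbt, task, regs);
for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
	pr_err("  pc %#lx sp %#lx\n",
	       (unsigned long)kbt.it.pc, (unsigned long)kbt.it.sp);
if (kbt.end == KBT_LOOP)
	pr_err("  (backtrace stopped: repeated frame)\n");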
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index f0f87eab8c39..7e764669a022 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -110,6 +110,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
#define sys_sync_file_range sys_sync_file_range2
#endif
+/* Call the trampolines to manage pt_regs where necessary. */
+#define sys_execve _sys_execve
+#define sys_sigaltstack _sys_sigaltstack
+#define sys_rt_sigreturn _sys_rt_sigreturn
+#define sys_clone _sys_clone
+#ifndef __tilegx__
+#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
+#endif
+
/*
* Note that we can't include <linux/unistd.h> here since the header
* guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0f362dc2c57f..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
address = regs->pc;
break;
case INT_UNALIGN_DATA:
-#ifndef __tilegx__ /* FIXME: GX: no single-step yet */
+#ifndef __tilegx__ /* Emulated support for single step debugging */
if (unaligned_fixup >= 0) {
struct single_step_state *state =
current_thread_info()->step_state;
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_DOUBLE_FAULT:
/*
* For double fault, "reason" is actually passed as
- * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+ * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
* we can provide the original fault number rather than
* the uninteresting "INT_DOUBLE_FAULT" so the user can
* learn what actually struck while PL0 ICS was set.
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
new file mode 100644
index 000000000000..b88f9c047781
--- /dev/null
+++ b/arch/tile/kvm/Kconfig
@@ -0,0 +1,38 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM && MODULES && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ ---help---
+ Support hosting paravirtualized guest machines.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+source drivers/vhost/Kconfig
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 746dc81ed3c4..93122d5b1558 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -3,8 +3,8 @@
#
lib-y = cacheflush.o checksum.o cpumask.o delay.o \
- mb_incoherent.o uaccess.o \
- memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
+ mb_incoherent.o uaccess.o memmove.o \
+ memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
strchr_$(BITS).o strlen_$(BITS).o
ifeq ($(CONFIG_TILEGX),y)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8eea..7a5cc706ab62 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* Validate power-of-two and "bigger than cpus" assumption */
- BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
/*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
/* The locks must all fit on one page. */
- BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
/*
* We use the page offset of the atomic value's address as
* an index into atomic_locks, excluding the low 3 bits.
* That should not produce more indices than ATOMIC_HASH_SIZE.
*/
- BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+ BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* The futex code makes this assumption, so we validate it here. */
- BUG_ON(sizeof(atomic_t) != sizeof(int));
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
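Switching these checks to BUILD_BUG_ON() moves the failure from a boot-time panic to a compile error, which works here because every condition is a compile-time constant. The classic trick behind such a macro, shown standalone under a hypothetical name:

/* A negative array size is rejected by the compiler, so any constant
 * condition that evaluates true kills the build; it costs nothing when
 * the condition is false. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	MY_BUILD_BUG_ON(sizeof(int) < 2);    /* fine: condition is false */
	/* MY_BUILD_BUG_ON(sizeof(int) < 8);    would fail to compile */
	return 0;
}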
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index ce5dbf56578f..1509c5597653 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -45,6 +45,9 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
EXPORT_SYMBOL(__copy_in_user_inatomic);
#endif
+/* arch/tile/lib/mb_incoherent.S */
+EXPORT_SYMBOL(__mb_incoherent);
+
/* hypervisor glue */
#include <hv/hypervisor.h>
EXPORT_SYMBOL(hv_dev_open);
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index 30c3b7ebb55d..2a419a6122db 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -10,14 +10,16 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
- *
- * This file shares the implementation of the userspace memcpy and
- * the kernel's memcpy, copy_to_user and copy_from_user.
*/
#include <arch/chip.h>
+/*
+ * This file shares the implementation of the userspace memcpy and
+ * the kernel's memcpy, copy_to_user and copy_from_user.
+ */
+
#include <linux/linkage.h>
/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
@@ -53,9 +55,9 @@
*/
ENTRY(__copy_from_user_inatomic)
.type __copy_from_user_inatomic, @function
- FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
.text.memcpy_common, \
- .Lend_memcpy_common - __copy_from_user_inatomic)
+ .Lend_memcpy_common - __copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
@@ -64,7 +66,7 @@ ENTRY(__copy_from_user_inatomic)
*/
ENTRY(__copy_from_user_zeroing)
.type __copy_from_user_zeroing, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
@@ -74,13 +76,13 @@ ENTRY(__copy_from_user_zeroing)
*/
ENTRY(__copy_to_user_inatomic)
.type __copy_to_user_inatomic, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_COPY_TO_USER; j memcpy_common }
.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
ENTRY(memcpy)
.type memcpy, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(__copy_from_user_inatomic)
{ movei r29, IS_MEMCPY }
.size memcpy, . - memcpy
/* Fall through */
@@ -157,35 +159,35 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
{ addi r3, r1, 60; andi r9, r9, -64 }
#if CHIP_HAS_WH64()
- /* No need to prefetch dst, we'll just do the wh64
- * right before we copy a line.
+ /* No need to prefetch dst, we'll just do the wh64
+ * right before we copy a line.
*/
#endif
EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, .; move r27, lr }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, .; move r27, lr }
EX: { lw r6, r3; addi r3, r3, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
EX: { lw r7, r3; addi r3, r3, 64 }
#if !CHIP_HAS_WH64()
- /* Prefetch the dest */
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- /* Use a real load to cause a TLB miss if necessary. We aren't using
- * r28, so this should be fine.
- */
+ /* Prefetch the dest */
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ /* Use a real load to cause a TLB miss if necessary. We aren't using
+ * r28, so this should be fine.
+ */
EX: { lw r28, r9; addi r9, r9, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- { prefetch r9; addi r9, r9, 64 }
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bnzt zero, . }
- { prefetch r9; addi r9, r9, 64 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ { prefetch r9; addi r9, r9, 64 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bnzt zero, . }
+ { prefetch r9; addi r9, r9, 64 }
#endif
- /* Intentionally stall for a few cycles to leave L2 cache alone. */
- { bz zero, .Lbig_loop2 }
+ /* Intentionally stall for a few cycles to leave L2 cache alone. */
+ { bz zero, .Lbig_loop2 }
/* On entry to this loop:
* - r0 points to the start of dst line 0
@@ -197,7 +199,7 @@ EX: { lw r28, r9; addi r9, r9, 64 }
* to some "safe" recently loaded address.
* - r5 contains *(r1 + 60) [i.e. last word of source line 0]
* - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
- * - r9 contains ((r0 + 63) & -64)
+ * - r9 contains ((r0 + 63) & -64)
* [start of next dst cache line.]
*/
@@ -208,137 +210,137 @@ EX: { lw r28, r9; addi r9, r9, 64 }
/* Copy line 0, first stalling until r5 is ready. */
EX: { move r12, r5; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r5, r3; addi r3, r3, 64 }
- { jal .Lcopy_line }
+ { jal .Lcopy_line }
/* Copy line 1, first stalling until r6 is ready. */
EX: { move r12, r6; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r6, r3; addi r3, r3, 64 }
{ jal .Lcopy_line }
/* Copy line 2, first stalling until r7 is ready. */
EX: { move r12, r7; lw r16, r1 }
{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
- /* Prefetch several lines ahead. */
+ /* Prefetch several lines ahead. */
EX: { lw r7, r3; addi r3, r3, 64 }
- /* Use up a caches-busy cycle by jumping back to the top of the
- * loop. Might as well get it out of the way now.
- */
- { j .Lbig_loop }
+ /* Use up a caches-busy cycle by jumping back to the top of the
+ * loop. Might as well get it out of the way now.
+ */
+ { j .Lbig_loop }
/* On entry:
* - r0 points to the destination line.
* - r1 points to the source line.
- * - r3 is the next prefetch address.
+ * - r3 is the next prefetch address.
* - r9 holds the last address used for wh64.
* - r12 = WORD_15
- * - r16 = WORD_0.
- * - r17 == r1 + 16.
- * - r27 holds saved lr to restore.
+ * - r16 = WORD_0.
+ * - r17 == r1 + 16.
+ * - r27 holds saved lr to restore.
*
* On exit:
* - r0 is incremented by 64.
* - r1 is incremented by 64, unless that would point to a word
- * beyond the end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
+ * beyond the end of the source array, in which case it is redirected
+ * to point to an arbitrary word already in the cache.
* - r2 is decremented by 64.
- * - r3 is unchanged, unless it points to a word beyond the
- * end of the source array, in which case it is redirected
- * to point to an arbitrary word already in the cache.
- * Redirecting is OK since if we are that close to the end
- * of the array we will not come back to this subroutine
- * and use the contents of the prefetched address.
+ * - r3 is unchanged, unless it points to a word beyond the
+ * end of the source array, in which case it is redirected
+ * to point to an arbitrary word already in the cache.
+ * Redirecting is OK since if we are that close to the end
+ * of the array we will not come back to this subroutine
+ * and use the contents of the prefetched address.
* - r4 is nonzero iff r2 >= 64.
- * - r9 is incremented by 64, unless it points beyond the
- * end of the last full destination cache line, in which
- * case it is redirected to a "safe address" that can be
- * clobbered (sp - 64)
+ * - r9 is incremented by 64, unless it points beyond the
+ * end of the last full destination cache line, in which
+ * case it is redirected to a "safe address" that can be
+ * clobbered (sp - 64)
* - lr contains the value in r27.
*/
/* r26 unused */
.Lcopy_line:
- /* TODO: when r3 goes past the end, we would like to redirect it
- * to prefetch the last partial cache line (if any) just once, for the
- * benefit of the final cleanup loop. But we don't want to
- * prefetch that line more than once, or subsequent prefetches
- * will go into the RTF. But then .Lbig_loop should unconditionally
- * branch to top of loop to execute final prefetch, and its
- * nop should become a conditional branch.
- */
-
- /* We need two non-memory cycles here to cover the resources
- * used by the loads initiated by the caller.
- */
- { add r15, r1, r2 }
+ /* TODO: when r3 goes past the end, we would like to redirect it
+ * to prefetch the last partial cache line (if any) just once, for the
+ * benefit of the final cleanup loop. But we don't want to
+ * prefetch that line more than once, or subsequent prefetches
+ * will go into the RTF. But then .Lbig_loop should unconditionally
+ * branch to top of loop to execute final prefetch, and its
+ * nop should become a conditional branch.
+ */
+
+ /* We need two non-memory cycles here to cover the resources
+ * used by the loads initiated by the caller.
+ */
+ { add r15, r1, r2 }
.Lcopy_line2:
- { slt_u r13, r3, r15; addi r17, r1, 16 }
+ { slt_u r13, r3, r15; addi r17, r1, 16 }
- /* NOTE: this will stall for one cycle as L1 is busy. */
+ /* NOTE: this will stall for one cycle as L1 is busy. */
- /* Fill second L1D line. */
+ /* Fill second L1D line. */
EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
#if CHIP_HAS_WH64()
- /* Prepare destination line for writing. */
+ /* Prepare destination line for writing. */
EX: { wh64 r9; addi r9, r9, 64 }
#else
- /* Prefetch dest line */
+ /* Prefetch dest line */
{ prefetch r9; addi r9, r9, 64 }
#endif
- /* Load seven words that are L1D hits to cover wh64 L2 usage. */
+ /* Load seven words that are L1D hits to cover wh64 L2 usage. */
- /* Load the three remaining words from the last L1D line, which
- * we know has already filled the L1D.
- */
+ /* Load the three remaining words from the last L1D line, which
+ * we know has already filled the L1D.
+ */
EX: { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 } /* r4 = WORD_12 */
EX: { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 }/* r8 = WORD_13 */
EX: { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */
- /* Load the three remaining words from the first L1D line, first
- * stalling until it has filled by "looking at" r16.
- */
+ /* Load the three remaining words from the first L1D line, first
+ * stalling until it has filled by "looking at" r16.
+ */
EX: { lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
EX: { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */
- /* Load second word from the second L1D line, first
- * stalling until it has filled by "looking at" r17.
- */
+ /* Load second word from the second L1D line, first
+ * stalling until it has filled by "looking at" r17.
+ */
EX: { lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */
- /* Store last word to the destination line, potentially dirtying it
- * for the first time, which keeps the L2 busy for two cycles.
- */
+ /* Store last word to the destination line, potentially dirtying it
+ * for the first time, which keeps the L2 busy for two cycles.
+ */
EX: { sw r10, r12 } /* store(WORD_15) */
- /* Use two L1D hits to cover the sw L2 access above. */
+ /* Use two L1D hits to cover the sw L2 access above. */
EX: { lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
EX: { lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */
- /* Fill third L1D line. */
+ /* Fill third L1D line. */
EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
- /* Store first L1D line. */
+ /* Store first L1D line. */
EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
#if CHIP_HAS_WH64()
EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
#else
- /* Back up the r9 to a cache line we are already storing to
+ /* Back up the r9 to a cache line we are already storing to
* if it gets past the end of the dest vector. Strictly speaking,
* we don't need to back up to the start of a cache line, but it's free
* and tidy, so why not?
- */
+ */
EX: { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
#endif
- /* Store second L1D line. */
+ /* Store second L1D line. */
EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
EX: { sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
@@ -348,30 +350,30 @@ EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
EX: { lw r15, r1; move r1, r20 } /* r15 = WORD_11 */
- /* Store third L1D line. */
+ /* Store third L1D line. */
EX: { sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
EX: { sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
EX: { sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
EX: { sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */
- /* Store rest of fourth L1D line. */
+ /* Store rest of fourth L1D line. */
EX: { sw r0, r4; addi r0, r0, 4 } /* store(WORD_12) */
- {
+ {
EX: sw r0, r8 /* store(WORD_13) */
- addi r0, r0, 4
+ addi r0, r0, 4
/* Will r2 be > 64 after we subtract 64 below? */
- shri r4, r2, 7
- }
- {
+ shri r4, r2, 7
+ }
+ {
EX: sw r0, r11 /* store(WORD_14) */
- addi r0, r0, 8
- /* Record 64 bytes successfully copied. */
- addi r2, r2, -64
- }
+ addi r0, r0, 8
+ /* Record 64 bytes successfully copied. */
+ addi r2, r2, -64
+ }
{ jrp lr; move lr, r27 }
- /* Convey to the backtrace library that the stack frame is size
+ /* Convey to the backtrace library that the stack frame is size
* zero, and the real return address is on the stack rather than
* in 'lr'.
*/
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove.c
index fd615ae6ade7..fd615ae6ade7 100644
--- a/arch/tile/lib/memmove_32.c
+++ b/arch/tile/lib/memmove.c
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index d014c1fbcbc2..57dbb3a5bff8 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/module.h>
+#undef memset
void *memset(void *s, int c, size_t n)
{
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c
index f26f88e11e4a..4974292a5534 100644
--- a/arch/tile/lib/strlen_32.c
+++ b/arch/tile/lib/strlen_32.c
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/module.h>
+#undef strlen
+
size_t strlen(const char *s)
{
/* Get an aligned pointer. */
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 704f3e8a4385..f295b4ac941d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -66,10 +66,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
#ifndef __tilegx__
/*
* Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address. Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
*/
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+ struct pt_regs *, regs)
{
if (address >= PAGE_OFFSET)
force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
@@ -563,10 +563,10 @@ do_sigbus:
/*
* When we take an ITLB or DTLB fault or access violation in the
* supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
* since that might indicate we have not yet squirreled the SPR
* contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
*
* Note that this routine is called before homecache_tlb_defer_enter(),
* which means that we can properly unlock any atomics that might
@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
* fault. We didn't set up a kernel stack on initial entry to
* sys_cmpxchg, but instead had one set up by the fault, which
* (because sys_cmpxchg never releases ICS) came to us via the
- * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+ * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
* still referencing the original user code. We release the
* atomic lock and rewrite pt_regs so that it appears that we
* came from user-space directly, and after we finish the
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137e7d4f..abb57331cf6e 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- static unsigned warn_count = 10;
-
- if (unlikely(warn_count == 0))
- return;
-
- if (unlikely(in_interrupt())) {
- if (in_irq()) {
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_BIO_SRC_IRQ &&
- /* type != KM_BIO_DST_IRQ && */
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (!irqs_disabled()) { /* softirq */
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
- type != KM_SKB_SUNRPC_DATA &&
- type != KM_SKB_DATA_SOFTIRQ &&
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- }
- }
-
- if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
- type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
- if (!irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
- if (irq_count() == 0 && !irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- }
-#endif
-}
-
/*
* Describe a single atomic mapping of a page on a given cpu at a
* given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
 * When holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pte_t *pte;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic_prot(type);
-
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
pte = kmap_get_pte(vaddr);
@@ -269,28 +224,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
/* PAGE_NONE is a magic value that tells us to check immutability. */
 return kmap_atomic_prot(page, PAGE_NONE);
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
- /*
- * Force other mappings to Oops if they try to access this pte without
- * first remapping it. Keeping stale mappings around is a bad idea.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
pte_t *pte = kmap_get_pte(vaddr);
pte_t pteval = *pte;
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this pte
+ * without first remapping it. Keeping stale mappings around
+ * is a bad idea.
+ */
BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
kmap_atomic_unregister(pte_page(pteval), vaddr);
kpte_clear_flush(pte, vaddr);
+ kmap_atomic_idx_pop();
} else {
/* Must be a lowmem page */
BUG_ON(vaddr < PAGE_OFFSET);
@@ -300,19 +262,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
arch_flush_lazy_mmu_mode();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This API is supposed to allow us to map memory without a "struct page".
* Currently we don't support this, though this may change in the future.
*/
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- return kmap_atomic(pfn_to_page(pfn), type);
+ return kmap_atomic(pfn_to_page(pfn));
}
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+ return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
struct page *kmap_atomic_to_page(void *ptr)
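Dropping the km_type argument means nesting is now tracked by the per-task index stack maintained by kmap_atomic_idx_push()/kmap_atomic_idx_pop(), so callers simply pair map and unmap in reverse order. A minimal kernel-side usage sketch (src_page and dst_page are assumed valid highmem pages):

void *src = kmap_atomic(src_page);
void *dst = kmap_atomic(dst_page);     /* nested map, no KM_* slot to pick */

memcpy(dst, src, PAGE_SIZE);

kunmap_atomic(dst);                    /* unmap the innermost mapping first */
kunmap_atomic(src);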
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index fb3b4a55cec4..d78df3a6ee15 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -37,6 +37,8 @@
#include <asm/pgalloc.h>
#include <asm/homecache.h>
+#include <arch/sim.h>
+
#include "migrate.h"
@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
- sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
- (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)
homecache_mask(page, pages, &home_mask);
flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
- validate_lines_evicted(pfn, pages * PAGE_SIZE);
+ sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..78e1982cb6c9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -1060,7 +1060,7 @@ void free_initmem(void)
/*
* Free the pages mapped from 0xc0000000 that correspond to code
- * pages from 0xfd000000 that we won't use again after init.
+ * pages from MEM_SV_INTRPT that we won't use again after init.
*/
free_init_pages("unused kernel text",
(unsigned long)_sinittext - text_delta,
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index ec2b8da1aba4..50d6aa20c353 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -120,6 +120,9 @@ config SMP
If you don't know what to do, say N.
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ def_bool y
+
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
@@ -147,3 +150,6 @@ config KERNEL_STACK_ORDER
This option determines the size of UML kernel stacks. They will
be 1 << order pages. The default is OK unless you're running Valgrind
on UML, in which case, set this to 3.
+
+config NO_DMA
+ def_bool y
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 6bd456f96f90..564f3de65b4a 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -566,7 +566,6 @@ CONFIG_CRC32=m
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_PLIST=y
-CONFIG_HAS_DMA=y
#
# SCSI device support
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
deleted file mode 100644
index 1f469e80fdd3..000000000000
--- a/arch/um/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,112 +0,0 @@
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <asm/scatterlist.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- BUG();
- return(0);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
- BUG();
- return(0);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- BUG();
- return((void *) 0);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
-{
- BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- BUG();
- return(0);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG();
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
-{
- BUG();
- return 0;
-}
-
-#endif
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index a9f7251b4a8d..41474fb5eee7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 93af1cf0907d..68a90ecd1450 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -8,23 +8,38 @@ extern int set_signals(int enable);
extern void block_signals(void);
extern void unblock_signals(void);
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
- (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
- set_signals(flags); } while(0)
-
-#define local_irq_save(flags) do { local_save_flags(flags); \
- local_irq_disable(); } while(0)
-
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (flags == 0); \
-})
+static inline unsigned long arch_local_save_flags(void)
+{
+ return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_local_save_flags() == 0;
+}
extern void *_switch_to(void *prev, void *next, void *last);
#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
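These arch_* helpers plug into the generic irqflags layer, so ordinary UML kernel code keeps the familiar interface while signal blocking does the masking underneath. A sketch of a caller, not part of the patch:

unsigned long flags;

local_irq_save(flags);        /* ends up in arch_local_irq_save() */
/* ... critical section with SIGIO/SIGVTALRM blocked ... */
local_irq_restore(flags);     /* ends up in arch_local_irq_restore(flags) */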
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 69268014dd8e..a3cab6d3ae02 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -50,8 +50,18 @@ SECTIONS
.rela.got : { *(.rela.got) }
.rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
.rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
- .rel.plt : { *(.rel.plt) }
- .rela.plt : { *(.rela.plt) }
+ .rel.plt : {
+ *(.rel.plt)
+ PROVIDE_HIDDEN(__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN(__rel_iplt_end = .);
+ }
+ .rela.plt : {
+ *(.rela.plt)
+ PROVIDE_HIDDEN(__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN(__rela_iplt_end = .);
+ }
.init : {
KEEP (*(.init))
} =0x90909090
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a746e3037a5b..3f0ac9e0c966 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -334,7 +334,7 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
irq_enter();
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
return 1;
@@ -391,17 +391,10 @@ void __init init_IRQ(void)
{
int i;
- irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
- irq_desc[TIMER_IRQ].action = NULL;
- irq_desc[TIMER_IRQ].depth = 1;
- irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
- enable_irq(TIMER_IRQ);
+ set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
for (i = 1; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &normal_irq_type;
- enable_irq(i);
+ set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}
}
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index e0510496596c..a5e33f29bbeb 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -42,10 +42,12 @@ void ptrace_disable(struct task_struct *child)
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int i, ret;
- unsigned long __user *p = (void __user *)(unsigned long)data;
+ unsigned long __user *p = (void __user *)data;
+ void __user *vp = p;
switch (request) {
/* read word at location addr. */
@@ -107,24 +109,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(vp, child);
break;
#endif
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(vp, child);
break;
#endif
case PTRACE_GET_THREAD_AREA:
- ret = ptrace_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_get_thread_area(child, addr, vp);
break;
case PTRACE_SET_THREAD_AREA:
- ret = ptrace_set_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_set_thread_area(child, addr, vp);
break;
case PTRACE_FAULTINFO: {
@@ -134,7 +132,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* On i386, ptrace_faultinfo is smaller!
*/
ret = copy_to_user(p, &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo));
+ sizeof(struct ptrace_faultinfo)) ?
+ -EIO : 0;
break;
}
@@ -158,7 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_ARCH_PRCTL
case PTRACE_ARCH_PRCTL:
/* XXX Calls ptrace on the host - needs some SMP thinking */
- ret = arch_prctl(child, data, (void *) addr);
+ ret = arch_prctl(child, data, (void __user *) addr);
break;
#endif
default:
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index ec6378550671..fbd99402d4d2 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -22,7 +22,7 @@ SECTIONS
_text = .;
_stext = .;
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);
.text :
@@ -43,6 +43,23 @@ SECTIONS
__syscall_stub_end = .;
}
+ /*
+ * These are needed even in a static link, even if they wind up being empty.
+ * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
+ */
+ .rel.plt : {
+ *(.rel.plt)
+ PROVIDE_HIDDEN(__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN(__rel_iplt_end = .);
+ }
+ .rela.plt : {
+ *(.rela.plt)
+ PROVIDE_HIDDEN(__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN(__rela_iplt_end = .);
+ }
+
#include "asm/common.lds.S"
init.data : { INIT_DATA }
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index dec5678fc17f..6e3359d6a839 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
long long disable_timer(void)
{
struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
- int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+ long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
printk(UM_KERN_ERR "disable_timer - setitimer failed, "
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index c9b176534d65..d23b2d3ea384 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -203,8 +203,8 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
(unsigned long *) &fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
return -EIO;
}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index f3458d7d1c5a..f43613643cdb 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -175,19 +175,18 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
return restore_fp_registers(userspace_pid[cpu], fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EIO;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_GETFPXREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(datap, child);
break;
case PTRACE_SETFPXREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(datap, child);
break;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dfabfefc21c4..299fbc86f570 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -347,6 +347,7 @@ endif
config X86_VSMP
bool "ScaleMP vSMP"
+ select PARAVIRT_GUEST
select PARAVIRT
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac324..3bd04022fd0c 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do { } while (0)
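
On the caller side, the converted interface above drops the km_type argument entirely: map with kmap_atomic(page), unmap with kunmap_atomic(vaddr), and nesting is tracked by a per-CPU slot stack. A minimal sketch of a caller against the converted API, written as a hypothetical out-of-tree module (not part of this series):

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int __init kmap_demo_init(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);
	void *vaddr;

	if (!page)
		return -ENOMEM;

	vaddr = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);		/* was: kunmap_atomic(vaddr, KM_USER0) */

	__free_page(page);
	return 0;
}

static void __exit kmap_demo_exit(void)
{
}

module_init(kmap_demo_init);
module_exit(kmap_demo_exit);
MODULE_LICENSE("GPL");
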
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b7056..363e33eb6ec1 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
#include <asm/tlbflush.h>
void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0bf5b0083650..13b0ebaa512f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
#ifdef CONFIG_X86_32
extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
#else
# define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
#endif
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 83c4bb1d917d..3ea3dc487047 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -121,6 +121,7 @@
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
+#define MSR_AMD64_IBSBRTARGET 0xc001103b
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6e742cc4251b..550e26b1dbb3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -111,17 +111,18 @@ union cpuid10_edx {
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN (1ULL<<57)
-#define IBS_FETCH_VAL (1ULL<<49)
-#define IBS_FETCH_ENABLE (1ULL<<48)
-#define IBS_FETCH_CNT 0xFFFF0000ULL
-#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT 0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL (1ULL<<19)
-#define IBS_OP_VAL (1ULL<<18)
-#define IBS_OP_ENABLE (1ULL<<17)
-#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8abde9ec90bf..0c92113c4cb6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
#endif
#if defined(CONFIG_HIGHPTE)
-#define __KM_PTE \
- (in_nmi() ? KM_NMI_PTE : \
- in_irq() ? KM_IRQ_PTE : \
- KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) + \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
pte_index((address)))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f96ac9bedf75..f86da20347f2 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
#define update_mmu_cache(vma, address, ptep) do { } while (0)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..4c2f63c7fc1b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
- void (*smp_send_stop)(void);
+ void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
static inline void smp_send_stop(void)
{
- smp_ops.smp_send_stop();
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
}
static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7fda040a76cd..a3c28ae4025b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
(type)__res; \
})
+static inline long
+privcmd_call(unsigned call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
+{
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+ asm volatile("call *%[call]"
+ : __HYPERCALL_5PARAM
+ : [call] "a" (&hypercall_page[call])
+ : __HYPERCALL_CLOBBER5);
+
+ return (long)__res;
+}
+
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index bf5f7d32bd08..dd8c1414b3d5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -37,14 +37,21 @@ typedef struct xpaddr {
extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
+ unsigned long mfn;
+
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
- return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+ mfn = get_phys_to_machine(pfn);
+
+ if (mfn != INVALID_P2M_ENTRY)
+ mfn &= ~FOREIGN_FRAME_BIT;
+
+ return mfn;
}
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -159,6 +166,7 @@ static inline pte_t __pte_ma(pteval_t x)
#define pgd_val_ma(x) ((x).pgd)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index cd8da247dda1..a2baafb2fe6d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
+ kfree(data->freq_table);
kfree(data);
}
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 733093d60436..141abebc4516 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -393,7 +393,7 @@ static struct cpufreq_driver nforce2_driver = {
* Detects nForce2 A2 and C1 stepping
*
*/
-static unsigned int nforce2_detect_chipset(void)
+static int nforce2_detect_chipset(void)
{
nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NFORCE2,
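
The detection helper above now returns int because it reports failure as a negative errno; declared unsigned, that value wraps to a large positive number and any `< 0` test is dead code. A standalone illustration of the pitfall (a generic example, not the driver's actual call site):

/* signedness_demo.c - an errno-returning function must not be unsigned */
#include <errno.h>
#include <stdio.h>

static unsigned int detect_unsigned(void)
{
	return -ENODEV;			/* wraps to a huge positive value */
}

static int detect_signed(void)
{
	return -ENODEV;
}

int main(void)
{
	printf("unsigned result < 0: %d\n", detect_unsigned() < 0);	/* 0 */
	printf("signed   result < 0: %d\n", detect_signed() < 0);	/* 1 */
	return 0;
}
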
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index fc09f142d94d..d9f51367666b 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -35,7 +35,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
* Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
* and MSR_TMTA_LONGRUN_CTRL
*/
-static void __init longrun_get_policy(struct cpufreq_policy *policy)
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
@@ -165,7 +165,7 @@ static unsigned int longrun_get(unsigned int cpu)
* TMTA rules:
* performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
*/
-static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
unsigned int *high_freq)
{
u32 msr_lo, msr_hi;
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 12cd823c8d03..17ad03366211 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -327,6 +327,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+ l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fe73c1844a9a..ed6310183efb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -49,7 +49,6 @@ static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
unsigned long offset, addr = (unsigned long)from;
- int type = in_nmi() ? KM_NMI : KM_IRQ0;
unsigned long size, len = 0;
struct page *page;
void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
offset = addr & (PAGE_SIZE - 1);
size = min(PAGE_SIZE - offset, n - len);
- map = kmap_atomic(page, type);
+ map = kmap_atomic(page);
memcpy(to, map+offset, size);
- kunmap_atomic(map, type);
+ kunmap_atomic(map);
put_page(page);
len += size;
@@ -238,6 +237,7 @@ struct x86_pmu {
* Intel DebugStore bits
*/
int bts, pebs;
+ int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
@@ -381,7 +381,7 @@ static void release_pmc_hardware(void) {}
#endif
-static int reserve_ds_buffers(void);
+static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
@@ -478,7 +478,7 @@ static int x86_setup_perfctr(struct perf_event *event)
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
- if (!x86_pmu.bts)
+ if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
@@ -497,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
int precise = 0;
/* Support for constant skid */
- if (x86_pmu.pebs)
+ if (x86_pmu.pebs_active) {
precise++;
- /* Support for IP fixup */
- if (x86_pmu.lbr_nr)
- precise++;
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+ }
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
@@ -544,11 +545,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
- else {
- err = reserve_ds_buffers();
- if (err)
- release_pmc_hardware();
- }
+ else
+ reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 4977f9c400e5..b7dcd9f2b8a0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
+static int alloc_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh = 1; /* always use a single PEBS record */
+ void *buffer;
+
+ if (!x86_pmu.pebs)
+ return 0;
+
+ buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+ ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ ds->pebs_index = ds->pebs_buffer_base;
+ ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+ max * x86_pmu.pebs_record_size;
+
+ ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+ thresh * x86_pmu.pebs_record_size;
+
+ return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ kfree((void *)(unsigned long)ds->pebs_buffer_base);
+ ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh;
+ void *buffer;
+
+ if (!x86_pmu.bts)
+ return 0;
+
+ buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ thresh = max / 16;
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ thresh * BTS_RECORD_SIZE;
+
+ return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.bts)
+ return;
+
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+ int node = cpu_to_node(cpu);
+ struct debug_store *ds;
+
+ ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!ds))
+ return -ENOMEM;
+
+ per_cpu(cpu_hw_events, cpu).ds = ds;
+
+ return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ return;
+
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
+ kfree(ds);
+}
+
static void release_ds_buffers(void)
{
int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
return;
get_online_cpus();
-
for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- continue;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
-
- kfree((void *)(unsigned long)ds->pebs_buffer_base);
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- kfree(ds);
+ release_pebs_buffer(cpu);
+ release_bts_buffer(cpu);
+ release_ds_buffer(cpu);
}
-
put_online_cpus();
}
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- int cpu, err = 0;
+ int bts_err = 0, pebs_err = 0;
+ int cpu;
+
+ x86_pmu.bts_active = 0;
+ x86_pmu.pebs_active = 0;
if (!x86_pmu.bts && !x86_pmu.pebs)
- return 0;
+ return;
+
+ if (!x86_pmu.bts)
+ bts_err = 1;
+
+ if (!x86_pmu.pebs)
+ pebs_err = 1;
get_online_cpus();
for_each_possible_cpu(cpu) {
- struct debug_store *ds;
- void *buffer;
- int max, thresh;
+ if (alloc_ds_buffer(cpu)) {
+ bts_err = 1;
+ pebs_err = 1;
+ }
+
+ if (!bts_err && alloc_bts_buffer(cpu))
+ bts_err = 1;
- err = -ENOMEM;
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (unlikely(!ds))
+ if (!pebs_err && alloc_pebs_buffer(cpu))
+ pebs_err = 1;
+
+ if (bts_err && pebs_err)
break;
- per_cpu(cpu_hw_events, cpu).ds = ds;
-
- if (x86_pmu.bts) {
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
- thresh = max / 16;
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum = ds->bts_buffer_base +
- max * BTS_RECORD_SIZE;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
- thresh * BTS_RECORD_SIZE;
- }
+ }
- if (x86_pmu.pebs) {
- buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
- ds->pebs_buffer_base = (u64)(unsigned long)buffer;
- ds->pebs_index = ds->pebs_buffer_base;
- ds->pebs_absolute_maximum = ds->pebs_buffer_base +
- max * x86_pmu.pebs_record_size;
- /*
- * Always use single record PEBS
- */
- ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
- x86_pmu.pebs_record_size;
- }
+ if (bts_err) {
+ for_each_possible_cpu(cpu)
+ release_bts_buffer(cpu);
+ }
- err = 0;
+ if (pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_pebs_buffer(cpu);
}
- if (err)
- release_ds_buffers();
- else {
+ if (bts_err && pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_ds_buffer(cpu);
+ } else {
+ if (x86_pmu.bts && !bts_err)
+ x86_pmu.bts_active = 1;
+
+ if (x86_pmu.pebs && !pebs_err)
+ x86_pmu.pebs_active = 1;
+
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}
put_online_cpus();
-
- return err;
}
/*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
if (!event)
return 0;
- if (!ds)
+ if (!x86_pmu.bts_active)
return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
struct pebs_record_core *at, *top;
int n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 status = 0;
int bit, n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
#else /* CONFIG_CPU_SUP_INTEL */
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- return 0;
}
static void release_ds_buffers(void)
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 67414550c3cc..d5cd13945d5a 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!is_crashed_pfn_valid(pfn))
return -EFAULT;
- vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+ vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376ffa2d9..1bc7f75a5bda 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %08lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %08lx", *stack++);
touch_nmi_watchdog();
}
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f11c791..6a340485249a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
- printk(" <EOI> ");
+ printk(KERN_CONT " <EOI> ");
}
} else {
if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
}
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %016lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index aff0b3c27509..ae03cab4352e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_ONLINE:
- INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+ INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
init_completion(&work.complete);
/* FIXME: add schedule_work_on() */
schedule_delayed_work_on(cpu, &work.work, 0);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 50fbbe60e507..64668dbf00a4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -60,9 +60,6 @@ union irq_ctx {
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
static void call_on_stack(void *func, void *stack)
{
asm volatile("xchgl %%ebx,%%esp \n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
if (per_cpu(hardirq_ctx, cpu))
return;
- irqctx = &per_cpu(hardirq_stack, cpu);
+ irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
per_cpu(hardirq_ctx, cpu) = irqctx;
- irqctx = &per_cpu(softirq_stack, cpu);
+ irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
-void irq_ctx_exit(int cpu)
-{
- per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
asmlinkage void do_softirq(void)
{
unsigned long flags;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 70c4872cd8aa..45892dc4b72a 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
tmp = 0; /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
case PTRACE_GET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ (struct user_desc __user *)data);
break;
case PTRACE_SET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
- (struct user_desc __user *) data, 0);
+ (struct user_desc __user *)data, 0);
break;
#endif
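
The PEEKUSR/POKEUSR hunks above drop the now-redundant `addr < 0` tests (addr is unsigned after the prototype change) while keeping the alignment and sizeof(struct user) bounds. From userspace those requests look like the sketch below, which reads one word of the tracee's USER area; offsetof(struct user, regs) keeps the address word aligned and in range. This is an illustrative tracer, not part of the patch:

/* peekuser_demo.c - userspace view of PTRACE_PEEKUSER (illustrative).
 * The addr passed here must be word aligned and smaller than
 * sizeof(struct user), the same bounds arch_ptrace() enforces above.
 */
#include <errno.h>
#include <stdio.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	long word;

	if (pid == 0) {				/* child: ask to be traced */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(pid, NULL, 0);			/* child stops at execve() */

	errno = 0;
	word = ptrace(PTRACE_PEEKUSER, pid,
		      (void *)offsetof(struct user, regs), NULL);
	if (errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("first word of saved registers: %#lx\n", word);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}
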
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f7f53dcd3e0a..c495aa8d4815 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
- smp_send_stop();
+ stop_other_cpus();
#endif
lapic_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 95a32746fbf9..21c6746338af 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ resource_alloc_from_bottom = 0;
+ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
/* update the e820_saved too */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d6..513deac7228d 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
irq_exit();
}
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
{
unsigned long flags;
- unsigned long wait;
+ unsigned long timeout;
if (reboot_force)
return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
if (num_online_cpus() > 1) {
apic->send_IPI_allbutself(REBOOT_VECTOR);
- /* Don't wait longer than a second */
- wait = USEC_PER_SEC;
- while (num_online_cpus() > 1 && wait--)
+ /*
+ * Don't wait longer than a second if the caller
+ * didn't ask us to wait.
+ */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
- .smp_send_stop = native_smp_send_stop,
+ .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
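
The new wait parameter turns the loop above into "wait forever if the caller asked, otherwise give up after about a second". A userspace model of that idiom (illustrative only; usleep stands in for udelay and the CPU count is faked):

/* stop_wait_demo.c - model of the wait-or-timeout loop in
 * native_stop_other_cpus(): spin while others are still online,
 * but cap the wait at ~1s unless the caller insisted on waiting.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_PER_SEC 1000000UL

static int others_online = 3;		/* pretend three other CPUs */

static bool still_busy(void)
{
	if (others_online > 0)
		others_online--;	/* they drop out one by one */
	return others_online > 0;
}

static void stop_other_cpus_model(int wait)
{
	unsigned long timeout = USEC_PER_SEC;

	while (still_busy() && (wait || timeout--))
		usleep(1);		/* udelay(1) in the kernel */
}

int main(void)
{
	stop_other_cpus_model(0);	/* reboot path: don't block forever */
	printf("others left: %d\n", others_online);
	return 0;
}
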
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6af118511b4a..083e99d1b7df 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
- INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+ INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
alternatives_smp_switch(1);
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- irq_ctx_exit(raw_smp_processor_id());
c1e_remove_cpu(raw_smp_processor_id());
mb();
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 79b0b372d2d0..7d90ceb882a4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -11,6 +11,7 @@
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
+#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
@@ -160,15 +161,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk)
+ struct task_struct *tsk, int fault)
{
+ unsigned lsb = 0;
siginfo_t info;
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
- info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+ info.si_addr_lsb = lsb;
force_sig_info(si_signo, &info, tsk);
}
@@ -722,7 +728,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
tsk->thread.trap_no = 14;
- force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
return;
}
@@ -807,14 +813,14 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
tsk->thread.trap_no = 14;
#ifdef CONFIG_MEMORY_FAILURE
- if (fault & VM_FAULT_HWPOISON) {
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
code = BUS_MCEERR_AR;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk);
+ force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
static noinline void
@@ -824,7 +830,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
out_of_memory(regs, error_code, address);
} else {
- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else
BUG();
@@ -912,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
int show_unhandled_signals = 1;
static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
{
- if (write) {
+ if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
@@ -949,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
- int write;
int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+ (write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
@@ -1061,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
bad_area_nosemaphore(regs, error_code, address);
return;
}
+retry:
down_read(&mm->mmap_sem);
} else {
/*
@@ -1104,9 +1114,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* we can handle it..
*/
good_area:
- write = error_code & PF_WRITE;
-
- if (unlikely(access_error(error_code, write, vma))) {
+ if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
@@ -1116,21 +1124,34 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
- fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
return;
}
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
- regs, address);
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
}
check_v8086_mode(regs, address, tsk);
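
The retry path above only accounts major/minor faults on the first attempt and clears FAULT_FLAG_ALLOW_RETRY before looping, so a second VM_FAULT_RETRY cannot starve the caller. The control flow reduces to the small model below (a userspace sketch with made-up flag values, not the kernel's definitions):

/* fault_retry_demo.c - shape of the allow-retry-once loop in do_page_fault() */
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x1
#define FAULT_FLAG_WRITE	0x2
#define VM_FAULT_RETRY		0x4
#define VM_FAULT_MAJOR		0x8

static int calls;

static int handle_mm_fault_model(unsigned int flags)
{
	(void)flags;	/* the real helper honours the FAULT_FLAG_* bits */
	/* First attempt asks the caller to retry, the second succeeds. */
	return ++calls == 1 ? VM_FAULT_RETRY : VM_FAULT_MAJOR;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_WRITE;
	int maj_flt = 0, fault;

retry:
	fault = handle_mm_fault_model(flags);

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;	/* accounted on the initial pass only */
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry once */
			goto retry;
		}
	}

	printf("attempts=%d maj_flt=%d\n", calls, maj_flt);
	return 0;
}
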
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef861..b49962662101 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
return page_address(page);
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
return;
kunmap_high(page);
}
+EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
* However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic(type);
-
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,57 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
return (void *)vaddr;
}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
{
- return kmap_atomic_prot(page, type, kmap_prot);
+ return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- /*
- * Force other mappings to Oops if they'll try to access this pte
- * without first remap it. Keeping stale mappings around is a bad idea
- * also, in case the page changes cacheability attributes or becomes
- * a protected page in a hypervisor.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+ /*
+ * Force other mappings to Oops if they'll try to access this
+ * pte without first remapping it. Keeping stale mappings around
+ * is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
kpte_clear_flush(kmap_pte-idx, vaddr);
- else {
+ kmap_atomic_idx_pop();
+ }
#ifdef CONFIG_DEBUG_HIGHMEM
+ else {
BUG_ON(vaddr < PAGE_OFFSET);
BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
}
+#endif
pagefault_enable();
}
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
- return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
struct page *kmap_atomic_to_page(void *ptr)
{
@@ -98,12 +112,6 @@ struct page *kmap_atomic_to_page(void *ptr)
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page);
void __init set_highmem_pages_init(void)
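
In place of the caller-supplied km_type enum, the reworked implementation above pushes and pops a per-CPU slot index (kmap_atomic_idx_push/kmap_atomic_idx/kmap_atomic_idx_pop), so nested atomic kmaps unwind in LIFO order without callers naming a slot. A userspace model of that bookkeeping (single "CPU"; this mimics the kernel helpers, it is not their actual implementation):

/* kmap_stack_demo.c - model of the per-CPU kmap_atomic slot stack */
#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 20			/* slots per CPU (illustrative) */

static int kmap_idx;			/* per-CPU variable in the kernel */

static int kmap_atomic_idx_push(void)
{
	assert(kmap_idx < KM_TYPE_NR);
	return kmap_idx++;
}

static int kmap_atomic_idx(void)
{
	return kmap_idx - 1;		/* slot of the innermost mapping */
}

static void kmap_atomic_idx_pop(void)
{
	kmap_idx--;
	assert(kmap_idx >= 0);
}

int main(void)
{
	int outer = kmap_atomic_idx_push();	/* e.g. process context */
	int inner = kmap_atomic_idx_push();	/* e.g. nested interrupt */

	printf("outer slot %d, inner slot %d\n", outer, inner);

	assert(kmap_atomic_idx() == inner);	/* unmap innermost first */
	kmap_atomic_idx_pop();
	assert(kmap_atomic_idx() == outer);
	kmap_atomic_idx_pop();
	return 0;
}
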
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 84346200e783..71a59296af80 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
-#include <linux/bootmem.h>
static int __init parse_direct_gbpages_off(char *arg)
{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf6184..7b179b499fa3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
{
io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pagefault_disable();
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
}
/*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
*/
void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
/*
* For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,34 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+ return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
- /*
- * Force other mappings to Oops if they'll try to access this pte
- * without first remap it. Keeping stale mappings around is a bad idea
- * also, in case the page changes cacheability attributes or becomes
- * a protected page in a hypervisor.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+ int idx, type;
+
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+ /*
+ * Force other mappings to Oops if they'll try to access this
+ * pte without first remapping it. Keeping stale mappings around
+ * is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ }
pagefault_enable();
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index bd1489c3ce09..4e8baad36d37 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
case 0x11:
cpu_type = "x86-64/family11h";
break;
+ case 0x12:
+ cpu_type = "x86-64/family12h";
+ break;
+ case 0x14:
+ cpu_type = "x86-64/family14h";
+ break;
default:
return -ENODEV;
}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 42fb46f83883..a011bcc0f943 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
static u32 ibs_caps;
-struct op_ibs_config {
+struct ibs_config {
unsigned long op_enabled;
unsigned long fetch_enabled;
unsigned long max_cnt_fetch;
unsigned long max_cnt_op;
unsigned long rand_en;
unsigned long dispatched_ops;
+ unsigned long branch_target;
};
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+ u64 ibs_op_ctl;
+ int branch_target;
+ unsigned long sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
/*
* IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
* bit 0 is used to indicate the existence of IBS.
*/
#define IBS_CAPS_AVAIL (1U<<0)
+#define IBS_CAPS_FETCHSAM (1U<<1)
+#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
+#define IBS_CAPS_BRNTRGT (1U<<5)
+#define IBS_CAPS_OPCNTEXT (1U<<6)
+
+#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
+ | IBS_CAPS_FETCHSAM \
+ | IBS_CAPS_OPSAM)
/*
* IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
/* check IBS cpuid feature flags */
max_level = cpuid_eax(0x80000000);
if (max_level < IBS_CPUID_FEATURES)
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
if (!(ibs_caps & IBS_CAPS_AVAIL))
/* cpuid flags not valid */
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
return ibs_caps;
}
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
if (ctl & IBS_OP_VAL) {
rdmsrl(MSR_AMD64_IBSOPRIP, val);
- oprofile_write_reserve(&entry, regs, val,
- IBS_OP_CODE, IBS_OP_SIZE);
+ oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+ ibs_state.sample_size);
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSOPDATA, val);
oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
oprofile_add_data64(&entry, val);
+ if (ibs_state.branch_target) {
+ rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+ oprofile_add_data(&entry, (unsigned long)val);
+ }
oprofile_write_commit(&entry);
/* reenable the IRQ */
- ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+ ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}
}
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
if (!ibs_caps)
return;
+ memset(&ibs_state, 0, sizeof(ibs_state));
+
+ /*
+ * Note: Since the max count settings may be out of range, we
+ * write back the values actually used so that userland can
+ * read them.
+ */
+
if (ibs_config.fetch_enabled) {
- val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+ val = ibs_config.max_cnt_fetch >> 4;
+ val = min(val, IBS_FETCH_MAX_CNT);
+ ibs_config.max_cnt_fetch = val << 4;
val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
val |= IBS_FETCH_ENABLE;
wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
}
if (ibs_config.op_enabled) {
- ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+ val = ibs_config.max_cnt_op >> 4;
if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
/*
* IbsOpCurCnt not supported. See
* op_amd_randomize_ibs_op() for details.
*/
- ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+ val = clamp(val, 0x0081ULL, 0xFF80ULL);
+ ibs_config.max_cnt_op = val << 4;
} else {
/*
* The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
* with the half of the randomized range. Also
* avoid underflows.
*/
- ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
- IBS_OP_MAX_CNT);
+ val += IBS_RANDOM_MAXCNT_OFFSET;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ val = min(val, IBS_OP_MAX_CNT_EXT);
+ else
+ val = min(val, IBS_OP_MAX_CNT);
+ ibs_config.max_cnt_op =
+ (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+ }
+ val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+ val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+ val |= IBS_OP_ENABLE;
+ ibs_state.ibs_op_ctl = val;
+ ibs_state.sample_size = IBS_OP_SIZE;
+ if (ibs_config.branch_target) {
+ ibs_state.branch_target = 1;
+ ibs_state.sample_size++;
}
- if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
- ibs_op_ctl |= IBS_OP_CNT_CTL;
- ibs_op_ctl |= IBS_OP_ENABLE;
- val = op_amd_randomize_ibs_op(ibs_op_ctl);
+ val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, val);
}
}
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
static inline int ibs_eilvt_valid(void)
{
- u64 val;
int offset;
+ u64 val;
rdmsrl(MSR_AMD64_IBSCTL, val);
+ offset = val & IBSCTL_LVT_OFFSET_MASK;
+
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
- pr_err(FW_BUG "cpu %d, invalid IBS "
- "interrupt offset %d (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
return 0;
}
- offset = val & IBSCTL_LVT_OFFSET_MASK;
-
- if (eilvt_is_available(offset))
- return !0;
-
- pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
- "not available (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ if (!eilvt_is_available(offset)) {
+ pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+ return 0;
+ }
- return 0;
+ return 1;
}
static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
/* model specific files */
/* setup some reasonable defaults */
+ memset(&ibs_config, 0, sizeof(ibs_config));
ibs_config.max_cnt_fetch = 250000;
- ibs_config.fetch_enabled = 0;
ibs_config.max_cnt_op = 250000;
- ibs_config.op_enabled = 0;
- ibs_config.dispatched_ops = 0;
-
- dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.fetch_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(sb, dir, "rand_enable",
- &ibs_config.rand_en);
-
- dir = oprofilefs_mkdir(sb, root, "ibs_op");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.op_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_op);
- if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(sb, dir, "dispatched_ops",
- &ibs_config.dispatched_ops);
+
+ if (ibs_caps & IBS_CAPS_FETCHSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.fetch_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_fetch);
+ oprofilefs_create_ulong(sb, dir, "rand_enable",
+ &ibs_config.rand_en);
+ }
+
+ if (ibs_caps & IBS_CAPS_OPSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_op");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.op_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_op);
+ if (ibs_caps & IBS_CAPS_OPCNT)
+ oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+ &ibs_config.dispatched_ops);
+ if (ibs_caps & IBS_CAPS_BRNTRGT)
+ oprofilefs_create_ulong(sb, dir, "branch_target",
+ &ibs_config.branch_target);
+ }
return 0;
}
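
op_amd_start_ibs() above clamps the user-requested op max count, writes the clamped value back for userland, and with IBS_CAPS_OPCNTEXT splits the count between the classic 16-bit MaxCnt field and the extended-count bits four positions further up. The bit manipulation is easier to see in isolation (a standalone arithmetic sketch reusing the masks from this patch):

/* ibs_ctl_demo.c - how the clamped IbsOpCtl max count is laid out.
 * Masks copied from the hunks above; field placement follows the same
 * expression used in op_amd_start_ibs().
 */
#include <stdio.h>

#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* 23 usable count bits */
#define IBS_OP_ENABLE		(1ULL << 17)

int main(void)
{
	unsigned long long max_cnt = 250000;	/* oprofile default */
	unsigned long long val = max_cnt >> 4;	/* hardware counts units of 16 */

	if (val > IBS_OP_MAX_CNT_EXT)
		val = IBS_OP_MAX_CNT_EXT;	/* clamp when OPCNTEXT is present */

	/* Low 16 bits stay in MaxCnt; any upper bits move to the
	 * extended-count field four bit positions further up. */
	val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
	val |= IBS_OP_ENABLE;

	printf("IbsOpCtl = %#llx\n", val);
	return 0;
}
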
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be84..826140af3c3c 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
- resource_size_t start = res->start;
+ resource_size_t start = round_down(res->end - size + 1, align);
if (res->flags & IORESOURCE_IO) {
- if (skip_isa_ioresource_align(dev))
- return start;
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
+
+ /*
+ * If we're avoiding ISA aliases, the largest contiguous I/O
+ * port space is 256 bytes. Clearing bits 9 and 10 preserves
+ * all 256-byte and smaller alignments, so the result will
+ * still be correctly aligned.
+ */
+ if (!skip_isa_ioresource_align(dev))
+ start &= ~0x300;
} else if (res->flags & IORESOURCE_MEM) {
if (start < BIOS_END)
- start = BIOS_END;
+ start = res->end; /* fail; no space */
}
return start;
}
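
The rewritten alignment hook above proposes the highest suitably aligned start inside the window (top-down placement) and, for I/O ports, clears bits 9 and 10 to dodge ISA aliases. The arithmetic in isolation (a userspace sketch; round_down is redefined here the way the kernel defines it for power-of-two alignments, and the window values are invented):

/* align_demo.c - top-down candidate start for a resource window */
#include <stdio.h>

typedef unsigned long long resource_size_t;

/* kernel's round_down() for a power-of-two alignment */
#define round_down(x, a)	((x) & ~((resource_size_t)(a) - 1))

int main(void)
{
	resource_size_t win_end = 0xdfff;	/* hypothetical I/O window end */
	resource_size_t size = 0x100, align = 0x100;

	/* highest aligned start that still fits 'size' bytes below win_end */
	resource_size_t start = round_down(win_end - size + 1, align);

	/* avoid ISA aliases: keep bits 9 and 10 clear for I/O ports */
	start &= ~0x300ULL;

	printf("candidate start: %#llx\n", start);
	return 0;
}
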
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f715..9f9bfb705cf9 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH9_3:
case PCI_DEVICE_ID_INTEL_ICH9_4:
case PCI_DEVICE_ID_INTEL_ICH9_5:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+ case PCI_DEVICE_ID_INTEL_EP80579_0:
case PCI_DEVICE_ID_INTEL_ICH10_0:
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
+ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc75..e282886616a0 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
- int num_buses;
struct resource *res;
if (addr == 0)
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
list_add_sorted(new);
- num_buses = end - start + 1;
res = &new->res;
res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
- res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 68128a1b401a..90a7f5ad6916 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -19,15 +19,12 @@ config XEN_PVHVM
depends on X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
- int "Maximum allowed size of a domain in gigabytes"
- default 8 if X86_32
- default 32 if X86_64
+ int
+ default 128
depends on XEN
help
- The pseudo-physical to machine address array is sized
- according to the maximum possible memory size of a Xen
- domain. This array uses 1 page per gigabyte, so there's no
- need to be too stingy here.
+ This only affects the sizing of some bss arrays, the unused
+ portions of which are freed.
config XEN_SAVE_RESTORE
bool
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 63b83ceebd1a..70ddeaeb1ef3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -59,7 +59,6 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
-#include <asm/setup.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
@@ -136,9 +135,6 @@ static void xen_vcpu_setup(int cpu)
info.mfn = arbitrary_virt_to_mfn(vcpup);
info.offset = offset_in_page(vcpup);
- printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
- cpu, vcpup, info.mfn, info.offset);
-
/* Check to see if the hypervisor will put the vcpu_info
structure where we want it, which allows direct access via
a percpu-variable. */
@@ -152,9 +148,6 @@ static void xen_vcpu_setup(int cpu)
/* This cpu is using the registered vcpu info, even if
later ones fail to. */
per_cpu(xen_vcpu, cpu) = vcpup;
-
- printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
- cpu, vcpup);
}
}
@@ -836,6 +829,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
Xen console noise. */
break;
+ case MSR_IA32_CR_PAT:
+ if (smp_processor_id() == 0)
+ xen_set_pat(((u64)high << 32) | low);
+ break;
+
default:
ret = native_write_msr_safe(msr, low, high);
}
@@ -874,8 +872,6 @@ void xen_setup_vcpu_info_placement(void)
/* xen_vcpu_setup managed to place the vcpu_info within the
percpu area for all cpus, so make use of it */
if (have_vcpu_info_placement) {
- printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1019,7 +1015,7 @@ static void xen_reboot(int reason)
struct sched_shutdown r = { .reason = reason };
#ifdef CONFIG_SMP
- smp_send_stop();
+ stop_other_cpus();
#endif
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
@@ -1189,6 +1185,9 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+ /* Allocate and initialize top and mid mfn levels for p2m structure */
+ xen_build_mfn_list_list();
+
init_mm.pgd = pgd;
/* keep using Xen gdt for now; no urgent need to change it */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f72d18c69221..9631c90907eb 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -57,6 +57,7 @@
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
+#include <asm/pat.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -140,7 +141,8 @@ static inline void check_zero(void)
* large enough to allocate page table pages to allocate the rest.
* Each page can map 2MB.
*/
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
@@ -171,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
*/
#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ * Xen
+ * |
+ * p2m_top p2m_top_mfn
+ * / \ / \
+ * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn
+ * / \ / \ / /
+ * p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively.
+ */
+
+unsigned long xen_max_p2m_pfn __read_mostly;
-#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
- { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
- { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
- __page_aligned_bss;
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
static inline unsigned p2m_top_index(unsigned long pfn)
{
- BUG_ON(pfn >= MAX_DOMAIN_PAGES);
- return pfn / P2M_ENTRIES_PER_PAGE;
+ BUG_ON(pfn >= MAX_P2M_PFN);
+ return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+ return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}
static inline unsigned p2m_index(unsigned long pfn)
{
- return pfn % P2M_ENTRIES_PER_PAGE;
+ return pfn % P2M_PER_PAGE;
+}
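
As a rough illustration of the index arithmetic used by the three-level p2m tree described above, here is a minimal standalone sketch (a userspace program under stated assumptions, not kernel code: it presumes 4 KiB pages and 8-byte unsigned long, i.e. the 64-bit case where each level holds 512 entries):

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define P2M_PER_PAGE     (PAGE_SIZE / sizeof(unsigned long))   /* 512 with 8-byte longs */
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))

int main(void)
{
        unsigned long pfn = 0x12345678UL;

        /* Same decomposition as p2m_top_index()/p2m_mid_index()/p2m_index() */
        unsigned topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
        unsigned mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
        unsigned idx    = pfn % P2M_PER_PAGE;

        printf("pfn %#lx -> top %u, mid %u, leaf %u\n", pfn, topidx, mididx, idx);
        printf("max representable pfns: %lu\n",
               P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE);
        return 0;
}
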
+
+static void p2m_top_init(unsigned long ***top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+ top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ mid[i] = virt_to_mfn(p2m_missing);
}
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_init(unsigned long *p2m)
+{
+ unsigned i;
+
+ for (i = 0; i < P2M_MID_PER_PAGE; i++)
+ p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ * to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ * tree should already be completely allocated.
+ */
void xen_build_mfn_list_list(void)
{
- unsigned pfn, idx;
+ unsigned long pfn;
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
+ /* Pre-initialize p2m_top_mfn to be completely missing */
+ if (p2m_top_mfn == NULL) {
+ p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+ p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_p_init(p2m_top_mfn_p);
- p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+ p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_init(p2m_top_mfn);
+ } else {
+ /* Reinitialise: all mfns change after migration */
+ p2m_mid_mfn_init(p2m_mid_missing_mfn);
}
- for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
- unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
- p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+ for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned mididx = p2m_mid_index(pfn);
+ unsigned long **mid;
+ unsigned long *mid_mfn_p;
+
+ mid = p2m_top[topidx];
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+
+ /* Don't bother allocating any mfn mid levels if
+ * they're just missing; simply update the stored mfn,
+ * since all of them could have changed over a migration.
+ */
+ if (mid == p2m_mid_missing) {
+ BUG_ON(mididx);
+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+ p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+ pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+ continue;
+ }
+
+ if (mid_mfn_p == p2m_mid_missing_mfn) {
+ /*
+ * XXX boot-time only! We should never find
+ * missing parts of the mfn tree after
+ * runtime. extend_brk() will BUG if we call
+ * it too late.
+ */
+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(mid_mfn_p);
+
+ p2m_top_mfn_p[topidx] = mid_mfn_p;
+ }
+
+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+ mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
}
}
@@ -222,8 +357,8 @@ void xen_setup_mfn_list_list(void)
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
- virt_to_mfn(p2m_top_mfn_list);
- HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+ virt_to_mfn(p2m_top_mfn);
+ HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}
/* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -231,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
{
unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
- unsigned pfn;
+ unsigned long pfn;
+
+ xen_max_p2m_pfn = max_pfn;
- for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+ p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_init(p2m_missing);
+
+ p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_init(p2m_mid_missing);
+
+ p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_init(p2m_top);
+
+ /*
+ * The domain builder gives us a pre-constructed p2m array in
+ * mfn_list for all the pages initially given to us, so we just
+ * need to graft that into our tree structure.
+ */
+ for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
unsigned topidx = p2m_top_index(pfn);
+ unsigned mididx = p2m_mid_index(pfn);
- p2m_top[topidx] = &mfn_list[pfn];
- }
+ if (p2m_top[topidx] == p2m_mid_missing) {
+ unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
+ }
- xen_build_mfn_list_list();
+ p2m_top[topidx][mididx] = &mfn_list[pfn];
+ }
}
unsigned long get_phys_to_machine(unsigned long pfn)
{
- unsigned topidx, idx;
+ unsigned topidx, mididx, idx;
- if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+ if (unlikely(pfn >= MAX_P2M_PFN))
return INVALID_P2M_ENTRY;
topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- return p2m_top[topidx][idx];
+
+ return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
-/* install a new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long **pfnp, *mfnp;
- unsigned i;
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
- pfnp = &p2m_top[topidx];
- mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+ free_page((unsigned long)p);
+}
- for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
- p[i] = INVALID_P2M_ENTRY;
+/*
+ * Fully allocate the p2m structure for a given pfn. We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync. We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+ unsigned topidx, mididx;
+ unsigned long ***top_p, **mid;
+ unsigned long *top_mfn_p, *mid_mfn;
- if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
- *mfnp = virt_to_mfn(p);
- return true;
+ topidx = p2m_top_index(pfn);
+ mididx = p2m_mid_index(pfn);
+
+ top_p = &p2m_top[topidx];
+ mid = *top_p;
+
+ if (mid == p2m_mid_missing) {
+ /* Mid level is missing, allocate a new one */
+ mid = alloc_p2m_page();
+ if (!mid)
+ return false;
+
+ p2m_mid_init(mid);
+
+ if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+ free_p2m_page(mid);
}
- return false;
-}
+ top_mfn_p = &p2m_top_mfn[topidx];
+ mid_mfn = p2m_top_mfn_p[topidx];
-static void alloc_p2m(unsigned long pfn)
-{
- unsigned long *p;
+ BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+ if (mid_mfn == p2m_mid_missing_mfn) {
+ /* Separately check the mid mfn level */
+ unsigned long missing_mfn;
+ unsigned long mid_mfn_mfn;
+
+ mid_mfn = alloc_p2m_page();
+ if (!mid_mfn)
+ return false;
+
+ p2m_mid_mfn_init(mid_mfn);
+
+ missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+ mid_mfn_mfn = virt_to_mfn(mid_mfn);
+ if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+ free_p2m_page(mid_mfn);
+ else
+ p2m_top_mfn_p[topidx] = mid_mfn;
+ }
+
+ if (p2m_top[topidx][mididx] == p2m_missing) {
+ /* p2m leaf page is missing */
+ unsigned long *p2m;
+
+ p2m = alloc_p2m_page();
+ if (!p2m)
+ return false;
- p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
- BUG_ON(p == NULL);
+ p2m_init(p2m);
+
+ if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+ free_p2m_page(p2m);
+ else
+ mid_mfn[mididx] = virt_to_mfn(p2m);
+ }
- if (!install_p2mtop_page(pfn, p))
- free_page((unsigned long)p);
+ return true;
}
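
The alloc_p2m() scheme above is an instance of a common lock-free install pattern: allocate a candidate page, publish it with a single cmpxchg, and free it again if another CPU won the race. A minimal userspace sketch of just that pattern, with GCC/Clang __atomic builtins and malloc standing in for the kernel's cmpxchg and page allocator (the names here are illustrative, not kernel APIs):

#include <stdlib.h>
#include <stdio.h>

static void *slot;          /* shared pointer, NULL means "missing" */

/* Install a freshly allocated object unless someone beat us to it. */
static void *install_or_reuse(size_t size)
{
        void *expected = NULL;
        void *mine = malloc(size);

        if (!mine)
                return NULL;

        /* Equivalent in spirit to: cmpxchg(&slot, NULL, mine) */
        if (!__atomic_compare_exchange_n(&slot, &expected, mine, 0,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                free(mine);        /* lost the race; use the winner's object */
                return expected;   /* failed CAS stores the current value here */
        }
        return mine;
}

int main(void)
{
        void *p = install_or_reuse(64);
        printf("installed %p (slot %p)\n", p, slot);
        free(slot);
        return 0;
}
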
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
- unsigned topidx, idx;
+ unsigned topidx, mididx, idx;
- if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+ if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
}
topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == p2m_missing) {
- if (mfn == INVALID_P2M_ENTRY)
- return true;
- return false;
- }
-
+ mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- p2m_top[topidx][idx] = mfn;
+
+ if (p2m_top[topidx][mididx] == p2m_missing)
+ return mfn == INVALID_P2M_ENTRY;
+
+ p2m_top[topidx][mididx][idx] = mfn;
return true;
}
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return;
+ return true;
}
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
- alloc_p2m(pfn);
+ if (!alloc_p2m(pfn))
+ return false;
if (!__set_phys_to_machine(pfn, mfn))
- BUG();
+ return false;
}
+
+ return true;
}
unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -399,7 +612,7 @@ static bool xen_iomap_pte(pte_t pte)
return pte_flags(pte) & _PAGE_IOMAP;
}
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
struct multicall_space mcs;
struct mmu_update *u;
@@ -411,10 +624,16 @@ static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
u->ptr = arbitrary_virt_to_machine(ptep).maddr;
u->val = pte_val_ma(pteval);
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+ MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+ xen_set_domain_pte(ptep, pteval, DOMID_IO);
+}
static void xen_extend_mmu_update(const struct mmu_update *update)
{
@@ -561,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
- val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ /*
+ * If there's no mfn for the pfn, then just create an
+ * empty non-present pte. Unfortunately this loses
+ * information about the original pfn, so
+ * pte_mfn_to_pfn is asymmetric.
+ */
+ if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+ mfn = 0;
+ flags = 0;
+ }
+
+ val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
return val;
@@ -583,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
pteval_t xen_pte_val(pte_t pte)
{
- if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
- return pte.pte;
+ pteval_t pteval = pte.pte;
+
+ /* If this is a WC pte, convert back from Xen WC to Linux WC */
+ if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+ WARN_ON(!pat_enabled);
+ pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+ }
- return pte_mfn_to_pfn(pte.pte);
+ if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+ return pteval;
+
+ return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -596,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx PTE flags Linux Xen Default
+ * 0 WB WB WB
+ * 1 PWT WC WT WT
+ * 2 PCD UC- UC- UC-
+ * 3 PCD PWT UC UC UC
+ * 4 PAT WB WC WB
+ * 5 PAT PWT WC WP WT
+ * 6 PAT PCD UC- UC UC-
+ * 7 PAT PCD PWT UC UC UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+ /* We expect Linux to use a PAT setting of
+ * UC UC- WC WB (ignoring the PAT flag) */
+ WARN_ON(pat != 0x0007010600070106ull);
+}
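
The WC handling in xen_pte_val()/xen_make_pte() boils down to swapping which PAT index selects write-combining: Linux encodes WC as PWT (index 1), Xen as PAT (index 4). A standalone sketch of that bit shuffle, using the standard x86 PTE cache-attribute bit positions (values assumed here for illustration, not taken from kernel headers):

#include <stdio.h>
#include <stdint.h>

/* x86 PTE cacheability bits (assumed values for illustration) */
#define PAGE_PWT 0x008ULL       /* bit 3 */
#define PAGE_PCD 0x010ULL       /* bit 4 */
#define PAGE_PAT 0x080ULL       /* bit 7 */

/* Linux WC (PAT index 1: PWT) -> Xen WC (PAT index 4: PAT), as in xen_make_pte */
static uint64_t linux_to_xen_wc(uint64_t pte)
{
        if ((pte & (PAGE_PCD | PAGE_PWT)) == PAGE_PWT)
                pte = (pte & ~(PAGE_PCD | PAGE_PWT)) | PAGE_PAT;
        return pte;
}

/* Xen WC (PAT alone) -> Linux WC (PWT), as in xen_pte_val */
static uint64_t xen_to_linux_wc(uint64_t pte)
{
        if ((pte & (PAGE_PAT | PAGE_PCD | PAGE_PWT)) == PAGE_PAT)
                pte = (pte & ~PAGE_PAT) | PAGE_PWT;
        return pte;
}

int main(void)
{
        uint64_t pte = 0x1000ULL | PAGE_PWT;    /* some frame, Linux WC encoding */
        uint64_t xen = linux_to_xen_wc(pte);

        printf("linux %#llx -> xen %#llx -> linux %#llx\n",
               (unsigned long long)pte, (unsigned long long)xen,
               (unsigned long long)xen_to_linux_wc(xen));
        return 0;
}
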
+
pte_t xen_make_pte(pteval_t pte)
{
phys_addr_t addr = (pte & PTE_PFN_MASK);
+ /* If Linux is trying to set a WC pte, then map to the Xen WC.
+ * If _PAGE_PAT is set, then it probably means it is really
+ * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+ * things work out OK...
+ *
+ * (We should never see kernel mappings with _PAGE_PSE set,
+ * but we could see hugetlbfs mappings, I think.)
+ */
+ if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+ if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+ pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+ }
+
/*
* Unprivileged domains are allowed to do IOMAPpings for
* PCI passthrough, but not map ISA space. The ISA
@@ -1712,6 +1990,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
unsigned ident_pte;
unsigned long pfn;
+ level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+ PAGE_SIZE);
+
ident_pte = 0;
pfn = 0;
for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1722,7 +2003,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
pte_page = m2v(pmd[pmdidx].pmd);
else {
/* Check for free pte pages */
- if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+ if (ident_pte == LEVEL1_IDENT_ENTRIES)
break;
pte_page = &level1_ident_pgt[ident_pte];
@@ -1837,13 +2118,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
return pgd;
}
#else /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
unsigned long max_pfn)
{
pmd_t *kernel_pmd;
+ level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
512*1024);
@@ -2269,6 +2552,72 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+ unsigned long mfn;
+ pgprot_t prot;
+ struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct remap_data *rmd = data;
+ pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+ rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+ rmd->mmu_update->val = pte_val_ma(pte);
+ rmd->mmu_update++;
+
+ return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long mfn, int nr,
+ pgprot_t prot, unsigned domid)
+{
+ struct remap_data rmd;
+ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+ int batch;
+ unsigned long range;
+ int err = 0;
+
+ prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+ rmd.mfn = mfn;
+ rmd.prot = prot;
+
+ while (nr) {
+ batch = min(REMAP_BATCH_SIZE, nr);
+ range = (unsigned long)batch << PAGE_SHIFT;
+
+ rmd.mmu_update = mmu_update;
+ err = apply_to_page_range(vma->vm_mm, addr, range,
+ remap_area_mfn_pte_fn, &rmd);
+ if (err)
+ goto out;
+
+ err = -EFAULT;
+ if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+ goto out;
+
+ nr -= batch;
+ addr += range;
+ }
+
+ err = 0;
+out:
+
+ flush_tlb_all();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
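
Structurally, xen_remap_domain_mfn_range() fills an on-stack array of at most REMAP_BATCH_SIZE mmu_update entries via apply_to_page_range() and flushes it with one hypercall per batch. A rough userspace sketch of the same batching shape, with the page-table walk and hypercall stubbed out (all names illustrative, not kernel or Xen APIs):

#include <stdio.h>

#define REMAP_BATCH_SIZE 16

struct update { unsigned long ptr, val; };

/* Stand-in for HYPERVISOR_mmu_update(): pretend every batch succeeds. */
static int submit_batch(const struct update *u, int n)
{
        printf("submitting batch of %d updates (first mfn %#lx)\n", n, u[0].val);
        return 0;
}

static int remap_range(unsigned long mfn, int nr)
{
        struct update batch[REMAP_BATCH_SIZE];

        while (nr) {
                int n = nr < REMAP_BATCH_SIZE ? nr : REMAP_BATCH_SIZE;
                int i;

                /* Stand-in for apply_to_page_range() filling the update array */
                for (i = 0; i < n; i++) {
                        batch[i].ptr = 0;       /* machine address of the pte slot */
                        batch[i].val = mfn++;   /* new pte value for this mfn */
                }

                if (submit_batch(batch, n))
                        return -1;

                nr -= n;
        }
        return 0;
}

int main(void)
{
        return remap_range(0x1000, 40); /* 40 pages -> batches of 16, 16, 8 */
}
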
+
#ifdef CONFIG_XEN_DEBUG_FS
static struct dentry *d_mmu_debug;
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index fa938c4aa2f7..537bb9aab777 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -12,7 +12,6 @@ enum pt_level {
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 9729c903404b..105db2501050 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -18,8 +18,10 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
+#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/interface/memory.h>
#include <xen/features.h>
@@ -34,6 +36,39 @@ extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+ u64 size = (u64)pages * PAGE_SIZE;
+ u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+ if (!pages)
+ return;
+
+ e820_add_region(extra_start, size, E820_RAM);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+ memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+ xen_extra_mem_size += size;
+
+ xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
phys_addr_t end_addr)
{
@@ -105,16 +140,65 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
-
char * __init xen_memory_setup(void)
{
+ static struct e820entry map[E820MAX] __initdata;
+
unsigned long max_pfn = xen_start_info->nr_pages;
+ unsigned long long mem_end;
+ int rc;
+ struct xen_memory_map memmap;
+ unsigned long extra_pages = 0;
+ unsigned long extra_limit;
+ int i;
+ int op;
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+ mem_end = PFN_PHYS(max_pfn);
+
+ memmap.nr_entries = E820MAX;
+ set_xen_guest_handle(memmap.buffer, map);
+
+ op = xen_initial_domain() ?
+ XENMEM_machine_memory_map :
+ XENMEM_memory_map;
+ rc = HYPERVISOR_memory_op(op, &memmap);
+ if (rc == -ENOSYS) {
+ memmap.nr_entries = 1;
+ map[0].addr = 0ULL;
+ map[0].size = mem_end;
+ /* 8MB slack (to balance backend allocations). */
+ map[0].size += 8ULL << 20;
+ map[0].type = E820_RAM;
+ rc = 0;
+ }
+ BUG_ON(rc);
e820.nr_map = 0;
+ xen_extra_mem_start = mem_end;
+ for (i = 0; i < memmap.nr_entries; i++) {
+ unsigned long long end = map[i].addr + map[i].size;
+
+ if (map[i].type == E820_RAM) {
+ if (map[i].addr < mem_end && end > mem_end) {
+ /* Truncate region to max_mem. */
+ u64 delta = end - mem_end;
+
+ map[i].size -= delta;
+ extra_pages += PFN_DOWN(delta);
+
+ end = mem_end;
+ }
+ }
- e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+ if (end > xen_extra_mem_start)
+ xen_extra_mem_start = end;
+
+ /* If region is non-RAM or below mem_end, add what remains */
+ if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
+ map[i].size > 0)
+ e820_add_region(map[i].addr, map[i].size, map[i].type);
+ }
/*
* Even though this is normal, usable memory under Xen, reserve
@@ -136,7 +220,29 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+ extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+ /*
+ * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
+ * multiple of the base size. On non-highmem systems, the base
+ * size is the full initial memory allocation; on highmem it
+ * is limited to the max size of lowmem, so that it doesn't
+ * get completely filled.
+ *
+ * In principle there could be a problem in lowmem systems if
+ * the initial memory is also very large with respect to
+ * lowmem, but we won't try to deal with that here.
+ */
+ extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ max_pfn + extra_pages);
+
+ if (extra_limit >= max_pfn)
+ extra_pages = extra_limit - max_pfn;
+ else
+ extra_pages = 0;
+
+ if (!xen_initial_domain())
+ xen_add_extra_mem(extra_pages);
return "Xen";
}
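
To make the EXTRA_MEM_RATIO clamp above concrete, the same arithmetic can be worked through with example numbers (a 512 MiB initial allocation, 256 MiB returned as unused, and a hypothetical 896 MiB lowmem limit; 4 KiB pages assumed throughout):

#include <stdio.h>

#define EXTRA_MEM_RATIO 10UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long max_pfn     = 512UL << 8;   /* 512 MiB of 4 KiB pages = 131072 pfns */
        unsigned long extra_pages = 256UL << 8;   /* 256 MiB returned as unused */
        unsigned long maxmem_pfn  = 896UL << 8;   /* example lowmem limit on 32-bit highmem */
        unsigned long extra_limit, extra;

        /* Same clamp as in xen_memory_setup() */
        extra_limit = min_ul(EXTRA_MEM_RATIO * min_ul(max_pfn, maxmem_pfn),
                             max_pfn + extra_pages);

        extra = extra_limit >= max_pfn ? extra_limit - max_pfn : 0;

        printf("extra_limit=%lu pfns, extra memory kept=%lu pfns (%lu MiB)\n",
               extra_limit, extra, extra >> 8);
        return 0;
}
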
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a82..f4d010031465 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -400,9 +400,9 @@ static void stop_self(void *v)
BUG();
}
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
{
- smp_call_function(stop_self, NULL, 0);
+ smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
.cpu_disable = xen_cpu_disable,
.play_dead = xen_play_dead,
- .smp_send_stop = xen_smp_send_stop,
+ .stop_other_cpus = xen_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 7c8ab86163e9..64044747348e 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -30,6 +30,9 @@ void xen_setup_machphys_mapping(void);
pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void xen_ident_map_ISA(void);
void xen_reserve_top(void);
+extern unsigned long xen_max_p2m_pfn;
+
+void xen_set_pat(u64);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 76bf35554117..b03c043ce75b 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pte_offset_kernel(dir,addr) \
((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
-
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
/*
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9d4e1ceb3f09..c72c9473ef99 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -256,9 +256,11 @@ int ptrace_pokeusr(struct task_struct *child, long regno, long val)
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -267,7 +269,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR: /* read register specified by addr. */
- ret = ptrace_peekusr(child, addr, (void __user *) data);
+ ret = ptrace_peekusr(child, addr, datap);
break;
case PTRACE_POKETEXT: /* write the word at location addr. */
@@ -280,19 +282,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *) data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *) data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETXTREGS:
- ret = ptrace_getxregs(child, (void __user *) data);
+ ret = ptrace_getxregs(child, datap);
break;
case PTRACE_SETXTREGS:
- ret = ptrace_setxregs(child, (void __user *) data);
+ ret = ptrace_setxregs(child, datap);
break;
default:
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index 5de2ed13b35d..1b11abbb5c91 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV
select ASYNC_PQ
select ASYNC_XOR
-config ASYNC_RAID6_TEST
- tristate "Self test for hardware accelerated raid6 recovery"
- depends on ASYNC_RAID6_RECOV
- select ASYNC_MEMCPY
- ---help---
- This is a one-shot self test that permutes through the
- recovery of all the possible two disk failure scenarios for a
- N-disk array. Recovery is performed with the asynchronous
- raid6 recovery routines, and will optionally use an offload
- engine if one is available.
-
- If unsure, say N.
-
config ASYNC_TX_DISABLE_PQ_VAL_DMA
bool
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0ec1fb69d4ea..518c22bd9562 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
memcpy(dest_buf, src_buf, len);
- kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
+ kunmap_atomic(dest_buf, KM_USER0);
async_tx_sync_epilog(submit);
}
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 90d26c91f4e9..7a7219266e3c 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
memcpy(walk->dst.virt.addr, walk->page, n);
blkcipher_unmap_dst(walk);
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
- blkcipher_unmap_src(walk);
if (walk->flags & BLKCIPHER_WALK_DIFF)
blkcipher_unmap_dst(walk);
+ blkcipher_unmap_src(walk);
}
scatterwalk_advance(&walk->in, n);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 88681aca88c5..3f3489c5ca8c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,7 +9,6 @@ menuconfig ACPI
depends on PCI
depends on PM
select PNP
- select CPU_IDLE
default y
help
Advanced Configuration and Power Interface (ACPI) support for
@@ -66,7 +65,6 @@ config ACPI_PROCFS
config ACPI_PROCFS_POWER
bool "Deprecated power /proc/acpi directories"
depends on PROC_FS
- default y
help
For backwards compatibility, this option allows
deprecated power /proc/acpi/ directories to exist, even when
@@ -90,13 +88,6 @@ config ACPI_POWER_METER
To compile this driver as a module, choose M here:
the module will be called power-meter.
-config ACPI_SYSFS_POWER
- bool "Future power /sys interface"
- select POWER_SUPPLY
- default y
- help
- Say N to disable power /sys interface
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -136,6 +127,7 @@ config ACPI_PROC_EVENT
config ACPI_AC
tristate "AC Adapter"
depends on X86
+ select POWER_SUPPLY
default y
help
This driver supports the AC Adapter object, which indicates
@@ -148,6 +140,7 @@ config ACPI_AC
config ACPI_BATTERY
tristate "Battery"
depends on X86
+ select POWER_SUPPLY
default y
help
This driver adds support for battery information through
@@ -206,6 +199,7 @@ config ACPI_DOCK
config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
+ select CPU_IDLE
default y
help
This driver installs ACPI as the idle handler for Linux and uses
@@ -364,6 +358,7 @@ config ACPI_HOTPLUG_MEMORY
config ACPI_SBS
tristate "Smart Battery System"
depends on X86
+ select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
type of access to battery information, found on some laptops.
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 56205a0b85df..ba9afeaa23ac 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -32,9 +32,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -86,9 +84,7 @@ static struct acpi_driver acpi_ac_driver = {
};
struct acpi_ac {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply charger;
-#endif
struct acpi_device * device;
unsigned long long state;
};
@@ -104,7 +100,6 @@ static const struct file_operations acpi_ac_fops = {
.release = single_release,
};
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -123,7 +118,6 @@ static int get_ac_property(struct power_supply *psy,
static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#endif
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
@@ -247,9 +241,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
dev_name(&device->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(device, event, (u32) ac->state);
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
}
return;
@@ -282,14 +274,12 @@ static int acpi_ac_add(struct acpi_device *device)
#endif
if (result)
goto end;
-#ifdef CONFIG_ACPI_SYSFS_POWER
ac->charger.name = acpi_device_bid(device);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
-#endif
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -316,10 +306,8 @@ static int acpi_ac_resume(struct acpi_device *device)
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (old_state != ac->state)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
return 0;
}
@@ -333,10 +321,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
ac = acpi_driver_data(device);
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_ac_remove_fs(device);
#endif
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index d93cc06f4bf8..a7e1d1aa4107 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -21,7 +21,7 @@ acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
+acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
@@ -44,4 +44,5 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o
+ utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
+ utosi.o utxferror.o
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 48faf3eba9fb..72e9d5eb083c 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -105,6 +105,8 @@ void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
acpi_status
acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
+void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
+
acpi_status acpi_db_find_name_in_namespace(char *name_arg);
void acpi_db_set_scope(char *name);
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 36867cd70eac..a6f99cc37a19 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -105,8 +105,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info **return_gpe_block);
acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
- struct acpi_gpe_block_info *gpe_block);
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ignored);
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 1d192142c691..ad88fcae4eb9 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -132,6 +132,7 @@ struct acpi_table_fadt acpi_gbl_FADT;
u32 acpi_current_gpe_count;
u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
#endif
@@ -187,6 +188,10 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+/* Mutex for _OSI support */
+
+ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+
/* Reader/Writer lock is used for namespace walk and dynamic table unload */
ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
@@ -255,6 +260,7 @@ ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
+ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
/* Owner ID support */
@@ -273,8 +279,8 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
ACPI_EXTERN u8 acpi_gbl_osi_data;
+ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
#ifndef DEFINE_ACPI_GLOBALS
@@ -364,6 +370,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+ACPI_EXTERN u8 acpi_all_gpes_initialized;
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 120b3af56596..167470ad2d21 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -121,6 +121,13 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context);
+/*
+ * hwpci - PCI configuration support
+ */
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+ acpi_handle root_pci_device, acpi_handle pci_region);
+
#ifdef ACPI_FUTURE_USAGE
/*
* hwtimer - ACPI Timer prototypes
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 7dad9160f209..2ceb0c05b2d7 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -413,6 +413,7 @@ struct acpi_handler_info {
void *context; /* Context to be passed to handler */
struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
u8 orig_flags; /* Original misc info about this GPE */
+ u8 orig_enabled; /* Set if the GPE was originally enabled */
};
union acpi_gpe_dispatch_info {
@@ -457,6 +458,7 @@ struct acpi_gpe_block_info {
u32 register_count; /* Number of register pairs in block */
u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
+ u8 initialized; /* If set, the GPE block has been initialized */
};
/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
@@ -473,7 +475,6 @@ struct acpi_gpe_walk_info {
struct acpi_gpe_block_info *gpe_block;
u16 count;
acpi_owner_id owner_id;
- u8 enable_this_gpe;
u8 execute_by_owner_id;
};
@@ -854,7 +855,7 @@ struct acpi_bit_register_info {
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
- ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
+ ACPI_BITMASK_PCIEXP_WAKE_STATUS | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
@@ -909,15 +910,21 @@ struct acpi_bit_register_info {
#define ACPI_OSI_WIN_VISTA 0x07
#define ACPI_OSI_WINSRV_2008 0x08
#define ACPI_OSI_WIN_VISTA_SP1 0x09
-#define ACPI_OSI_WIN_7 0x0A
+#define ACPI_OSI_WIN_VISTA_SP2 0x0A
+#define ACPI_OSI_WIN_7 0x0B
#define ACPI_ALWAYS_ILLEGAL 0x00
struct acpi_interface_info {
char *name;
+ struct acpi_interface_info *next;
+ u8 flags;
u8 value;
};
+#define ACPI_OSI_INVALID 0x01
+#define ACPI_OSI_DYNAMIC 0x02
+
struct acpi_port_info {
char *name;
u16 start;
@@ -997,7 +1004,7 @@ struct acpi_port_info {
struct acpi_db_method_info {
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
- u32 *threads;
+ acpi_thread_id *threads;
u32 num_threads;
u32 num_created;
u32 num_completed;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 9894929a2abb..8d5c9e0a495f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -338,8 +338,8 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e);
-#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9f60ff002203..d44d3bc5b847 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -339,18 +339,6 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
u32 acpi_ns_local(acpi_object_type type);
void
-acpi_ns_report_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
-
-void
-acpi_ns_report_method_error(const char *module_name,
- u32 line_number,
- const char *message,
- struct acpi_namespace_node *node,
- const char *path, acpi_status lookup_status);
-
-void
acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 54857fa87aaf..bdbfaf22bd14 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -248,7 +248,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_bit_width; /* Read/Write size in bits (8-64) */
+
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 35df755251ce..72e4183c1937 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -312,8 +312,6 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
/*
* uteval - object evaluation
*/
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
-
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
char *path,
@@ -395,6 +393,21 @@ acpi_status
acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
/*
+ * utosi - Support for the _OSI predefined control method
+ */
+acpi_status acpi_ut_initialize_interfaces(void);
+
+void acpi_ut_interface_terminate(void);
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name);
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
+
+/*
* utstate - Generic state creation/cache routines
*/
void
@@ -473,17 +486,6 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer);
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
- u32 line_number,
- char *pathname,
- u8 node_flags, const char *format, ...);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
- u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...);
-
/* Values for Base above (16=Hex, 10=Decimal) */
#define ACPI_ANY_BASE 0
@@ -574,6 +576,32 @@ acpi_status
acpi_ut_create_list(char *list_name,
u16 object_size, struct acpi_memory_list **return_cache);
-#endif
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
+
+/*
+ * utxferror - various error/warning output functions
+ */
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+ u32 line_number,
+ char *pathname, u8 node_flags, const char *format, ...);
+
+void
+acpi_ut_namespace_error(const char *module_name,
+ u32 line_number,
+ const char *internal_name, acpi_status lookup_status);
+
+void
+acpi_ut_method_error(const char *module_name,
+ u32 line_number,
+ const char *message,
+ struct acpi_namespace_node *node,
+ const char *path, acpi_status lookup_status);
#endif /* _ACUTILS_H */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 64750ee96e20..d94dd8974b55 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -573,7 +573,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_os_release_mutex(method_desc->method.
mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = NULL;
+ method_desc->method.mutex->mutex.thread_id = 0;
}
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d555b374e314..6b0b5d08d97a 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -300,10 +300,25 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
* we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
+ *
+ * Note 10/2010: Except for the Scope() op. This opcode does
+ * not actually create a new object; it refers to an existing
+ * object. However, for Scope(), we do want to open a
+ * new scope.
*/
- status = acpi_ds_load2_begin_op(walk_state, NULL);
+ if (op->common.aml_opcode != AML_SCOPE_OP) {
+ status =
+ acpi_ds_load2_begin_op(walk_state, NULL);
+ } else {
+ status =
+ acpi_ds_scope_stack_push(op->named.node,
+ op->named.node->
+ type, walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
}
-
break;
case AML_CLASS_EXECUTE:
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 303618889da0..c61c3039c31a 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -95,47 +95,6 @@ acpi_status acpi_ev_initialize_events(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ev_install_fadt_gpes
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
- * (0 and 1). The HW must be fully initialized at this point,
- * including global lock support.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_fadt_gpes(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
-
- /* Namespace must be locked */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* FADT GPE Block 0 */
-
- (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_gpe_fadt_blocks[0]);
-
- /* FADT GPE Block 1 */
-
- (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_gpe_fadt_blocks[1]);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_xrupt_handlers
*
* PARAMETERS: None
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 85445fb5844e..020add3eee1c 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -363,6 +363,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
+ gpe_block->initialized = FALSE;
ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
sizeof(struct acpi_generic_address));
@@ -385,11 +386,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
+ acpi_all_gpes_initialized = FALSE;
+
/* Find all GPE methods (_Lxx or_Exx) for this block */
walk_info.gpe_block = gpe_block;
walk_info.gpe_device = gpe_device;
- walk_info.enable_this_gpe = FALSE;
walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
@@ -434,35 +436,34 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
******************************************************************************/
acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
- struct acpi_gpe_block_info *gpe_block)
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ignored)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
- u32 gpe_number;
u32 i;
u32 j;
ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
- /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
-
- if (!gpe_block) {
+ /*
+ * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
+ * GPE blocks that have been initialized already.
+ */
+ if (!gpe_block || gpe_block->initialized) {
return_ACPI_STATUS(AE_OK);
}
/*
- * Enable all GPEs that have a corresponding method. Any other GPEs
- * within this block must be enabled via the acpi_enable_gpe interface.
+ * Enable all GPEs that have a corresponding method and have the
+ * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must
+ * be enabled via the acpi_enable_gpe() interface.
*/
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device) {
- gpe_device = NULL;
- }
-
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
@@ -470,27 +471,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
- gpe_number = gpe_index + gpe_block->block_base_number;
/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+ || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
- /*
- * If the GPE has already been enabled for runtime
- * signaling, make sure it remains enabled, but do not
- * increment its reference counter.
- */
- status = gpe_event_info->runtime_count ?
- acpi_ev_enable_gpe(gpe_event_info) :
- acpi_enable_gpe(gpe_device, gpe_number);
-
+ status = acpi_raw_enable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not enable GPE 0x%02X",
- gpe_number));
+ "Could not enable GPE 0x%02X",
+ gpe_index + gpe_block->block_base_number));
continue;
}
@@ -504,5 +497,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_enabled_count));
}
+ gpe_block->initialized = TRUE;
+
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 3084c5de1bba..2c7def95f721 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -210,8 +210,7 @@ acpi_status acpi_ev_gpe_initialize(void)
*
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
* result of a Load() or load_table() operation. If new GPE
- * methods have been installed, register the new methods and
- * enable and runtime GPEs that are associated with them.
+ * methods have been installed, register the new methods.
*
******************************************************************************/
@@ -239,7 +238,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
walk_info.count = 0;
- walk_info.enable_this_gpe = TRUE;
/* Walk the interrupt level descriptor list */
@@ -301,8 +299,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
*
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
* with that owner.
- * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
- * method is immediately enabled (Used for Load/load_table operators)
*
******************************************************************************/
@@ -315,8 +311,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
struct acpi_gpe_walk_info *walk_info =
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_namespace_node *gpe_device;
- acpi_status status;
u32 gpe_number;
char name[ACPI_NAME_SIZE + 1];
u8 type;
@@ -421,29 +415,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
- /*
- * Enable this GPE if requested. This only happens when during the
- * execution of a Load or load_table operator. We have found a new
- * GPE method and want to immediately enable the GPE if it is a
- * runtime GPE.
- */
- if (walk_info->enable_this_gpe) {
-
- walk_info->count++;
- gpe_device = walk_info->gpe_device;
-
- if (gpe_device == acpi_gbl_fadt_gpe_device) {
- gpe_device = NULL;
- }
-
- status = acpi_enable_gpe(gpe_device, gpe_number);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not enable GPE 0x%02X",
- gpe_number));
- }
- }
-
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
name, gpe_number));
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index df0aea9a8cfd..fcaed9fb44ff 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -553,7 +553,7 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_gbl_global_lock_acquired = FALSE;
/* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = NULL;
+ acpi_ev_global_lock_thread_id = 0;
acpi_ev_global_lock_acquired = 0;
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index f40d271bf568..0b47a6dc9290 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -289,8 +289,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
}
/*
- * Get the PCI device and function numbers from the _ADR object contained
- * in the parent's scope.
+ * Get the PCI device and function numbers from the _ADR object
+ * contained in the parent's scope.
*/
status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
pci_device_node, &pci_value);
@@ -320,9 +320,15 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
pci_id->bus = ACPI_LOWORD(pci_value);
}
- /* Complete this device's pci_id */
+ /* Complete/update the PCI ID for this device */
- acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+ status =
+ acpi_hw_derive_pci_id(pci_id, pci_root_node,
+ region_obj->region.node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(pci_id);
+ return_ACPI_STATUS(status);
+ }
*region_context = pci_id;
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 14e48add32fa..36af222cac65 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -726,15 +726,16 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
/*
- * If the GPE is associated with a method and it cannot wake up the
- * system from sleep states, it was enabled automatically during
- * initialization, so it has to be disabled now to avoid spurious
- * execution of the handler.
+ * If the GPE is associated with a method, it might have been enabled
+ * automatically during initialization, in which case it has to be
+ * disabled now to avoid spurious execution of the handler.
*/
if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
- && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ && gpe_event_info->runtime_count) {
+ handler->orig_enabled = 1;
(void)acpi_raw_disable_gpe(gpe_event_info);
+ }
/* Install the handler */
@@ -837,13 +838,13 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
gpe_event_info->flags |= handler->orig_flags;
/*
- * If the GPE was previously associated with a method and it cannot wake
- * up the system from sleep states, it should be enabled at this point
- * to restore the post-initialization configuration.
+ * If the GPE was previously associated with a method and it was
+ * enabled, it should be enabled at this point to restore the
+ * post-initialization configuration.
*/
if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
- && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+ && handler->orig_enabled)
(void)acpi_raw_enable_gpe(gpe_event_info);
/* Now we can free the handler object */
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 304825528d48..a1dabe3fd8ae 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -379,21 +379,12 @@ acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
+ if (gpe_event_info) {
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ } else {
status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
- if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
- goto unlock_and_exit;
}
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) {
- (void)acpi_raw_disable_gpe(gpe_event_info);
- }
-
-unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -651,7 +642,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_generic_address *gpe_block_address,
u32 register_count, u32 interrupt_number)
{
- acpi_status status;
+ acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *gpe_block;
@@ -715,10 +706,6 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- /* Enable the runtime GPEs in the new block */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
-
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
@@ -924,3 +911,43 @@ acpi_status acpi_enable_all_runtime_gpes(void)
return_ACPI_STATUS(status);
}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_update_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
+ * are not pointed to by any device _PRW methods indicating that
+ * these GPEs are generally intended for system or device wakeup
+ * (such GPEs have to be enabled directly when the devices whose
+ * _PRW methods point to them are set up for wakeup signaling).
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_update_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ } else if (acpi_all_gpes_initialized) {
+ goto unlock;
+ }
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+ if (ACPI_SUCCESS(status)) {
+ acpi_all_gpes_initialized = TRUE;
+ }
+
+unlock:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
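A minimal host-side usage sketch for the acpi_update_gpes() interface added above, assuming device enumeration and _PRW wake setup have already run; the function name example_finish_gpe_setup is illustrative only and not part of this patch:

    acpi_status example_finish_gpe_setup(void)
    {
    	acpi_status status;

    	/* Enable any remaining runtime (_Lxx/_Exx) GPEs exactly once */

    	status = acpi_update_gpes();
    	return status;
    }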
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 541cbc1544d5..ce9314f79451 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -64,6 +64,12 @@ ACPI_MODULE_NAME("evxfregn")
*
* DESCRIPTION: Install a handler for all op_regions of a given space_id.
*
+ * NOTE: This function should only be called after acpi_enable_subsystem has
+ * been called. This is because any _REG methods associated with the Space ID
+ * are executed here, and these methods can only be safely executed after
+ * the default handlers have been installed and the hardware has been
+ * initialized (via acpi_enable_subsystem.)
+ *
******************************************************************************/
acpi_status
acpi_install_address_space_handler(acpi_handle device,
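A hedged sketch of the call ordering required by the NOTE above; my_ec_space_handler and my_ec_space_setup are hypothetical callbacks, while the remaining names are standard ACPICA symbols:

    acpi_status example_install_ec_handler(void)
    {
    	acpi_status status;

    	/* Default handlers and hardware init must complete first */

    	status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
    	if (ACPI_FAILURE(status))
    		return status;

    	/* Only now can the _REG methods for this space run safely */

    	return acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
    						  ACPI_ADR_SPACE_EC,
    						  my_ec_space_handler,
    						  my_ec_space_setup, NULL);
    }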
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 047217303a4b..38293fd3e088 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -119,8 +119,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus or IPMI address space, it has a non-linear address space
- * and the request cannot be directly validated
+ * Exit now for SMBus or IPMI address space, it has a non-linear
+ * address space and the request cannot be directly validated
*/
if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
@@ -147,8 +147,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* (Region length is specified in bytes)
*/
if (rgn_desc->region.length <
- (obj_desc->common_field.base_byte_offset +
- field_datum_byte_offset +
+ (obj_desc->common_field.base_byte_offset + field_datum_byte_offset +
obj_desc->common_field.access_byte_width)) {
if (acpi_gbl_enable_interpreter_slack) {
/*
@@ -680,6 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
u32 buffer_tail_bits;
u32 datum_count;
u32 field_datum_count;
+ u32 access_bit_width;
u32 i;
ACPI_FUNCTION_TRACE(ex_extract_from_field);
@@ -694,16 +694,36 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
+
ACPI_MEMSET(buffer, 0, buffer_length);
+ access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
+ /* Handle the simple case here */
+
+ if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+ (obj_desc->common_field.bit_length == access_bit_width)) {
+ status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+ return_ACPI_STATUS(status);
+ }
+
+/* TBD: Move to common setup code */
+
+ /* Field algorithm is limited to sizeof(u64), truncate if needed */
+
+ if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+ obj_desc->common_field.access_byte_width = sizeof(u64);
+ access_bit_width = sizeof(u64) * 8;
+ }
/* Compute the number of datums (access width data items) */
- datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
+ datum_count =
+ ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ access_bit_width);
+
field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.
start_field_bit_offset,
- obj_desc->common_field.
access_bit_width);
/* Priming read from the field */
@@ -738,12 +758,11 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
* This avoids the differences in behavior between different compilers
* concerning shift values larger than the target data width.
*/
- if ((obj_desc->common_field.access_bit_width -
- obj_desc->common_field.start_field_bit_offset) <
+ if (access_bit_width -
+ obj_desc->common_field.start_field_bit_offset <
ACPI_INTEGER_BIT_SIZE) {
merged_datum |=
- raw_datum << (obj_desc->common_field.
- access_bit_width -
+ raw_datum << (access_bit_width -
obj_desc->common_field.
start_field_bit_offset);
}
@@ -765,8 +784,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Mask off any extra bits in the last datum */
- buffer_tail_bits = obj_desc->common_field.bit_length %
- obj_desc->common_field.access_bit_width;
+ buffer_tail_bits = obj_desc->common_field.bit_length % access_bit_width;
if (buffer_tail_bits) {
merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
}
@@ -798,6 +816,7 @@ acpi_status
acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
void *buffer, u32 buffer_length)
{
+ void *new_buffer;
acpi_status status;
u64 mask;
u64 width_mask;
@@ -808,9 +827,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
u32 buffer_tail_bits;
u32 datum_count;
u32 field_datum_count;
- u32 i;
+ u32 access_bit_width;
u32 required_length;
- void *new_buffer;
+ u32 i;
ACPI_FUNCTION_TRACE(ex_insert_into_field);
@@ -844,17 +863,24 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
buffer_length = required_length;
}
+/* TBD: Move to common setup code */
+
+ /* Algo is limited to sizeof(u64), so cut the access_byte_width */
+ if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+ obj_desc->common_field.access_byte_width = sizeof(u64);
+ }
+
+ access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
/*
* Create the bitmasks used for bit insertion.
* Note: This if/else is used to bypass compiler differences with the
* shift operator
*/
- if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+ if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
width_mask = ACPI_UINT64_MAX;
} else {
- width_mask =
- ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
- access_bit_width);
+ width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
}
mask = width_mask &
@@ -863,12 +889,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */
datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
+ access_bit_width);
field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.
start_field_bit_offset,
- obj_desc->common_field.
access_bit_width);
/* Get initial Datum from the input buffer */
@@ -905,12 +930,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
* This avoids the differences in behavior between different compilers
* concerning shift values larger than the target data width.
*/
- if ((obj_desc->common_field.access_bit_width -
+ if ((access_bit_width -
obj_desc->common_field.start_field_bit_offset) <
ACPI_INTEGER_BIT_SIZE) {
merged_datum =
- raw_datum >> (obj_desc->common_field.
- access_bit_width -
+ raw_datum >> (access_bit_width -
obj_desc->common_field.
start_field_bit_offset);
} else {
@@ -929,6 +953,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
ACPI_MIN(obj_desc->common_field.access_byte_width,
buffer_length - buffer_offset));
+
merged_datum |=
raw_datum << obj_desc->common_field.start_field_bit_offset;
}
@@ -937,7 +962,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
buffer_tail_bits = (obj_desc->common_field.bit_length +
obj_desc->common_field.start_field_bit_offset) %
- obj_desc->common_field.access_bit_width;
+ access_bit_width;
if (buffer_tail_bits) {
mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
}
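A small worked example of the tail-bit masking used by both the extract and insert paths above, with illustrative numbers (bit_length = 10, access_bit_width = 8):

    buffer_tail_bits = 10 % 8;                /* = 2 valid bits in last datum */
    merged_datum &= ACPI_MASK_BITS_ABOVE(2);  /* keep bits 0..1, clear the rest */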
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index f73be97043c0..6af14e43f839 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -336,7 +336,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
/* Clear mutex info */
- obj_desc->mutex.thread_id = NULL;
+ obj_desc->mutex.thread_id = 0;
return_ACPI_STATUS(status);
}
@@ -393,10 +393,10 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
(obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
- "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
- ACPI_CAST_PTR(void, walk_state->thread->thread_id),
+ "Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
+ (u32)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void, owner_thread->thread_id)));
+ (u32)owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -488,7 +488,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
/* Mark mutex unowned */
obj_desc->mutex.owner_thread = NULL;
- obj_desc->mutex.thread_id = NULL;
+ obj_desc->mutex.thread_id = 0;
/* Update Thread sync_level (Last mutex is the important one) */
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 98a331d2249b..7aae29f73d3f 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -355,12 +355,10 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
- /* Setup width (access granularity) fields */
+ /* Setup width (access granularity) fields (values are: 1, 2, 4, 8) */
obj_desc->common_field.access_byte_width = (u8)
- ACPI_DIV_8(access_bit_width); /* 1, 2, 4, 8 */
-
- obj_desc->common_field.access_bit_width = (u8) access_bit_width;
+ ACPI_DIV_8(access_bit_width);
/*
* base_byte_offset is the address of the start of the field within the
@@ -405,8 +403,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *second_desc = NULL;
- u32 type;
acpi_status status;
+ u32 access_byte_width;
+ u32 type;
ACPI_FUNCTION_TRACE(ex_prep_field_value);
@@ -421,8 +420,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type 0x%X (%s)",
- type, acpi_ut_get_type_name(type)));
+ "Needed Region, found type 0x%X (%s)", type,
+ acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -438,7 +437,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
/* Initialize areas of the object that are common to all fields */
obj_desc->common_field.node = info->field_node;
- status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
+ status = acpi_ex_prep_common_field_object(obj_desc,
+ info->field_flags,
info->attribute,
info->field_bit_position,
info->field_bit_length);
@@ -455,26 +455,25 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
- /* An additional reference for the container */
+ /* Allow full data read from EC address space */
- acpi_ut_add_reference(obj_desc->field.region_obj);
+ if ((obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_EC)
+ && (obj_desc->common_field.bit_length > 8)) {
+ access_byte_width =
+ ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.
+ bit_length);
+
+ /* Maximum byte width supported is 255 */
- /* allow full data read from EC address space */
- if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_EC) {
- if (obj_desc->common_field.bit_length > 8) {
- unsigned width =
- ACPI_ROUND_BITS_UP_TO_BYTES(
- obj_desc->common_field.bit_length);
- // access_bit_width is u8, don't overflow it
- if (width > 8)
- width = 8;
+ if (access_byte_width < 256) {
obj_desc->common_field.access_byte_width =
- width;
- obj_desc->common_field.access_bit_width =
- 8 * width;
+ (u8)access_byte_width;
}
}
+ /* An additional reference for the container */
+
+ acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8819d2ac5aee..de17e10da0ed 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -353,7 +353,6 @@ acpi_ex_pci_config_space_handler(u32 function,
acpi_status status = AE_OK;
struct acpi_pci_id *pci_id;
u16 pci_register;
- u32 value32;
ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
@@ -381,8 +380,7 @@ acpi_ex_pci_config_space_handler(u32 function,
case ACPI_READ:
status = acpi_os_read_pci_configuration(pci_id, pci_register,
- &value32, bit_width);
- *value = value32;
+ value, bit_width);
break;
case ACPI_WRITE:
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
new file mode 100644
index 000000000000..ad21c7d8bf4f
--- /dev/null
+++ b/drivers/acpi/acpica/hwpci.c
@@ -0,0 +1,412 @@
+/*******************************************************************************
+ *
+ * Module Name: hwpci - Obtain PCI bus, device, and function numbers
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("hwpci")
+
+/* PCI configuration space values */
+#define PCI_CFG_HEADER_TYPE_REG 0x0E
+#define PCI_CFG_PRIMARY_BUS_NUMBER_REG 0x18
+#define PCI_CFG_SECONDARY_BUS_NUMBER_REG 0x19
+/* PCI header values */
+#define PCI_HEADER_TYPE_MASK 0x7F
+#define PCI_TYPE_BRIDGE 0x01
+#define PCI_TYPE_CARDBUS_BRIDGE 0x02
+typedef struct acpi_pci_device {
+ acpi_handle device;
+ struct acpi_pci_device *next;
+
+} acpi_pci_device;
+
+/* Local prototypes */
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+ acpi_handle pci_region,
+ struct acpi_pci_device **return_list_head);
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+ struct acpi_pci_device *list_head);
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head);
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+ acpi_handle pci_device,
+ u16 *bus_number, u8 *is_bridge);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_derive_pci_id
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * root_pci_device - A handle to a PCI device object. This
+ * object must be a PCI Root Bridge having a
+ * _HID value of either PNP0A03 or PNP0A08
+ * pci_region - A handle to a PCI configuration space
+ * Operation Region being initialized
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function derives a full PCI ID for a PCI device,
+ * consisting of a Segment number, Bus number, Device number,
+ * and function code.
+ *
+ * The PCI hardware dynamically configures PCI bus numbers
+ * depending on the bus topology discovered during system
+ * initialization. This function is invoked during configuration
+ * of a PCI_Config Operation Region in order to (possibly) update
+ * the Bus/Device/Function numbers in the pci_id with the actual
+ * values as determined by the hardware and operating system
+ * configuration.
+ *
+ * The pci_id parameter is initially populated during the Operation
+ *              Region initialization. This function is then called, and it
+ * will make any necessary modifications to the Bus, Device, or
+ * Function number PCI ID subfields as appropriate for the
+ * current hardware and OS configuration.
+ *
+ * NOTE: Created 08/2010. Replaces the previous OSL acpi_os_derive_pci_id
+ * interface since this feature is OS-independent. This module
+ * specifically avoids any use of recursion by building a local
+ * temporary device list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+ acpi_handle root_pci_device, acpi_handle pci_region)
+{
+ acpi_status status;
+ struct acpi_pci_device *list_head = NULL;
+
+ ACPI_FUNCTION_TRACE(hw_derive_pci_id);
+
+ if (!pci_id) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Build a list of PCI devices, from pci_region up to root_pci_device */
+
+ status =
+ acpi_hw_build_pci_list(root_pci_device, pci_region, &list_head);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Walk the list, updating the PCI device/function/bus numbers */
+
+ status = acpi_hw_process_pci_list(pci_id, list_head);
+ }
+
+ /* Always delete the list */
+
+ acpi_hw_delete_pci_list(list_head);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_build_pci_list
+ *
+ * PARAMETERS: root_pci_device - A handle to a PCI device object. This
+ * object is guaranteed to be a PCI Root
+ * Bridge having a _HID value of either
+ * PNP0A03 or PNP0A08
+ * pci_region - A handle to the PCI configuration space
+ * Operation Region
+ * return_list_head - Where the PCI device list is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Builds a list of devices from the input PCI region up to the
+ * Root PCI device for this namespace subtree.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+ acpi_handle pci_region,
+ struct acpi_pci_device **return_list_head)
+{
+ acpi_handle current_device;
+ acpi_handle parent_device;
+ acpi_status status;
+ struct acpi_pci_device *list_element;
+ struct acpi_pci_device *list_head = NULL;
+
+ /*
+ * Ascend namespace branch until the root_pci_device is reached, building
+	 * a list of device nodes. Loop will exit when either the root PCI
+	 * device is found, or the root of the namespace is reached.
+ */
+ current_device = pci_region;
+ while (1) {
+ status = acpi_get_parent(current_device, &parent_device);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Finished when we reach the PCI root device (PNP0A03 or PNP0A08) */
+
+ if (parent_device == root_pci_device) {
+ *return_list_head = list_head;
+ return (AE_OK);
+ }
+
+ list_element = ACPI_ALLOCATE(sizeof(struct acpi_pci_device));
+ if (!list_element) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Put new element at the head of the list */
+
+ list_element->next = list_head;
+ list_element->device = parent_device;
+ list_head = list_element;
+
+ current_device = parent_device;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_process_pci_list
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * list_head - Device list created by
+ * acpi_hw_build_pci_list
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk downward through the PCI device list, getting the device
+ * info for each, via the PCI configuration space and updating
+ *              the PCI ID as necessary. The list is freed by the caller.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+ struct acpi_pci_device *list_head)
+{
+ acpi_status status = AE_OK;
+ struct acpi_pci_device *info;
+ u16 bus_number;
+ u8 is_bridge = TRUE;
+
+ ACPI_FUNCTION_NAME(hw_process_pci_list);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Input PciId: Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X\n",
+ pci_id->segment, pci_id->bus, pci_id->device,
+ pci_id->function));
+
+ bus_number = pci_id->bus;
+
+ /*
+ * Descend down the namespace tree, collecting PCI device, function,
+ * and bus numbers. bus_number is only important for PCI bridges.
+ * Algorithm: As we descend the tree, use the last valid PCI device,
+ * function, and bus numbers that are discovered, and assign them
+ * to the PCI ID for the target device.
+ */
+ info = list_head;
+ while (info) {
+ status = acpi_hw_get_pci_device_info(pci_id, info->device,
+ &bus_number, &is_bridge);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ info = info->next;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Output PciId: Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X "
+ "Status %X BusNumber %X IsBridge %X\n",
+ pci_id->segment, pci_id->bus, pci_id->device,
+ pci_id->function, status, bus_number, is_bridge));
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_delete_pci_list
+ *
+ * PARAMETERS: list_head - Device list created by
+ * acpi_hw_build_pci_list
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Free the entire PCI list.
+ *
+ ******************************************************************************/
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head)
+{
+ struct acpi_pci_device *next;
+ struct acpi_pci_device *previous;
+
+ next = list_head;
+ while (next) {
+ previous = next;
+ next = previous->next;
+ ACPI_FREE(previous);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_hw_get_pci_device_info
+ *
+ * PARAMETERS: pci_id - Initial values for the PCI ID. May be
+ * modified by this function.
+ * pci_device - Handle for the PCI device object
+ * bus_number - Where a PCI bridge bus number is returned
+ * is_bridge - Return value, indicates if this PCI
+ * device is a PCI bridge
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get the device info for a single PCI device object. Get the
+ * _ADR (contains PCI device and function numbers), and for PCI
+ * bridge devices, get the bus number from PCI configuration
+ * space.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+ acpi_handle pci_device,
+ u16 *bus_number, u8 *is_bridge)
+{
+ acpi_status status;
+ acpi_object_type object_type;
+ u64 return_value;
+ u64 pci_value;
+
+ /* We only care about objects of type Device */
+
+ status = acpi_get_type(pci_device, &object_type);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ if (object_type != ACPI_TYPE_DEVICE) {
+ return (AE_OK);
+ }
+
+ /* We need an _ADR. Ignore device if not present */
+
+ status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
+ pci_device, &return_value);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ /*
+ * From _ADR, get the PCI Device and Function and
+ * update the PCI ID.
+ */
+ pci_id->device = ACPI_HIWORD(ACPI_LODWORD(return_value));
+ pci_id->function = ACPI_LOWORD(ACPI_LODWORD(return_value));
+
+ /*
+ * If the previous device was a bridge, use the previous
+ * device bus number
+ */
+ if (*is_bridge) {
+ pci_id->bus = *bus_number;
+ }
+
+ /*
+ * Get the bus numbers from PCI Config space:
+ *
+ * First, get the PCI header_type
+ */
+ *is_bridge = FALSE;
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_HEADER_TYPE_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* We only care about bridges (1=pci_bridge, 2=card_bus_bridge) */
+
+ pci_value &= PCI_HEADER_TYPE_MASK;
+
+ if ((pci_value != PCI_TYPE_BRIDGE) &&
+ (pci_value != PCI_TYPE_CARDBUS_BRIDGE)) {
+ return (AE_OK);
+ }
+
+ /* Bridge: Get the Primary bus_number */
+
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_PRIMARY_BUS_NUMBER_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ *is_bridge = TRUE;
+ pci_id->bus = (u16)pci_value;
+
+ /* Bridge: Get the Secondary bus_number */
+
+ status = acpi_os_read_pci_configuration(pci_id,
+ PCI_CFG_SECONDARY_BUS_NUMBER_REG,
+ &pci_value, 8);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ *bus_number = (u16)pci_value;
+ return (AE_OK);
+}
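For reference, a sketch of the _ADR decoding performed by acpi_hw_get_pci_device_info() above; for PCI, the low DWORD of _ADR carries the Device number in its high word and the Function number in its low word (the value below is made up):

    u64 adr = 0x001F0003;                          /* example value only */
    u32 device = ACPI_HIWORD(ACPI_LODWORD(adr));   /* -> 0x001F */
    u32 function = ACPI_LOWORD(ACPI_LODWORD(adr)); /* -> 0x0003 */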
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4009498fbabd..4ef9f43ea926 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -74,10 +74,18 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_FDE(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_PSS(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
@@ -108,8 +116,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* As necessary:
*
* _ALR: Sort the list ascending by ambient_illuminance
+ * _CID: Strings: uppercase all, remove any leading asterisk
* _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs
* _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _HID: Strings: uppercase all, remove any leading asterisk
* _PSS: Sort the list descending by Power
* _TSS: Sort the list descending by Power
*
@@ -122,8 +132,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},
+ {"_CID", acpi_ns_repair_CID},
{"_FDE", acpi_ns_repair_FDE},
{"_GTM", acpi_ns_repair_FDE}, /* _GTM has same repair as _FDE */
+ {"_HID", acpi_ns_repair_HID},
{"_PSS", acpi_ns_repair_PSS},
{"_TSS", acpi_ns_repair_TSS},
{{0, 0, 0, 0}, NULL} /* Table terminator */
@@ -321,6 +333,157 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
/******************************************************************************
*
+ * FUNCTION: acpi_ns_repair_CID
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _CID object. If a string, ensure that all
+ * letters are uppercase and that there is no leading asterisk.
+ * If a Package, ensure same for all string elements.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ acpi_status status;
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object **element_ptr;
+ union acpi_operand_object *original_element;
+ u16 original_ref_count;
+ u32 i;
+
+ /* Check for _CID as a simple string */
+
+ if (return_object->common.type == ACPI_TYPE_STRING) {
+ status = acpi_ns_repair_HID(data, return_object_ptr);
+ return (status);
+ }
+
+ /* Exit if not a Package */
+
+ if (return_object->common.type != ACPI_TYPE_PACKAGE) {
+ return (AE_OK);
+ }
+
+ /* Examine each element of the _CID package */
+
+ element_ptr = return_object->package.elements;
+ for (i = 0; i < return_object->package.count; i++) {
+ original_element = *element_ptr;
+ original_ref_count = original_element->common.reference_count;
+
+ status = acpi_ns_repair_HID(data, element_ptr);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Take care with reference counts */
+
+ if (original_element != *element_ptr) {
+
+ /* Element was replaced */
+
+ (*element_ptr)->common.reference_count =
+ original_ref_count;
+
+ acpi_ut_remove_reference(original_element);
+ }
+
+ element_ptr++;
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ns_repair_HID
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _HID object. If a string, ensure that all
+ * letters are uppercase and that there is no leading asterisk.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object *new_string;
+ char *source;
+ char *dest;
+
+ ACPI_FUNCTION_NAME(ns_repair_HID);
+
+ /* We only care about string _HID objects (not integers) */
+
+ if (return_object->common.type != ACPI_TYPE_STRING) {
+ return (AE_OK);
+ }
+
+ if (return_object->string.length == 0) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid zero-length _HID or _CID string"));
+
+ /* Return AE_OK anyway, let driver handle it */
+
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+ }
+
+ /* It is simplest to always create a new string object */
+
+ new_string = acpi_ut_create_string_object(return_object->string.length);
+ if (!new_string) {
+ return (AE_NO_MEMORY);
+ }
+
+ /*
+ * Remove a leading asterisk if present. For some unknown reason, there
+	 * are many machines in the field that contain IDs like this.
+ *
+ * Examples: "*PNP0C03", "*ACPI0003"
+ */
+ source = return_object->string.pointer;
+ if (*source == '*') {
+ source++;
+ new_string->string.length--;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Removed invalid leading asterisk\n",
+ data->pathname));
+ }
+
+ /*
+ * Copy and uppercase the string. From the ACPI specification:
+ *
+ * A valid PNP ID must be of the form "AAA####" where A is an uppercase
+ * letter and # is a hex digit. A valid ACPI ID must be of the form
+ * "ACPI####" where # is a hex digit.
+ */
+ for (dest = new_string->string.pointer; *source; dest++, source++) {
+ *dest = (char)ACPI_TOUPPER(*source);
+ }
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_string;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_TSS
*
* PARAMETERS: Data - Pointer to validation data structure
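Hedged before/after examples of the repairs performed by acpi_ns_repair_HID() above; the asterisk cases come from the comment in the patch, and the lowercase input is illustrative:

    "*PNP0C03"  -> "PNP0C03"     (leading asterisk removed)
    "*ACPI0003" -> "ACPI0003"
    "pnp0c0a"   -> "PNP0C0A"     (uppercased; lowercase input is illustrative)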
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e1add3491b04..a7d6ad9c111b 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -60,104 +60,6 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
/*******************************************************************************
*
- * FUNCTION: acpi_ns_report_error
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * internal_name - Name or path of the namespace node
- * lookup_status - Exception code from NS lookup
- *
- * RETURN: None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status)
-{
- acpi_status status;
- u32 bad_name;
- char *name = NULL;
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
- if (lookup_status == AE_BAD_CHARACTER) {
-
- /* There is a non-ascii character in the name */
-
- ACPI_MOVE_32_TO_32(&bad_name,
- ACPI_CAST_PTR(u32, internal_name));
- acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
- } else {
- /* Convert path to external format */
-
- status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
- internal_name, NULL, &name);
-
- /* Print target name */
-
- if (ACPI_SUCCESS(status)) {
- acpi_os_printf("[%s]", name);
- } else {
- acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
- }
-
- if (name) {
- ACPI_FREE(name);
- }
- }
-
- acpi_os_printf(" Namespace lookup failure, %s\n",
- acpi_format_exception(lookup_status));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_report_method_error
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Message - Error message to use on failure
- * prefix_node - Prefix relative to the path
- * Path - Path to the node (optional)
- * method_status - Execution status
- *
- * RETURN: None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_method_error(const char *module_name,
- u32 line_number,
- const char *message,
- struct acpi_namespace_node *prefix_node,
- const char *path, acpi_status method_status)
-{
- acpi_status status;
- struct acpi_namespace_node *node = prefix_node;
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
- if (path) {
- status =
- acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
- &node);
- if (ACPI_FAILURE(status)) {
- acpi_os_printf("[Could not get node by pathname]");
- }
- }
-
- acpi_ns_print_node_pathname(node, message);
- acpi_os_printf(", %s\n", acpi_format_exception(method_status));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_print_node_pathname
*
* PARAMETERS: Node - Object
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 1728cb9bf600..d2ff4325c427 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -49,7 +49,7 @@
ACPI_MODULE_NAME("tbfadt")
/* Local prototypes */
-static inline void
+static ACPI_INLINE void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id, u8 byte_width, u64 address);
@@ -181,7 +181,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
*
******************************************************************************/
-static inline void
+static ACPI_INLINE void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id, u8 byte_width, u64 address)
{
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 983510640059..f21c486929a5 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -179,9 +179,8 @@ acpi_debug_print(u32 requested_debug_level,
if (thread_id != acpi_gbl_prev_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
- ("\n**** Context Switch from TID %p to TID %p ****\n\n",
- ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
- ACPI_CAST_PTR(void, thread_id));
+ ("\n**** Context Switch from TID %u to TID %u ****\n\n",
+ (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
}
acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +193,7 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("%8s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
- acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
+ acpi_os_printf("[%u] ", (u32)thread_id);
}
acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 6dfdeb653490..22f59ef604e0 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -48,153 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("uteval")
-/*
- * Strings supported by the _OSI predefined (internal) method.
- *
- * March 2009: Removed "Linux" as this host no longer wants to respond true
- * for this string. Basically, the only safe OS strings are windows-related
- * and in many or most cases represent the only test path within the
- * BIOS-provided ASL code.
- *
- * The second element of each entry is used to track the newest version of
- * Windows that the BIOS has requested.
- */
-static struct acpi_interface_info acpi_interfaces_supported[] = {
- /* Operating System Vendor Strings */
-
- {"Windows 2000", ACPI_OSI_WIN_2000}, /* Windows 2000 */
- {"Windows 2001", ACPI_OSI_WIN_XP}, /* Windows XP */
- {"Windows 2001 SP1", ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */
- {"Windows 2001.1", ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
- {"Windows 2001 SP2", ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
- {"Windows 2001.1 SP1", ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
- {"Windows 2006", ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
- {"Windows 2006.1", ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
- {"Windows 2006 SP1", ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
- {"Windows 2009", ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
-
- /* Feature Group Strings */
-
- {"Extended Address Space Descriptor", 0}
-
- /*
- * All "optional" feature group strings (features that are implemented
- * by the host) should be implemented in the host version of
- * acpi_os_validate_interface and should not be added here.
- */
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_osi_implementation
- *
- * PARAMETERS: walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Implementation of the _OSI predefined control method
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
-{
- acpi_status status;
- union acpi_operand_object *string_desc;
- union acpi_operand_object *return_desc;
- u32 return_value;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ut_osi_implementation);
-
- /* Validate the string input argument */
-
- string_desc = walk_state->arguments[0].object;
- if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
- return_ACPI_STATUS(AE_TYPE);
- }
-
- /* Create a return object */
-
- return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!return_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Default return value is 0, NOT SUPPORTED */
-
- return_value = 0;
-
- /* Compare input string to static table of supported interfaces */
-
- for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
- if (!ACPI_STRCMP(string_desc->string.pointer,
- acpi_interfaces_supported[i].name)) {
- /*
- * The interface is supported.
- * Update the osi_data if necessary. We keep track of the latest
- * version of Windows that has been requested by the BIOS.
- */
- if (acpi_interfaces_supported[i].value >
- acpi_gbl_osi_data) {
- acpi_gbl_osi_data =
- acpi_interfaces_supported[i].value;
- }
-
- return_value = ACPI_UINT32_MAX;
- goto exit;
- }
- }
-
- /*
- * Did not match the string in the static table, call the host OSL to
- * check for a match with one of the optional strings (such as
- * "Module Device", "3.0 Thermal Model", etc.)
- */
- status = acpi_os_validate_interface(string_desc->string.pointer);
- if (ACPI_SUCCESS(status)) {
-
- /* The interface is supported */
-
- return_value = ACPI_UINT32_MAX;
- }
-
-exit:
- ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO,
- "ACPI: BIOS _OSI(%s) is %ssupported\n",
- string_desc->string.pointer, return_value == 0 ? "not " : ""));
-
- /* Complete the return value */
-
- return_desc->integer.value = return_value;
- walk_state->return_desc = return_desc;
- return_ACPI_STATUS (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_osi_invalidate
- *
- * PARAMETERS: interface_string
- *
- * RETURN: Status
- *
- * DESCRIPTION: invalidate string in pre-defiend _OSI string list
- *
- ******************************************************************************/
-
-acpi_status acpi_osi_invalidate(char *interface)
-{
- int i;
-
- for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
- if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i].name)) {
- *acpi_interfaces_supported[i].name = '\0';
- return AE_OK;
- }
- }
- return AE_NOT_FOUND;
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_evaluate_object
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 0558747579ef..e87bc6760be6 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -154,14 +154,16 @@ ACPI_EXPORT_SYMBOL(acpi_format_exception)
* 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
* during the initialization sequence.
* 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
- * perform a Notify() operation on it.
+ * perform a Notify() operation on it. 09/2010: Changed to type Device.
+ * This still allows notifies, but does not confuse host code that
+ * searches for valid thermal_zone objects.
*/
const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
- {"_TZ_", ACPI_TYPE_THERMAL, NULL},
+ {"_TZ_", ACPI_TYPE_DEVICE, NULL},
{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
@@ -766,6 +768,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
acpi_current_gpe_count = 0;
+ acpi_all_gpes_initialized = FALSE;
/* Global handlers */
@@ -774,6 +777,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_exception_handler = NULL;
acpi_gbl_init_handler = NULL;
acpi_gbl_table_handler = NULL;
+ acpi_gbl_interface_handler = NULL;
/* Global Lock support */
@@ -800,6 +804,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_osi_data = 0;
+ acpi_gbl_osi_mutex = NULL;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 1397fadd0d4b..d2906328535d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -48,42 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utids")
-/* Local prototypes */
-static void acpi_ut_copy_id_string(char *destination, char *source);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_copy_id_string
- *
- * PARAMETERS: Destination - Where to copy the string
- * Source - Source string
- *
- * RETURN: None
- *
- * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
- * Performs removal of a leading asterisk if present -- workaround
- * for a known issue on a bunch of machines.
- *
- ******************************************************************************/
-
-static void acpi_ut_copy_id_string(char *destination, char *source)
-{
-
- /*
- * Workaround for ID strings that have a leading asterisk. This construct
- * is not allowed by the ACPI specification (ID strings must be
- * alphanumeric), but enough existing machines have this embedded in their
- * ID strings that the following code is useful.
- */
- if (*source == '*') {
- source++;
- }
-
- /* Do the actual copy */
-
- ACPI_STRCPY(destination, source);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_HID
@@ -101,7 +65,6 @@ static void acpi_ut_copy_id_string(char *destination, char *source)
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
-
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
struct acpica_device_id **return_id)
@@ -147,7 +110,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
} else {
- acpi_ut_copy_id_string(hid->string, obj_desc->string.pointer);
+ ACPI_STRCPY(hid->string, obj_desc->string.pointer);
}
hid->length = length;
@@ -224,7 +187,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
} else {
- acpi_ut_copy_id_string(uid->string, obj_desc->string.pointer);
+ ACPI_STRCPY(uid->string, obj_desc->string.pointer);
}
uid->length = length;
@@ -357,8 +320,8 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/* Copy the String CID from the returned object */
- acpi_ut_copy_id_string(next_id_string,
- cid_objects[i]->string.pointer);
+ ACPI_STRCPY(next_id_string,
+ cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index a39c93dac719..c1b1c803ea9b 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -117,6 +117,10 @@ void acpi_ut_subsystem_shutdown(void)
/* Close the acpi_event Handling */
acpi_ev_terminate();
+
+ /* Delete any dynamic _OSI interfaces */
+
+ acpi_ut_interface_terminate();
#endif
/* Close the Namespace */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 35059a14eb72..49cf7b7fd816 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -48,11 +48,27 @@
ACPI_MODULE_NAME("utmath")
/*
- * Support for double-precision integer divide. This code is included here
- * in order to support kernel environments where the double-precision math
- * library is not available.
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
*/
#ifndef ACPI_USE_NATIVE_DIVIDE
+/* Structures used only for 64-bit divide */
+typedef struct uint64_struct {
+ u32 lo;
+ u32 hi;
+
+} uint64_struct;
+
+typedef union uint64_overlay {
+ u64 full;
+ struct uint64_struct part;
+
+} uint64_overlay;
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -69,6 +85,7 @@ ACPI_MODULE_NAME("utmath")
* 32-bit remainder.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
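A minimal sketch, assuming ACPI_USE_NATIVE_DIVIDE is not defined, of how the uint64_overlay union added above is typically used to access the 32-bit halves of a 64-bit value; example_high_half is illustrative only:

    static u32 example_high_half(u64 value)
    {
    	union uint64_overlay overlay;

    	overlay.full = value;
    	return overlay.part.hi;	/* upper 32 bits */
    }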
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index e8d0724ee403..c7d0e05ef5a4 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -50,11 +50,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
-/*
- * Common suffix for messages
- */
-#define ACPI_COMMON_MSG_SUFFIX \
- acpi_os_printf(" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
/*******************************************************************************
*
* FUNCTION: acpi_ut_validate_exception
@@ -1044,160 +1039,3 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_error, acpi_exception, acpi_warning, acpi_info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Print message with module/line/version info
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_error(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Error: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_exception(const char *module_name,
- u32 line_number, acpi_status status, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI Warning: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
-{
- va_list args;
-
- acpi_os_printf("ACPI: ");
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- acpi_os_printf("\n");
- va_end(args);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_error)
-ACPI_EXPORT_SYMBOL(acpi_exception)
-ACPI_EXPORT_SYMBOL(acpi_warning)
-ACPI_EXPORT_SYMBOL(acpi_info)
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_predefined_warning
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
- * node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Warnings for the predefined validation module. Messages are
- * only emitted the first time a problem with a particular
- * method/object is detected. This prevents a flood of error
- * messages for methods that are repeatedly evaluated.
- *
-******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
- u32 line_number,
- char *pathname,
- u8 node_flags, const char *format, ...)
-{
- va_list args;
-
- /*
- * Warning messages for this method/object will be disabled after the
- * first time a validation fails or an object is successfully repaired.
- */
- if (node_flags & ANOBJ_EVALUATED) {
- return;
- }
-
- acpi_os_printf("ACPI Warning for %s: ", pathname);
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_predefined_info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
- * node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
- *
- * RETURN: None
- *
- * DESCRIPTION: Info messages for the predefined validation module. Messages
- * are only emitted the first time a problem with a particular
- * method/object is detected. This prevents a flood of
- * messages for methods that are repeatedly evaluated.
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
- u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...)
-{
- va_list args;
-
- /*
- * Warning messages for this method/object will be disabled after the
- * first time a validation fails or an object is successfully repaired.
- */
- if (node_flags & ANOBJ_EVALUATED) {
- return;
- }
-
- acpi_os_printf("ACPI Info for %s: ", pathname);
-
- va_start(args, format);
- acpi_os_vprintf(format, args);
- ACPI_COMMON_MSG_SUFFIX;
- va_end(args);
-}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index f5cca3a1300c..d9efa495b433 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -86,6 +86,12 @@ acpi_status acpi_ut_mutex_initialize(void)
spin_lock_init(acpi_gbl_gpe_lock);
spin_lock_init(acpi_gbl_hardware_lock);
+ /* Mutex for _OSI support */
+ status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
/* Create the reader/writer lock for namespace access */
status = acpi_ut_create_rw_lock(&acpi_gbl_namespace_rw_lock);
@@ -117,6 +123,8 @@ void acpi_ut_mutex_terminate(void)
acpi_ut_delete_mutex(i);
}
+ acpi_os_delete_mutex(acpi_gbl_osi_mutex);
+
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
@@ -220,18 +228,17 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
- "Mutex [%s] already acquired by this thread [%p]",
+ "Mutex [%s] already acquired by this thread [%u]",
acpi_ut_get_mutex_name
(mutex_id),
- ACPI_CAST_PTR(void,
- this_thread_id)));
+ (u32)this_thread_id));
return (AE_ALREADY_ACQUIRED);
}
ACPI_ERROR((AE_INFO,
- "Invalid acquire order: Thread %p owns [%s], wants [%s]",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Invalid acquire order: Thread %u owns [%s], wants [%s]",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(i),
acpi_ut_get_mutex_name(mutex_id)));
@@ -242,24 +249,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
#endif
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %p attempting to acquire Mutex [%s]\n",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Thread %u attempting to acquire Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "Thread %p acquired Mutex [%s]\n",
- ACPI_CAST_PTR(void, this_thread_id),
+ "Thread %u acquired Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [0x%X]",
- ACPI_CAST_PTR(void, this_thread_id), mutex_id));
+ "Thread %u could not acquire Mutex [0x%X]",
+ (u32)this_thread_id, mutex_id));
}
return (status);
@@ -279,10 +286,14 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
+ acpi_thread_id this_thread_id;
+
ACPI_FUNCTION_NAME(ut_release_mutex);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
- ACPI_CAST_PTR(void, acpi_os_get_thread_id()),
+ this_thread_id = acpi_os_get_thread_id();
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
new file mode 100644
index 000000000000..18c59a85fdca
--- /dev/null
+++ b/drivers/acpi/acpica/utosi.c
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * Module Name: utosi - Support for the _OSI predefined control method
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utosi")
+
+/*
+ * Strings supported by the _OSI predefined control method (which is
+ * implemented internally within this module.)
+ *
+ * March 2009: Removed "Linux" as this host no longer wants to respond true
+ * for this string. Basically, the only safe OS strings are Windows-related
+ * and in many or most cases represent the only test path within the
+ * BIOS-provided ASL code.
+ *
+ * The last element of each entry is used to track the newest version of
+ * Windows that the BIOS has requested.
+ */
+static struct acpi_interface_info acpi_default_supported_interfaces[] = {
+ /* Operating System Vendor Strings */
+
+ {"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000}, /* Windows 2000 */
+ {"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP}, /* Windows XP */
+ {"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1}, /* Windows XP SP1 */
+ {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
+ {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
+ {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
+ {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
+ {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
+ {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
+ {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
+ {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
+
+ /* Feature Group Strings */
+
+ {"Extended Address Space Descriptor", NULL, 0, 0}
+
+ /*
+ * All "optional" feature group strings (features that are implemented
+ * by the host) should be dynamically added by the host via
+ * acpi_install_interface and should not be manually added here.
+ *
+ * Examples of optional feature group strings:
+ *
+ * "Module Device"
+ * "Processor Device"
+ * "3.0 Thermal Model"
+ * "3.0 _SCP Extensions"
+ * "Processor Aggregator Device"
+ */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_initialize_interfaces
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the global _OSI supported interfaces list
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_initialize_interfaces(void)
+{
+ u32 i;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
+
+ /* Link the static list of supported interfaces */
+
+ for (i = 0;
+ i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
+ i++) {
+ acpi_default_supported_interfaces[i].next =
+ &acpi_default_supported_interfaces[(acpi_size) i + 1];
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_interface_terminate
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all interfaces in the global list. Sets
+ * acpi_gbl_supported_interfaces to NULL.
+ *
+ ******************************************************************************/
+
+void acpi_ut_interface_terminate(void)
+{
+ struct acpi_interface_info *next_interface;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ next_interface = acpi_gbl_supported_interfaces;
+
+ while (next_interface) {
+ acpi_gbl_supported_interfaces = next_interface->next;
+
+ /* Only interfaces added at runtime can be freed */
+
+ if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+ ACPI_FREE(next_interface->name);
+ ACPI_FREE(next_interface);
+ }
+
+ next_interface = acpi_gbl_supported_interfaces;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_install_interface
+ *
+ * PARAMETERS: interface_name - The interface to install
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install the interface into the global interface list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *interface_info;
+
+ /* Allocate info block and space for the name string */
+
+ interface_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
+ if (!interface_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ interface_info->name =
+ ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
+ if (!interface_info->name) {
+ ACPI_FREE(interface_info);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Initialize new info and insert at the head of the global list */
+
+ ACPI_STRCPY(interface_info->name, interface_name);
+ interface_info->flags = ACPI_OSI_DYNAMIC;
+ interface_info->next = acpi_gbl_supported_interfaces;
+
+ acpi_gbl_supported_interfaces = interface_info;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_interface
+ *
+ * PARAMETERS: interface_name - The interface to remove
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the interface from the global interface list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *previous_interface;
+ struct acpi_interface_info *next_interface;
+
+ previous_interface = next_interface = acpi_gbl_supported_interfaces;
+ while (next_interface) {
+ if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+
+			/* Found: name is either in the static list or was added at runtime */
+
+ if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+
+ /* Interface was added dynamically, remove and free it */
+
+ if (previous_interface == next_interface) {
+ acpi_gbl_supported_interfaces =
+ next_interface->next;
+ } else {
+ previous_interface->next =
+ next_interface->next;
+ }
+
+ ACPI_FREE(next_interface->name);
+ ACPI_FREE(next_interface);
+ } else {
+ /*
+ * Interface is in static list. If marked invalid, then it
+ * does not actually exist. Else, mark it invalid.
+ */
+ if (next_interface->flags & ACPI_OSI_INVALID) {
+ return (AE_NOT_EXIST);
+ }
+
+ next_interface->flags |= ACPI_OSI_INVALID;
+ }
+
+ return (AE_OK);
+ }
+
+ previous_interface = next_interface;
+ next_interface = next_interface->next;
+ }
+
+ /* Interface was not found */
+
+ return (AE_NOT_EXIST);
+}
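/*
 * Illustrative semantics of the removal logic above: removing a static
 * string such as "Windows 2006" only sets ACPI_OSI_INVALID, so a later
 * acpi_install_interface() of the same name (see utxface.c below) simply
 * clears that bit again; removing a runtime-added string frees both the
 * info block and the name. Removing an already-invalidated static entry
 * (or an unknown name) returns AE_NOT_EXIST.
 */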
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_interface
+ *
+ * PARAMETERS: interface_name - The interface to find
+ *
+ * RETURN: struct acpi_interface_info if found. NULL if not found.
+ *
+ * DESCRIPTION: Search for the specified interface name in the global list.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
+{
+ struct acpi_interface_info *next_interface;
+
+ next_interface = acpi_gbl_supported_interfaces;
+ while (next_interface) {
+ if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+ return (next_interface);
+ }
+
+ next_interface = next_interface->next;
+ }
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_osi_implementation
+ *
+ * PARAMETERS: walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Implementation of the _OSI predefined control method. When
+ * an invocation of _OSI is encountered in the system AML,
+ * control is transferred to this function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+{
+ union acpi_operand_object *string_desc;
+ union acpi_operand_object *return_desc;
+ struct acpi_interface_info *interface_info;
+ acpi_interface_handler interface_handler;
+ u32 return_value;
+
+ ACPI_FUNCTION_TRACE(ut_osi_implementation);
+
+ /* Validate the string input argument (from the AML caller) */
+
+ string_desc = walk_state->arguments[0].object;
+ if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Create a return object */
+
+ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Default return value is 0, NOT SUPPORTED */
+
+ return_value = 0;
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ /* Lookup the interface in the global _OSI list */
+
+ interface_info = acpi_ut_get_interface(string_desc->string.pointer);
+ if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
+ /*
+ * The interface is supported.
+ * Update the osi_data if necessary. We keep track of the latest
+ * version of Windows that has been requested by the BIOS.
+ */
+ if (interface_info->value > acpi_gbl_osi_data) {
+ acpi_gbl_osi_data = interface_info->value;
+ }
+
+ return_value = ACPI_UINT32_MAX;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+
+ /*
+ * Invoke an optional _OSI interface handler. The host OS may wish
+ * to do some interface-specific handling. For example, warn about
+ * certain interfaces or override the true/false support value.
+ */
+ interface_handler = acpi_gbl_interface_handler;
+ if (interface_handler) {
+ return_value =
+ interface_handler(string_desc->string.pointer,
+ return_value);
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
+ "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
+ string_desc->string.pointer,
+ return_value == 0 ? "not " : ""));
+
+ /* Complete the return object */
+
+ return_desc->integer.value = return_value;
+ walk_state->return_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
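/*
 * Illustrative flow of the method above: for _OSI("Windows 2006") the
 * lookup finds the static entry, acpi_gbl_osi_data is raised to
 * ACPI_OSI_WIN_VISTA if it was lower, and the return value is 0xFFFFFFFF
 * (Ones). For an unknown or invalidated string the return value stays 0,
 * unless an installed interface handler overrides it.
 */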
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 7f8cefcb2b32..1f484c9a6888 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -110,6 +110,15 @@ acpi_status __init acpi_initialize_subsystem(void)
return_ACPI_STATUS(status);
}
+ /* Initialize the global OSI interfaces list with the static names */
+
+ status = acpi_ut_initialize_interfaces();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During OSI interfaces initialization"));
+ return_ACPI_STATUS(status);
+ }
+
/* If configured, initialize the AML debugger */
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
@@ -290,19 +299,6 @@ acpi_status acpi_initialize_objects(u32 flags)
}
/*
- * Complete the GPE initialization for the GPE blocks defined in the FADT
- * (GPE block 0 and 1).
- *
- * NOTE: Currently, there seems to be no need to run the _REG methods
- * before enabling the GPEs.
- */
- if (!(flags & ACPI_NO_EVENT_INIT)) {
- status = acpi_ev_install_fadt_gpes();
- if (ACPI_FAILURE(status))
- return (status);
- }
-
- /*
* Empty the caches (delete the cached objects) on the assumption that
* the table load filled them up more than they will be at runtime --
* thus wasting non-paged memory.
@@ -506,6 +502,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
+
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
@@ -529,4 +526,117 @@ acpi_status acpi_purge_cached_objects(void)
}
ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
-#endif
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_install_interface
+ *
+ * PARAMETERS: interface_name - The interface to install
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install an _OSI interface to the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface(acpi_string interface_name)
+{
+ acpi_status status;
+ struct acpi_interface_info *interface_info;
+
+ /* Parameter validation */
+
+ if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ /* Check if the interface name is already in the global list */
+
+ interface_info = acpi_ut_get_interface(interface_name);
+ if (interface_info) {
+ /*
+ * The interface already exists in the list. This is OK if the
+ * interface has been marked invalid -- just clear the bit.
+ */
+ if (interface_info->flags & ACPI_OSI_INVALID) {
+ interface_info->flags &= ~ACPI_OSI_INVALID;
+ status = AE_OK;
+ } else {
+ status = AE_ALREADY_EXISTS;
+ }
+ } else {
+ /* New interface name, install into the global list */
+
+ status = acpi_ut_install_interface(interface_name);
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_remove_interface
+ *
+ * PARAMETERS: interface_name - The interface to remove
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove an _OSI interface from the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_remove_interface(acpi_string interface_name)
+{
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ status = acpi_ut_remove_interface(interface_name);
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_install_interface_handler
+ *
+ * PARAMETERS: Handler - The _OSI interface handler to install
+ * NULL means "remove existing handler"
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the predefined _OSI ACPI method. The
+ *              handler is invoked during execution of the internal
+ *              implementation of _OSI. A NULL handler simply removes any
+ *              existing handler.
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
+{
+ acpi_status status = AE_OK;
+
+ (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+ if (handler && acpi_gbl_interface_handler) {
+ status = AE_ALREADY_EXISTS;
+ } else {
+ acpi_gbl_interface_handler = handler;
+ }
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+#endif /* !ACPI_ASL_COMPILER */
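A minimal usage sketch of the new exports above (the "FooVendor" string, the
handler policy and the function names are hypothetical; error handling is
trimmed):

	#include <linux/init.h>
	#include <acpi/acpi.h>	/* acpi_install_interface(), handler typedef */

	static u32 example_osi_handler(acpi_string interface, u32 supported)
	{
		/* Hypothetical policy: never claim support for "FooVendor" */
		if (!ACPI_STRCMP(interface, "FooVendor"))
			return 0;

		/* Otherwise keep the value acpi_ut_osi_implementation() computed */
		return supported;
	}

	static int __init example_osi_setup(void)
	{
		static char feature[] = "Processor Aggregator Device";

		/* Advertise an optional feature group string */
		acpi_install_interface(feature);

		/*
		 * Install the single _OSI handler; a second non-NULL call
		 * returns AE_ALREADY_EXISTS until the handler is removed
		 * by passing NULL.
		 */
		acpi_install_interface_handler(example_osi_handler);
		return 0;
	}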
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
new file mode 100644
index 000000000000..6f12e314fbae
--- /dev/null
+++ b/drivers/acpi/acpica/utxferror.c
@@ -0,0 +1,415 @@
+/*******************************************************************************
+ *
+ * Module Name: utxferror - Various error/warning output functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxferror")
+
+/*
+ * This module is used for the in-kernel ACPICA as well as the ACPICA
+ * tools/applications.
+ *
+ * For the i_aSL compiler case, the output is redirected to stderr so that
+ * any of the various ACPI errors and warnings do not appear in the output
+ * files, for either the compiler or disassembler portions of the tool.
+ */
+#ifdef ACPI_ASL_COMPILER
+#include <stdio.h>
+extern FILE *acpi_gbl_output_file;
+
+#define ACPI_MSG_REDIRECT_BEGIN \
+ FILE *output_file = acpi_gbl_output_file; \
+ acpi_os_redirect_output (stderr);
+
+#define ACPI_MSG_REDIRECT_END \
+ acpi_os_redirect_output (output_file);
+
+#else
+/*
+ * non-i_aSL case - no redirection, nothing to do
+ */
+#define ACPI_MSG_REDIRECT_BEGIN
+#define ACPI_MSG_REDIRECT_END
+#endif
+/*
+ * Common message prefixes
+ */
+#define ACPI_MSG_ERROR "ACPI Error: "
+#define ACPI_MSG_EXCEPTION "ACPI Exception: "
+#define ACPI_MSG_WARNING "ACPI Warning: "
+#define ACPI_MSG_INFO "ACPI: "
+/*
+ * Common message suffix
+ */
+#define ACPI_MSG_SUFFIX \
+ acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
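+/*
+ * Illustrative result of the suffix (version and line number are examples
+ * only): an acpi_error() call from utosi.c line 123 with the message
+ * "Something failed" would print:
+ *
+ *   ACPI Error: Something failed (20101013/utosi-123)
+ */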
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Error" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_error(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_error)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_exception
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Status - Status to be formatted
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
+ * and decoded acpi_status.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_exception(const char *module_name,
+ u32 line_number, acpi_status status, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_format_exception(status));
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_exception)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Warning" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_WARNING);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_warning)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_info
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print generic "ACPI:" information message. There is no
+ * module/line/version info in order to keep the message simple.
+ *
+ * TBD: module_name and line_number args are not needed, should be removed.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_INFO);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ acpi_os_printf("\n");
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_info)
+
+/*
+ * The remainder of this module contains internal error functions that may
+ * be configured out.
+ */
+#if !defined (ACPI_NO_ERROR_MESSAGES) && !defined (ACPI_BIN_APP)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_predefined_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Pathname - Full pathname to the node
+ * node_flags - From Namespace node for the method/object
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Warnings for the predefined validation module. Messages are
+ * only emitted the first time a problem with a particular
+ * method/object is detected. This prevents a flood of error
+ * messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...)
+{
+ va_list arg_list;
+
+ /*
+ * Warning messages for this method/object will be disabled after the
+ * first time a validation fails or an object is successfully repaired.
+ */
+ if (node_flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
+ acpi_os_printf(ACPI_MSG_WARNING "For %s: ", pathname);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_predefined_info
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Pathname - Full pathname to the node
+ * node_flags - From Namespace node for the method/object
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Info messages for the predefined validation module. Messages
+ * are only emitted the first time a problem with a particular
+ * method/object is detected. This prevents a flood of
+ * messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+ u32 line_number,
+ char *pathname, u8 node_flags, const char *format, ...)
+{
+ va_list arg_list;
+
+ /*
+ * Warning messages for this method/object will be disabled after the
+ * first time a validation fails or an object is successfully repaired.
+ */
+ if (node_flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
+ acpi_os_printf(ACPI_MSG_INFO "For %s: ", pathname);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * internal_name - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the NS node.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_namespace_error(const char *module_name,
+ u32 line_number,
+ const char *internal_name, acpi_status lookup_status)
+{
+ acpi_status status;
+ u32 bad_name;
+ char *name = NULL;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ if (lookup_status == AE_BAD_CHARACTER) {
+
+ /* There is a non-ascii character in the name */
+
+ ACPI_MOVE_32_TO_32(&bad_name,
+ ACPI_CAST_PTR(u32, internal_name));
+ acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+ } else {
+ /* Convert path to external format */
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ internal_name, NULL, &name);
+
+ /* Print target name */
+
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf("[%s]", name);
+ } else {
+ acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
+ }
+
+ if (name) {
+ ACPI_FREE(name);
+ }
+ }
+
+ acpi_os_printf(" Namespace lookup failure, %s",
+ acpi_format_exception(lookup_status));
+
+ ACPI_MSG_SUFFIX;
+ ACPI_MSG_REDIRECT_END;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_method_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Message - Error message to use on failure
+ * prefix_node - Prefix relative to the path
+ * Path - Path to the node (optional)
+ * method_status - Execution status
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the method.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_method_error(const char *module_name,
+ u32 line_number,
+ const char *message,
+ struct acpi_namespace_node *prefix_node,
+ const char *path, acpi_status method_status)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node = prefix_node;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_ERROR);
+
+ if (path) {
+ status =
+ acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+ &node);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("[Could not get node by pathname]");
+ }
+ }
+
+ acpi_ns_print_node_pathname(node, message);
+ acpi_os_printf(", %s", acpi_format_exception(method_status));
+
+ ACPI_MSG_SUFFIX;
+ ACPI_MSG_REDIRECT_END;
+}
+
+#endif /* !ACPI_NO_ERROR_MESSAGES && !ACPI_BIN_APP */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 98417201e9ce..95649d373071 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -42,10 +42,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#define PREFIX "ACPI: "
@@ -98,13 +95,12 @@ enum {
* due to bad math.
*/
ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
struct mutex lock;
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply bat;
-#endif
struct acpi_device *device;
unsigned long update_time;
int rate_now;
@@ -141,7 +137,6 @@ inline int acpi_battery_present(struct acpi_battery *battery)
return battery->device->status.battery_present;
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int acpi_battery_technology(struct acpi_battery *battery)
{
if (!strcasecmp("NiCd", battery->type))
@@ -186,6 +181,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ int ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -214,26 +210,44 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->cycle_count;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = battery->design_voltage * 1000;
+ if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->design_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = battery->voltage_now * 1000;
+ if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->voltage_now * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- val->intval = battery->rate_now * 1000;
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- val->intval = battery->design_capacity * 1000;
+ if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- val->intval = battery->full_charge_capacity * 1000;
+ if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->full_charge_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_ENERGY_NOW:
- val->intval = battery->capacity_now * 1000;
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
+ else
+ val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
@@ -245,9 +259,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->strval = battery->serial_number;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static enum power_supply_property charge_battery_props[] = {
@@ -281,7 +295,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
@@ -412,6 +425,8 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
result = extract_package(battery, buffer.pointer,
info_offsets, ARRAY_SIZE(info_offsets));
kfree(buffer.pointer);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ battery->full_charge_capacity = battery->design_capacity;
return result;
}
@@ -448,6 +463,10 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->rate_now != -1)
battery->rate_now = abs((s16)battery->rate_now);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
+ && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
return result;
}
@@ -492,7 +511,6 @@ static int acpi_battery_init_alarm(struct acpi_battery *battery)
return acpi_battery_set_alarm(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -552,7 +570,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
}
-#endif
static void acpi_battery_quirks(struct acpi_battery *battery)
{
@@ -561,6 +578,33 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
}
}
+/*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report the remaining capacity directly to the OS as a percentage.
+ * In this case they report Last Full Charged Capacity == 100 and
+ * BatteryPresentRate == 0xFFFFFFFF.
+ *
+ * Some rechargeable batteries have also been found to report
+ * percentage remaining capacity:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15979
+ *
+ * Handle this correctly so that such batteries do not break userspace.
+ */
+static void acpi_battery_quirks2(struct acpi_battery *battery)
+{
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+		return;
+
+ if (battery->full_charge_capacity == 100 &&
+ battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
+	    battery->capacity_now >= 0 && battery->capacity_now <= 100) {
+ set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
+ battery->full_charge_capacity = battery->design_capacity;
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
+ }
+}
+
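+/*
+ * Worked example for the quirk above (numbers are illustrative): a pack
+ * with design_capacity == 4400 that reports full_charge_capacity == 100
+ * and rate_now == 0xFFFFFFFF is treated as percentage-based, so a _BST
+ * capacity_now of 57 is rescaled to 57 * 4400 / 100 == 2508 before it is
+ * passed on to the power_supply class.
+ */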
static int acpi_battery_update(struct acpi_battery *battery)
{
int result, old_present = acpi_battery_present(battery);
@@ -568,9 +612,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
if (result)
return result;
if (!acpi_battery_present(battery)) {
-#ifdef CONFIG_ACPI_SYSFS_POWER
sysfs_remove_battery(battery);
-#endif
battery->update_time = 0;
return 0;
}
@@ -582,11 +624,11 @@ static int acpi_battery_update(struct acpi_battery *battery)
acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (!battery->bat.dev)
sysfs_add_battery(battery);
-#endif
- return acpi_battery_get_state(battery);
+ result = acpi_battery_get_state(battery);
+ acpi_battery_quirks2(battery);
+ return result;
}
/* --------------------------------------------------------------------------
@@ -867,26 +909,20 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
struct acpi_battery *battery = acpi_driver_data(device);
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct device *old;
-#endif
if (!battery)
return;
-#ifdef CONFIG_ACPI_SYSFS_POWER
old = battery->bat.dev;
-#endif
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
-#ifdef CONFIG_ACPI_SYSFS_POWER
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat.dev)
power_supply_changed(&battery->bat);
-#endif
}
static int acpi_battery_add(struct acpi_device *device)
@@ -934,9 +970,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
sysfs_remove_battery(battery);
-#endif
mutex_destroy(&battery->lock);
kfree(battery);
return 0;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 310e3b9749cb..d68bd61072bb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -935,6 +935,12 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /*
+ * _PDC control method may load dynamic SSDT tables,
+ * and we need to install the table handler before that.
+ */
+ acpi_sysfs_init();
+
acpi_early_processor_set_pdc();
/*
@@ -1026,7 +1032,6 @@ static int __init acpi_init(void)
acpi_scan_init();
acpi_ec_init();
acpi_power_init();
- acpi_sysfs_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1575a9b51f1d..71ef9cd0735f 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -338,7 +338,8 @@ static int acpi_button_add(struct acpi_device *device)
{
struct acpi_button *button;
struct input_dev *input;
- char *hid, *name, *class;
+ const char *hid = acpi_device_hid(device);
+ char *name, *class;
int error;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -353,7 +354,6 @@ static int acpi_button_add(struct acpi_device *device)
goto err_free_button;
}
- hid = acpi_device_hid(device);
name = acpi_device_name(device);
class = acpi_device_class(device);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3fe29e992be8..81514a4918cc 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,6 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
dock_lock(ds, 1);
+ acpi_update_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
@@ -929,7 +930,7 @@ static struct attribute_group dock_attribute_group = {
* allocated and initialize a new dock station device. Find all devices
* that are on the dock station, and register for dock event notifications.
*/
-static int dock_add(acpi_handle handle)
+static int __init dock_add(acpi_handle handle)
{
int ret, id;
struct dock_station ds, *dock_station;
@@ -1023,7 +1024,7 @@ static int dock_remove(struct dock_station *ds)
*
* This is called by acpi_walk_namespace to look for dock stations.
*/
-static acpi_status
+static __init acpi_status
find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
if (is_dock(handle))
@@ -1032,7 +1033,7 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-static acpi_status
+static __init acpi_status
find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
/* If bay is a dock, it's already handled */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f31291ba94d0..372ff80b7b0c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -83,6 +83,11 @@ enum {
EC_FLAGS_BLOCKED, /* Transactions are blocked */
};
+/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
+static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+module_param(ec_delay, uint, 0644);
+MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
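+/*
+ * Usage sketch (values are examples): the default stays ACPI_EC_DELAY; a
+ * larger timeout can be requested at boot with "acpi.ec_delay=750" on the
+ * kernel command line, or adjusted later via
+ * /sys/module/acpi/parameters/ec_delay (milliseconds, mode 0644).
+ */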
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
@@ -210,7 +215,7 @@ static int ec_poll(struct acpi_ec *ec)
int repeat = 2; /* number of command restarts */
while (repeat--) {
unsigned long delay = jiffies +
- msecs_to_jiffies(ACPI_EC_DELAY);
+ msecs_to_jiffies(ec_delay);
do {
/* don't sleep with disabled interrupts */
if (EC_FLAGS_MSI || irqs_disabled()) {
@@ -265,7 +270,7 @@ static int ec_check_ibf0(struct acpi_ec *ec)
static int ec_wait_ibf0(struct acpi_ec *ec)
{
- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
/* interrupt wait manually if GPE mode is not active */
while (time_before(jiffies, delay))
if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index d94d2953c974..60049080c869 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,8 +27,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
@@ -119,122 +117,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
};
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_fan_dir;
-
-static int acpi_fan_read_state(struct seq_file *seq, void *offset)
-{
- struct acpi_device *device = seq->private;
- int state = 0;
-
-
- if (device) {
- if (acpi_bus_get_power(device->handle, &state))
- seq_printf(seq, "status: ERROR\n");
- else
- seq_printf(seq, "status: %s\n",
- !state ? "on" : "off");
- }
- return 0;
-}
-
-static int acpi_fan_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_fan_read_state, PDE(inode)->data);
-}
-
-static ssize_t
-acpi_fan_write_state(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- struct seq_file *m = file->private_data;
- struct acpi_device *device = m->private;
- char state_string[3] = { '\0' };
-
- if (count > sizeof(state_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(state_string, buffer, count))
- return -EFAULT;
-
- state_string[count] = '\0';
- if ((state_string[0] < '0') || (state_string[0] > '3'))
- return -EINVAL;
- if (state_string[1] == '\n')
- state_string[1] = '\0';
- if (state_string[1] != '\0')
- return -EINVAL;
-
- result = acpi_bus_set_power(device->handle,
- simple_strtoul(state_string, NULL, 0));
- if (result)
- return result;
-
- return count;
-}
-
-static const struct file_operations acpi_fan_state_ops = {
- .open = acpi_fan_state_open_fs,
- .read = seq_read,
- .write = acpi_fan_write_state,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!device)
- return -EINVAL;
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_fan_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'status' [R/W] */
- entry = proc_create_data(ACPI_FAN_FILE_STATE,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_fan_state_ops,
- device);
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_FAN_FILE_STATE, acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_fan_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -284,10 +166,6 @@ static int acpi_fan_add(struct acpi_device *device)
dev_err(&device->dev, "Failed to create sysfs link "
"'device'\n");
- result = acpi_fan_add_fs(device);
- if (result)
- goto end;
-
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -303,7 +181,6 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
if (!device || !cdev)
return -EINVAL;
- acpi_fan_remove_fs(device);
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
sysfs_remove_link(&cdev->device.kobj, "device");
thermal_cooling_device_unregister(cdev);
@@ -347,19 +224,9 @@ static int __init acpi_fan_init(void)
{
int result = 0;
-#ifdef CONFIG_ACPI_PROCFS
- acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
- if (!acpi_fan_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_fan_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
+ if (result < 0)
return -ENODEV;
- }
return 0;
}
@@ -369,10 +236,6 @@ static void __exit acpi_fan_exit(void)
acpi_bus_unregister_driver(&acpi_fan_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
-
return;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 65b25a303b86..966feddf6b1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -95,8 +95,25 @@ struct acpi_res_list {
static LIST_HEAD(resource_list_head);
static DEFINE_SPINLOCK(acpi_res_lock);
+/*
+ * This list of permanent mappings is for memory that may be accessed from
+ * interrupt context, where we can't do the ioremap().
+ */
+struct acpi_ioremap {
+ struct list_head list;
+ void __iomem *virt;
+ acpi_physical_address phys;
+ acpi_size size;
+ struct kref ref;
+};
+
+static LIST_HEAD(acpi_ioremaps);
+static DEFINE_SPINLOCK(acpi_ioremap_lock);
+
#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
-static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+static char osi_setup_string[OSI_STRING_LENGTH_MAX];
+
+static void __init acpi_osi_setup_late(void);
/*
* The story of _OSI(Linux)
@@ -138,6 +155,20 @@ static struct osi_linux {
unsigned int known:1;
} osi_linux = { 0, 0, 0, 0};
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+ if (!strcmp("Linux", interface)) {
+
+ printk(KERN_NOTICE FW_BUG PREFIX
+ "BIOS _OSI(Linux) query %s%s\n",
+ osi_linux.enable ? "honored" : "ignored",
+ osi_linux.cmdline ? " via cmdline" :
+ osi_linux.dmi ? " via DMI" : "");
+ }
+
+ return supported;
+}
+
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
{
@@ -185,36 +216,6 @@ static int __init acpi_reserve_resources(void)
}
device_initcall(acpi_reserve_resources);
-acpi_status __init acpi_os_initialize(void)
-{
- return AE_OK;
-}
-
-acpi_status acpi_os_initialize1(void)
-{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
- BUG_ON(!kacpid_wq);
- BUG_ON(!kacpi_notify_wq);
- BUG_ON(!kacpi_hotplug_wq);
- return AE_OK;
-}
-
-acpi_status acpi_os_terminate(void)
-{
- if (acpi_irq_handler) {
- acpi_os_remove_interrupt_handler(acpi_irq_irq,
- acpi_irq_handler);
- }
-
- destroy_workqueue(kacpid_wq);
- destroy_workqueue(kacpi_notify_wq);
- destroy_workqueue(kacpi_hotplug_wq);
-
- return AE_OK;
-}
-
void acpi_os_printf(const char *fmt, ...)
{
va_list args;
@@ -260,29 +261,135 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
}
}
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
+{
+ struct acpi_ioremap *map;
+
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ if (map->phys <= phys &&
+ phys + size <= map->phys + map->size)
+ return map;
+
+ return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+ struct acpi_ioremap *map;
+
+ map = acpi_map_lookup(phys, size);
+ if (map)
+ return map->virt + (phys - map->phys);
+
+ return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
+{
+ struct acpi_ioremap *map;
+
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ if (map->virt <= virt &&
+ virt + size <= map->virt + map->size)
+ return map;
+
+ return NULL;
+}
+
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
+ struct acpi_ioremap *map, *tmp_map;
+ unsigned long flags, pg_sz;
+ void __iomem *virt;
+ phys_addr_t pg_off;
+
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
return NULL;
}
- if (acpi_gbl_permanent_mmap)
- /*
- * ioremap checks to ensure this is in reserved space
- */
- return ioremap((unsigned long)phys, size);
- else
+
+ if (!acpi_gbl_permanent_mmap)
return __acpi_map_table((unsigned long)phys, size);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return NULL;
+
+ pg_off = round_down(phys, PAGE_SIZE);
+ pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
+ virt = ioremap(pg_off, pg_sz);
+ if (!virt) {
+ kfree(map);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&map->list);
+ map->virt = virt;
+ map->phys = pg_off;
+ map->size = pg_sz;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ /* Check if page has already been mapped. */
+ tmp_map = acpi_map_lookup(phys, size);
+ if (tmp_map) {
+ kref_get(&tmp_map->ref);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ iounmap(map->virt);
+ kfree(map);
+ return tmp_map->virt + (phys - tmp_map->phys);
+ }
+ list_add_tail_rcu(&map->list, &acpi_ioremaps);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
+static void acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_ioremap *map;
+
+ map = container_of(ref, struct acpi_ioremap, ref);
+ list_del_rcu(&map->list);
+}
+
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
- if (acpi_gbl_permanent_mmap)
- iounmap(virt);
- else
+ struct acpi_ioremap *map;
+ unsigned long flags;
+ int del;
+
+ if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
+ return;
+ }
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ map = acpi_map_lookup_virt(virt, size);
+ if (!map) {
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+ printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
+ dump_stack();
+ return;
+ }
+
+ del = kref_put(&map->ref, acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->virt);
+ kfree(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
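A minimal usage sketch of the refcounted mapping scheme above (the physical
addresses and sizes are purely illustrative):

	void __iomem *a, *b;

	a = acpi_os_map_memory(0xfed40000, 0x80);  /* creates the ioremap */
	b = acpi_os_map_memory(0xfed40040, 0x10);  /* reuses it, kref bumped */

	acpi_os_unmap_memory(b, 0x10);  /* mapping stays, 'a' still holds a ref */
	acpi_os_unmap_memory(a, 0x80);  /* last ref: synchronize_rcu() + iounmap() */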
@@ -292,6 +399,44 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
+int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+{
+ void __iomem *virt;
+
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ if (!addr->address || !addr->bit_width)
+ return -EINVAL;
+
+ virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+ if (!virt)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
+
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+{
+ void __iomem *virt;
+ unsigned long flags;
+ acpi_size size = addr->bit_width / 8;
+
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return;
+
+ if (!addr->address || !addr->bit_width)
+ return;
+
+ spin_lock_irqsave(&acpi_ioremap_lock, flags);
+ virt = acpi_map_vaddr_lookup(addr->address, size);
+ spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+ acpi_os_unmap_memory(virt, size);
+}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
+
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
@@ -495,8 +640,15 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
u32 dummy;
void __iomem *virt_addr;
-
- virt_addr = ioremap(phys_addr, width);
+ int size = width / 8, unmap = 0;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
+ if (!virt_addr) {
+ virt_addr = ioremap(phys_addr, size);
+ unmap = 1;
+ }
if (!value)
value = &dummy;
@@ -514,7 +666,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
BUG();
}
- iounmap(virt_addr);
+ if (unmap)
+ iounmap(virt_addr);
return AE_OK;
}
@@ -523,8 +676,15 @@ acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
void __iomem *virt_addr;
-
- virt_addr = ioremap(phys_addr, width);
+ int size = width / 8, unmap = 0;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
+ if (!virt_addr) {
+ virt_addr = ioremap(phys_addr, size);
+ unmap = 1;
+ }
switch (width) {
case 8:
@@ -540,16 +700,18 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
BUG();
}
- iounmap(virt_addr);
+ if (unmap)
+ iounmap(virt_addr);
return AE_OK;
}
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
- u32 *value, u32 width)
+ u64 *value, u32 width)
{
int result, size;
+ u32 value32;
if (!value)
return AE_BAD_PARAMETER;
@@ -570,7 +732,8 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
result = raw_pci_read(pci_id->segment, pci_id->bus,
PCI_DEVFN(pci_id->device, pci_id->function),
- reg, size, value);
+ reg, size, &value32);
+ *value = value32;
return (result ? AE_ERROR : AE_OK);
}
@@ -602,74 +765,6 @@ acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
return (result ? AE_ERROR : AE_OK);
}
-/* TODO: Change code to take advantage of driver model more */
-static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
- acpi_handle chandle, /* current node */
- struct acpi_pci_id **id,
- int *is_bridge, u8 * bus_number)
-{
- acpi_handle handle;
- struct acpi_pci_id *pci_id = *id;
- acpi_status status;
- unsigned long long temp;
- acpi_object_type type;
-
- acpi_get_parent(chandle, &handle);
- if (handle != rhandle) {
- acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
- bus_number);
-
- status = acpi_get_type(handle, &type);
- if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
- return;
-
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
- &temp);
- if (ACPI_SUCCESS(status)) {
- u32 val;
- pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
- pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
-
- if (*is_bridge)
- pci_id->bus = *bus_number;
-
- /* any nicer way to get bus number of bridge ? */
- status =
- acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
- 8);
- if (ACPI_SUCCESS(status)
- && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
- status =
- acpi_os_read_pci_configuration(pci_id, 0x18,
- &val, 8);
- if (!ACPI_SUCCESS(status)) {
- /* Certainly broken... FIX ME */
- return;
- }
- *is_bridge = 1;
- pci_id->bus = val;
- status =
- acpi_os_read_pci_configuration(pci_id, 0x19,
- &val, 8);
- if (ACPI_SUCCESS(status)) {
- *bus_number = val;
- }
- } else
- *is_bridge = 0;
- }
- }
-}
-
-void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
- acpi_handle chandle, /* current node */
- struct acpi_pci_id **id)
-{
- int is_bridge = 1;
- u8 bus_number = (*id)->bus;
-
- acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
-}
-
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
@@ -780,16 +875,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
/*
- * Allocate the memory for a spinlock and initialize it.
- */
-acpi_status acpi_os_create_lock(acpi_spinlock * handle)
-{
- spin_lock_init(*handle);
-
- return AE_OK;
-}
-
-/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
@@ -977,6 +1062,12 @@ static void __init set_osi_linux(unsigned int enable)
printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
enable ? "Add": "Delet");
}
+
+ if (osi_linux.enable)
+ acpi_osi_setup("Linux");
+ else
+ acpi_osi_setup("!Linux");
+
return;
}
@@ -1011,21 +1102,33 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
* string starting with '!' disables that string
* otherwise string is added to list, augmenting built-in strings
*/
-int __init acpi_osi_setup(char *str)
+static void __init acpi_osi_setup_late(void)
{
- if (str == NULL || *str == '\0') {
- printk(KERN_INFO PREFIX "_OSI method disabled\n");
- acpi_gbl_create_osi_method = FALSE;
- } else if (!strcmp("!Linux", str)) {
+ char *str = osi_setup_string;
+
+ if (*str == '\0')
+ return;
+
+ if (!strcmp("!Linux", str)) {
acpi_cmdline_osi_linux(0); /* !enable */
} else if (*str == '!') {
- if (acpi_osi_invalidate(++str) == AE_OK)
+ if (acpi_remove_interface(++str) == AE_OK)
printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
} else if (!strcmp("Linux", str)) {
acpi_cmdline_osi_linux(1); /* enable */
- } else if (*osi_additional_string == '\0') {
- strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
- printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+ } else {
+ if (acpi_install_interface(str) == AE_OK)
+ printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+ }
+}
+
+int __init acpi_osi_setup(char *str)
+{
+ if (str == NULL || *str == '\0') {
+ printk(KERN_INFO PREFIX "_OSI method disabled\n");
+ acpi_gbl_create_osi_method = FALSE;
+ } else {
+ strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX);
}
return 1;
@@ -1152,21 +1255,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
}
EXPORT_SYMBOL(acpi_check_region);
-int acpi_check_mem_region(resource_size_t start, resource_size_t n,
- const char *name)
-{
- struct resource res = {
- .start = start,
- .end = start + n - 1,
- .name = name,
- .flags = IORESOURCE_MEM,
- };
-
- return acpi_check_resource_conflict(&res);
-
-}
-EXPORT_SYMBOL(acpi_check_mem_region);
-
/*
* Let drivers know whether the resource checks are effective
*/
@@ -1282,38 +1370,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
return (AE_OK);
}
-/******************************************************************************
- *
- * FUNCTION: acpi_os_validate_interface
- *
- * PARAMETERS: interface - Requested interface to be validated
- *
- * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
- *
- * DESCRIPTION: Match an interface string to the interfaces supported by the
- * host. Strings originate from an AML call to the _OSI method.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_interface (char *interface)
-{
- if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
- return AE_OK;
- if (!strcmp("Linux", interface)) {
-
- printk(KERN_NOTICE PREFIX
- "BIOS _OSI(Linux) query %s%s\n",
- osi_linux.enable ? "honored" : "ignored",
- osi_linux.cmdline ? " via cmdline" :
- osi_linux.dmi ? " via DMI" : "");
-
- if (osi_linux.enable)
- return AE_OK;
- }
- return AE_SUPPORT;
-}
-
static inline int acpi_res_list_add(struct acpi_res_list *res)
{
struct acpi_res_list *res_list_elem;
@@ -1462,5 +1518,46 @@ acpi_os_validate_address (
}
return AE_OK;
}
-
#endif
+
+acpi_status __init acpi_os_initialize(void)
+{
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
+ return AE_OK;
+}
+
+acpi_status acpi_os_initialize1(void)
+{
+ kacpid_wq = create_workqueue("kacpid");
+ kacpi_notify_wq = create_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ BUG_ON(!kacpid_wq);
+ BUG_ON(!kacpi_notify_wq);
+ BUG_ON(!kacpi_hotplug_wq);
+ acpi_install_interface_handler(acpi_osi_handler);
+ acpi_osi_setup_late();
+ return AE_OK;
+}
+
+acpi_status acpi_os_terminate(void)
+{
+ if (acpi_irq_handler) {
+ acpi_os_remove_interrupt_handler(acpi_irq_irq,
+ acpi_irq_handler);
+ }
+
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+
+ destroy_workqueue(kacpid_wq);
+ destroy_workqueue(kacpi_notify_wq);
+ destroy_workqueue(kacpi_hotplug_wq);
+
+ return AE_OK;
+}
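
The osl.c hunks above defer _OSI string handling: acpi_osi_setup() now only records the command-line string, and acpi_osi_setup_late() applies it during acpi_os_initialize1() via acpi_install_interface()/acpi_remove_interface(). A minimal sketch of this record-now, apply-later pattern (standalone example with hypothetical names, not the kernel code):

#include <stdio.h>
#include <string.h>

#define SETUP_STRING_MAX 64

static char setup_string[SETUP_STRING_MAX];

/* Early boot: just remember what was requested. */
static void example_setup(const char *str)
{
        strncpy(setup_string, str, SETUP_STRING_MAX - 1);
}

/* Later, once the interface table exists, apply the request. */
static void example_setup_late(void)
{
        if (setup_string[0] == '\0')
                return;
        if (setup_string[0] == '!')
                printf("removing interface %s\n", setup_string + 1);
        else
                printf("installing interface %s\n", setup_string);
}

int main(void)
{
        example_setup("!Windows 2006");
        example_setup_late();
        return 0;
}
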
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e4804fb05e23..f907cfbfa13c 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8d47a5846aeb..9ff80a7e9f6a 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 3ba8d1f44a73..96668ad09622 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 844c155aeb0f..67dedeed144c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -80,18 +80,13 @@ static struct acpi_driver acpi_power_driver = {
},
};
-struct acpi_power_reference {
- struct list_head node;
- struct acpi_device *device;
-};
-
struct acpi_power_resource {
struct acpi_device * device;
acpi_bus_id name;
u32 system_level;
u32 order;
+ unsigned int ref_count;
struct mutex resource_lock;
- struct list_head reference;
};
static struct list_head acpi_power_resource_list;
@@ -184,101 +179,89 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return result;
}
-static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
+static int __acpi_power_on(struct acpi_power_resource *resource)
{
- int result = 0;
- int found = 0;
acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
- struct acpi_power_reference *ref;
+ status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D0;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
+ resource->name));
+
+ return 0;
+}
+
+static int acpi_power_on(acpi_handle handle)
+{
+ int result = 0;
+ struct acpi_power_resource *resource = NULL;
result = acpi_power_get_context(handle, &resource);
if (result)
return result;
mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- ref = container_of(node, struct acpi_power_reference, node);
- if (dev->handle == ref->device->handle) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already referenced by resource [%s]\n",
- dev->pnp.bus_id, resource->name));
- found = 1;
- break;
- }
- }
- if (!found) {
- ref = kmalloc(sizeof (struct acpi_power_reference),
- irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
- if (!ref) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "kmalloc() failed\n"));
- mutex_unlock(&resource->resource_lock);
- return -ENOMEM;
- }
- list_add_tail(&ref->node, &resource->reference);
- ref->device = dev;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] added to resource [%s] references\n",
- dev->pnp.bus_id, resource->name));
+ if (resource->ref_count++) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] already on",
+ resource->name));
+ } else {
+ result = __acpi_power_on(resource);
}
- mutex_unlock(&resource->resource_lock);
- status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D0;
+ mutex_unlock(&resource->resource_lock);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n",
- resource->name));
return 0;
}
-static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
+static int acpi_power_off_device(acpi_handle handle)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
- struct acpi_power_reference *ref;
result = acpi_power_get_context(handle, &resource);
if (result)
return result;
mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- ref = container_of(node, struct acpi_power_reference, node);
- if (dev->handle == ref->device->handle) {
- list_del(&ref->node);
- kfree(ref);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] removed from resource [%s] references\n",
- dev->pnp.bus_id, resource->name));
- break;
- }
+
+ if (!resource->ref_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] already off",
+ resource->name));
+ goto unlock;
}
- if (!list_empty(&resource->reference)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cannot turn resource [%s] off - resource is in use\n",
- resource->name));
- mutex_unlock(&resource->resource_lock);
- return 0;
+ if (--resource->ref_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] still in use\n",
+ resource->name));
+ goto unlock;
}
- mutex_unlock(&resource->resource_lock);
status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ if (ACPI_FAILURE(status)) {
+ result = -ENODEV;
+ } else {
+ /* Update the power resource's _device_ power state */
+ resource->device->power.state = ACPI_STATE_D3;
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D3;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Power resource [%s] turned off\n",
+ resource->name));
+ }
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
- resource->name));
+ unlock:
+ mutex_unlock(&resource->resource_lock);
- return 0;
+ return result;
}
/**
@@ -364,7 +347,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
/* Open power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
+ int ret = acpi_power_on(dev->wakeup.resources.handles[i]);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
@@ -420,7 +403,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
/* Close power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
int ret = acpi_power_off_device(
- dev->wakeup.resources.handles[i], dev);
+ dev->wakeup.resources.handles[i]);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
@@ -500,7 +483,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
* (e.g. so the device doesn't lose power while transitioning).
*/
for (i = 0; i < tl->count; i++) {
- result = acpi_power_on(tl->handles[i], device);
+ result = acpi_power_on(tl->handles[i]);
if (result)
goto end;
}
@@ -513,7 +496,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
* Then we dereference all power resources used in the current list.
*/
for (i = 0; i < cl->count; i++) {
- result = acpi_power_off_device(cl->handles[i], device);
+ result = acpi_power_off_device(cl->handles[i]);
if (result)
goto end;
}
@@ -551,7 +534,6 @@ static int acpi_power_add(struct acpi_device *device)
resource->device = device;
mutex_init(&resource->resource_lock);
- INIT_LIST_HEAD(&resource->reference);
strcpy(resource->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
@@ -594,22 +576,14 @@ static int acpi_power_add(struct acpi_device *device)
static int acpi_power_remove(struct acpi_device *device, int type)
{
- struct acpi_power_resource *resource = NULL;
- struct list_head *node, *next;
+ struct acpi_power_resource *resource;
-
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
resource = acpi_driver_data(device);
-
- mutex_lock(&resource->resource_lock);
- list_for_each_safe(node, next, &resource->reference) {
- struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node);
- list_del(&ref->node);
- kfree(ref);
- }
- mutex_unlock(&resource->resource_lock);
+ if (!resource)
+ return -EINVAL;
kfree(resource);
@@ -619,29 +593,28 @@ static int acpi_power_remove(struct acpi_device *device, int type)
static int acpi_power_resume(struct acpi_device *device)
{
int result = 0, state;
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_reference *ref;
+ struct acpi_power_resource *resource;
- if (!device || !acpi_driver_data(device))
+ if (!device)
return -EINVAL;
resource = acpi_driver_data(device);
+ if (!resource)
+ return -EINVAL;
+
+ mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(device->handle, &state);
if (result)
- return result;
+ goto unlock;
- mutex_lock(&resource->resource_lock);
- if (state == ACPI_POWER_RESOURCE_STATE_OFF &&
- !list_empty(&resource->reference)) {
- ref = container_of(resource->reference.next, struct acpi_power_reference, node);
- mutex_unlock(&resource->resource_lock);
- result = acpi_power_on(device->handle, ref->device);
- return result;
- }
+ if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count)
+ result = __acpi_power_on(resource);
+ unlock:
mutex_unlock(&resource->resource_lock);
- return 0;
+
+ return result;
}
int __init acpi_power_init(void)
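
The power.c hunks above replace the per-device reference list with a ref_count guarded by resource_lock: _ON is evaluated only on the 0 to 1 transition and _OFF only on the 1 to 0 transition. A small sketch of that counting discipline (hypothetical names; a pthread mutex stands in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

struct power_resource {
        unsigned int ref_count;
        pthread_mutex_t lock;
};

static void resource_on(struct power_resource *res)
{
        pthread_mutex_lock(&res->lock);
        if (res->ref_count++ == 0)
                printf("_ON evaluated\n");      /* first user turns it on */
        pthread_mutex_unlock(&res->lock);
}

static void resource_off(struct power_resource *res)
{
        pthread_mutex_lock(&res->lock);
        if (res->ref_count == 0)
                printf("already off\n");
        else if (--res->ref_count == 0)
                printf("_OFF evaluated\n");     /* last user turns it off */
        pthread_mutex_unlock(&res->lock);
}

int main(void)
{
        struct power_resource res = { 0, PTHREAD_MUTEX_INITIALIZER };

        resource_on(&res);
        resource_on(&res);   /* second user: no _ON */
        resource_off(&res);  /* still in use: no _OFF */
        resource_off(&res);  /* last user: _OFF */
        return 0;
}
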
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 347eb21b2353..85e48047d7b0 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,8 +40,10 @@
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
@@ -244,6 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
return result;
}
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_processor_dir = NULL;
static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
@@ -280,7 +283,16 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
return 0;
}
-
+#else
+static inline int acpi_processor_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_processor_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -842,9 +854,11 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
+#ifdef CONFIG_ACPI_PROCFS
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
+#endif
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
@@ -871,7 +885,9 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return result;
}
@@ -891,7 +907,9 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return;
}
@@ -899,6 +917,4 @@ static void __exit acpi_processor_exit(void)
module_init(acpi_processor_init);
module_exit(acpi_processor_exit);
-EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
-
MODULE_ALIAS("processor");
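
The processor_driver.c changes wrap the /proc interface in CONFIG_ACPI_PROCFS and supply inline no-op stubs when it is disabled, so callers need no #ifdefs of their own. A bare-bones illustration of that stub pattern (hypothetical names, not the kernel interfaces):

struct example_device;          /* opaque here; only pointers are passed around */

#ifdef CONFIG_EXAMPLE_PROCFS
int example_add_fs(struct example_device *dev);     /* real implementation elsewhere */
int example_remove_fs(struct example_device *dev);
#else
/* Config off: callers still compile and link, the stubs simply succeed. */
static inline int example_add_fs(struct example_device *dev) { return 0; }
static inline int example_remove_fs(struct example_device *dev) { return 0; }
#endif
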
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f4428e82b352..dcb38f8ddfda 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -64,7 +64,6 @@
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
-#define ACPI_PROCESSOR_FILE_POWER "power"
#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD 1 /* 1us */
#define C3_OVERHEAD 1 /* 1us */
@@ -1013,7 +1012,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
- state->power_usage = cx->power;
state->flags = 0;
switch (cx->type) {
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 953b25fb9869..fde49b9b1d99 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -44,47 +44,6 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");
-/* --------------------------------------------------------------------------
- Limit Interface
- -------------------------------------------------------------------------- */
-static int acpi_processor_apply_limit(struct acpi_processor *pr)
-{
- int result = 0;
- u16 px = 0;
- u16 tx = 0;
-
-
- if (!pr)
- return -EINVAL;
-
- if (!pr->flags.limit)
- return -ENODEV;
-
- if (pr->flags.throttling) {
- if (pr->limit.user.tx > tx)
- tx = pr->limit.user.tx;
- if (pr->limit.thermal.tx > tx)
- tx = pr->limit.thermal.tx;
-
- result = acpi_processor_set_throttling(pr, tx, false);
- if (result)
- goto end;
- }
-
- pr->limit.state.px = px;
- pr->limit.state.tx = tx;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
- pr->limit.state.px, pr->limit.state.tx));
-
- end:
- if (result)
- printk(KERN_ERR PREFIX "Unable to set limit\n");
-
- return result;
-}
-
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
@@ -107,36 +66,6 @@ static int cpu_has_cpufreq(unsigned int cpu)
return 1;
}
-static int acpi_thermal_cpufreq_increase(unsigned int cpu)
-{
- if (!cpu_has_cpufreq(cpu))
- return -ENODEV;
-
- if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
- CPUFREQ_THERMAL_MAX_STEP) {
- per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
- cpufreq_update_policy(cpu);
- return 0;
- }
-
- return -ERANGE;
-}
-
-static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
-{
- if (!cpu_has_cpufreq(cpu))
- return -ENODEV;
-
- if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
- (CPUFREQ_THERMAL_MIN_STEP + 1))
- per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
- else
- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
- cpufreq_update_policy(cpu);
- /* We reached max freq again and can leave passive mode */
- return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
-}
-
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -238,113 +167,6 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
#endif
-int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
-{
- int result = 0;
- struct acpi_processor *pr = NULL;
- struct acpi_device *device = NULL;
- int tx = 0, max_tx_px = 0;
-
-
- if ((type < ACPI_PROCESSOR_LIMIT_NONE)
- || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
- return -EINVAL;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- pr = acpi_driver_data(device);
- if (!pr)
- return -ENODEV;
-
- /* Thermal limits are always relative to the current Px/Tx state. */
- if (pr->flags.throttling)
- pr->limit.thermal.tx = pr->throttling.state;
-
- /*
- * Our default policy is to only use throttling at the lowest
- * performance state.
- */
-
- tx = pr->limit.thermal.tx;
-
- switch (type) {
-
- case ACPI_PROCESSOR_LIMIT_NONE:
- do {
- result = acpi_thermal_cpufreq_decrease(pr->id);
- } while (!result);
- tx = 0;
- break;
-
- case ACPI_PROCESSOR_LIMIT_INCREMENT:
- /* if going up: P-states first, T-states later */
-
- result = acpi_thermal_cpufreq_increase(pr->id);
- if (!result)
- goto end;
- else if (result == -ERANGE)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At maximum performance state\n"));
-
- if (pr->flags.throttling) {
- if (tx == (pr->throttling.state_count - 1))
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At maximum throttling state\n"));
- else
- tx++;
- }
- break;
-
- case ACPI_PROCESSOR_LIMIT_DECREMENT:
- /* if going down: T-states first, P-states later */
-
- if (pr->flags.throttling) {
- if (tx == 0) {
- max_tx_px = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At minimum throttling state\n"));
- } else {
- tx--;
- goto end;
- }
- }
-
- result = acpi_thermal_cpufreq_decrease(pr->id);
- if (result) {
- /*
- * We only could get -ERANGE, 1 or 0.
- * In the first two cases we reached max freq again.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "At minimum performance state\n"));
- max_tx_px = 1;
- } else
- max_tx_px = 0;
-
- break;
- }
-
- end:
- if (pr->flags.throttling) {
- pr->limit.thermal.px = 0;
- pr->limit.thermal.tx = tx;
-
- result = acpi_processor_apply_limit(pr);
- if (result)
- printk(KERN_ERR PREFIX "Unable to set thermal limit\n");
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
- pr->limit.thermal.px, pr->limit.thermal.tx));
- } else
- result = 0;
- if (max_tx_px)
- return 1;
- else
- return result;
-}
-
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 730863855ed5..ff3632717c51 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,8 +32,10 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -1214,6 +1216,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
return result;
}
+#ifdef CONFIG_ACPI_PROCFS
/* proc interface */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
void *offset)
@@ -1322,3 +1325,4 @@ const struct file_operations acpi_processor_throttling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 4ff76e8174eb..e5dbedb16bbf 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -40,10 +40,7 @@
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
#include <linux/power_supply.h>
-#endif
#include "sbshc.h"
@@ -85,9 +82,7 @@ static const struct acpi_device_id sbs_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply bat;
-#endif
struct acpi_sbs *sbs;
#ifdef CONFIG_ACPI_PROCFS_POWER
struct proc_dir_entry *proc_entry;
@@ -120,9 +115,7 @@ struct acpi_battery {
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
struct acpi_sbs {
-#ifdef CONFIG_ACPI_SYSFS_POWER
struct power_supply charger;
-#endif
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
@@ -166,7 +159,6 @@ static inline int acpi_battery_scale(struct acpi_battery *battery)
acpi_battery_ipscale(battery);
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static int sbs_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -313,7 +305,6 @@ static enum power_supply_property sbs_energy_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
-#endif
/* --------------------------------------------------------------------------
Smart Battery System Management
@@ -449,7 +440,6 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
return result;
}
-#ifdef CONFIG_ACPI_SYSFS_POWER
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -479,7 +469,6 @@ static struct device_attribute alarm_attr = {
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
-#endif
/* --------------------------------------------------------------------------
FS Interface (/proc/acpi)
@@ -798,7 +787,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
&acpi_battery_state_fops, &acpi_battery_alarm_fops,
battery);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
battery->bat.name = battery->name;
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
@@ -819,7 +807,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
goto end;
battery->have_sysfs_alarm = 1;
end:
-#endif
printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
battery->name, battery->present ? "present" : "absent");
@@ -828,17 +815,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
-#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
struct acpi_battery *battery = &sbs->battery[id];
-#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (battery->bat.dev) {
if (battery->have_sysfs_alarm)
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
}
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
if (battery->proc_entry)
acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
@@ -859,14 +842,12 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
if (result)
goto end;
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
sbs->charger.num_properties = ARRAY_SIZE(sbs_ac_props);
sbs->charger.get_property = sbs_get_ac_property;
power_supply_register(&sbs->device->dev, &sbs->charger);
-#endif
printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
@@ -876,10 +857,8 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
static void acpi_charger_remove(struct acpi_sbs *sbs)
{
-#ifdef CONFIG_ACPI_SYSFS_POWER
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
-#endif
#ifdef CONFIG_ACPI_PROCFS_POWER
if (sbs->charger_entry)
acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
@@ -900,9 +879,7 @@ static void acpi_sbs_callback(void *context)
ACPI_SBS_NOTIFY_STATUS,
sbs->charger_present);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
-#endif
}
if (sbs->manager_present) {
for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -919,9 +896,7 @@ static void acpi_sbs_callback(void *context)
ACPI_SBS_NOTIFY_STATUS,
bat->present);
#endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
-#endif
}
}
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b23825ecfa37..2b6c21d86b98 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -26,6 +26,8 @@ extern struct acpi_device *acpi_root;
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
+static const char *dummy_hid = "device";
+
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
@@ -49,6 +51,9 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
int count;
struct acpi_hardware_id *id;
+ if (list_empty(&acpi_dev->pnp.ids))
+ return 0;
+
len = snprintf(modalias, size, "acpi:");
size -= len;
@@ -202,13 +207,15 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
+ if (!list_empty(&dev->pnp.ids)) {
+ result = device_create_file(&dev->dev, &dev_attr_hid);
+ if (result)
+ goto end;
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if (result)
+ goto end;
+ }
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
@@ -316,6 +323,9 @@ static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
struct acpi_device *acpi_dev = to_acpi_device(dev);
int len;
+ if (list_empty(&acpi_dev->pnp.ids))
+ return 0;
+
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
@@ -1010,10 +1020,13 @@ static int acpi_dock_match(struct acpi_device *device)
return acpi_get_handle(device->handle, "_DCK", &tmp);
}
-char *acpi_device_hid(struct acpi_device *device)
+const char *acpi_device_hid(struct acpi_device *device)
{
struct acpi_hardware_id *hid;
+ if (list_empty(&device->pnp.ids))
+ return dummy_hid;
+
hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
return hid->id;
}
@@ -1142,16 +1155,6 @@ static void acpi_device_set_id(struct acpi_device *device)
acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
break;
}
-
- /*
- * We build acpi_devices for some objects that don't have _HID or _CID,
- * e.g., PCI bridges and slots. Drivers can't bind to these objects,
- * but we do use them indirectly by traversing the acpi_device tree.
- * This generic ID isn't useful for driver binding, but it provides
- * the useful property that "every acpi_device has an ID."
- */
- if (list_empty(&device->pnp.ids))
- acpi_add_id(device, "device");
}
static int acpi_device_set_context(struct acpi_device *device)
@@ -1431,6 +1434,7 @@ EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
+ int result;
if (!device)
return -EINVAL;
@@ -1438,7 +1442,11 @@ int acpi_bus_start(struct acpi_device *device)
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
- return acpi_bus_scan(device->handle, &ops, NULL);
+ result = acpi_bus_scan(device->handle, &ops, NULL);
+
+ acpi_update_gpes();
+
+ return result;
}
EXPORT_SYMBOL(acpi_bus_start);
@@ -1552,6 +1560,8 @@ int __init acpi_scan_init(void)
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
+ else
+ acpi_update_gpes();
return result;
}
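
The scan.c hunks above stop storing a fake "device" ID on objects without _HID/_CID; acpi_device_hid() instead returns a shared dummy_hid string, and the hid/modalias sysfs attributes are skipped when the ID list is empty. A short sketch of the shared-fallback idea (hypothetical names, ordinary C in place of the ACPI structures):

#include <stdio.h>

static const char *dummy_hid = "device";

struct example_dev {
        const char *ids[4];
        int nr_ids;
};

/* Return a shared fallback instead of allocating a fake ID per device. */
static const char *example_hid(const struct example_dev *dev)
{
        return dev->nr_ids ? dev->ids[0] : dummy_hid;
}

int main(void)
{
        struct example_dev with_id = { { "PNP0C0A" }, 1 };
        struct example_dev without_id = { { NULL }, 0 };

        printf("%s\n", example_hid(&with_id));     /* PNP0C0A */
        printf("%s\n", example_hid(&without_id));  /* device  */
        return 0;
}
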
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4754ff6e70e6..721d93b3ceee 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -25,7 +25,9 @@
#include "internal.h"
#include "sleep.h"
-u8 sleep_states[ACPI_S_STATE_COUNT];
+static u8 sleep_states[ACPI_S_STATE_COUNT];
+
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -79,8 +81,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
}
#ifdef CONFIG_ACPI_SLEEP
-static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
* and to restore them during the subsequent resume. Windows does that also for
@@ -419,6 +419,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1Z1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -562,7 +570,7 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
@@ -624,7 +632,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
* can wake the system. _S0W may be valid, too.
*/
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
+ (device_may_wakeup(dev) &&
adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
acpi_status status;
@@ -632,7 +640,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
status = acpi_evaluate_integer(handle, acpi_method, NULL,
&d_max);
if (ACPI_FAILURE(status)) {
- d_max = d_min;
+ if (acpi_target_sleep_state != ACPI_STATE_S0 ||
+ status != AE_NOT_FOUND)
+ d_max = d_min;
} else if (d_max < d_min) {
/* Warn the user of the broken DSDT */
printk(KERN_WARNING "ACPI: Wrong value from %s\n",
@@ -646,7 +656,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
*d_min_p = d_min;
return d_max;
}
+#endif /* CONFIG_PM_OPS */
+#ifdef CONFIG_PM_SLEEP
/**
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
* capability of given device
@@ -677,7 +689,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return error;
}
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void acpi_power_off_prepare(void)
{
@@ -702,7 +714,7 @@ static void acpi_power_off(void)
* paths through the BIOS, so disable _GTS and _BFS by default,
* but do speak up and offer the option to enable them.
*/
-void __init acpi_gts_bfs_check(void)
+static void __init acpi_gts_bfs_check(void)
{
acpi_handle dummy;
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index d8821805c3bc..74d59c8f4678 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,5 +1,4 @@
-extern u8 sleep_states[];
extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_devices(u8 sleep_state);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2f8f17131d9f..5a27b0a31315 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -37,12 +37,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
-
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
-
#include <linux/jiffies.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
@@ -195,61 +189,6 @@ struct acpi_thermal {
struct mutex lock;
};
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_cooling_mode(struct file *,
- const char __user *, size_t,
- loff_t *);
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
- size_t, loff_t *);
-
-static const struct file_operations acpi_thermal_state_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_temp_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_temp_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_trip_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_trip_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_cooling_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_cooling_open_fs,
- .read = seq_read,
- .write = acpi_thermal_write_cooling_mode,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_polling_fops = {
- .owner = THIS_MODULE,
- .open = acpi_thermal_polling_open_fs,
- .read = seq_read,
- .write = acpi_thermal_write_polling,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS*/
-
/* --------------------------------------------------------------------------
Thermal Zone Management
-------------------------------------------------------------------------- */
@@ -958,358 +897,6 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_thermal_dir;
-
-static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- seq_puts(seq, "state: ");
-
- if (!tz->state.critical && !tz->state.hot && !tz->state.passive
- && !tz->state.active)
- seq_puts(seq, "ok\n");
- else {
- if (tz->state.critical)
- seq_puts(seq, "critical ");
- if (tz->state.hot)
- seq_puts(seq, "hot ");
- if (tz->state.passive)
- seq_puts(seq, "passive ");
- if (tz->state.active)
- seq_printf(seq, "active[%d]", tz->state.active_index);
- seq_puts(seq, "\n");
- }
-
- end:
- return 0;
-}
-
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_state_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_temp_seq_show(struct seq_file *seq, void *offset)
-{
- int result = 0;
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- result = acpi_thermal_get_temperature(tz);
- if (result)
- goto end;
-
- seq_printf(seq, "temperature: %ld C\n",
- KELVIN_TO_CELSIUS(tz->temperature));
-
- end:
- return 0;
-}
-
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_temp_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
- struct acpi_device *device;
- acpi_status status;
-
- int i = 0;
- int j = 0;
-
-
- if (!tz)
- goto end;
-
- if (tz->trips.critical.flags.valid)
- seq_printf(seq, "critical (S5): %ld C%s",
- KELVIN_TO_CELSIUS(tz->trips.critical.temperature),
- nocrt ? " <disabled>\n" : "\n");
-
- if (tz->trips.hot.flags.valid)
- seq_printf(seq, "hot (S4): %ld C%s",
- KELVIN_TO_CELSIUS(tz->trips.hot.temperature),
- nocrt ? " <disabled>\n" : "\n");
-
- if (tz->trips.passive.flags.valid) {
- seq_printf(seq,
- "passive: %ld C: tc1=%lu tc2=%lu tsp=%lu devices=",
- KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
- tz->trips.passive.tc1, tz->trips.passive.tc2,
- tz->trips.passive.tsp);
- for (j = 0; j < tz->trips.passive.devices.count; j++) {
- status = acpi_bus_get_device(tz->trips.passive.devices.
- handles[j], &device);
- seq_printf(seq, "%4.4s ", status ? "" :
- acpi_device_bid(device));
- }
- seq_puts(seq, "\n");
- } else {
- seq_printf(seq, "passive (forced):");
- if (tz->thermal_zone->forced_passive)
- seq_printf(seq, " %i C\n",
- tz->thermal_zone->forced_passive / 1000);
- else
- seq_printf(seq, "<not set>\n");
- }
-
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!(tz->trips.active[i].flags.valid))
- break;
- seq_printf(seq, "active[%d]: %ld C: devices=",
- i,
- KELVIN_TO_CELSIUS(tz->trips.active[i].temperature));
- for (j = 0; j < tz->trips.active[i].devices.count; j++){
- status = acpi_bus_get_device(tz->trips.active[i].
- devices.handles[j],
- &device);
- seq_printf(seq, "%4.4s ", status ? "" :
- acpi_device_bid(device));
- }
- seq_puts(seq, "\n");
- }
-
- end:
- return 0;
-}
-
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- if (!tz->flags.cooling_mode)
- seq_puts(seq, "<setting not supported>\n");
- else
- seq_puts(seq, "0 - Active; 1 - Passive\n");
-
- end:
- return 0;
-}
-
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_cooling_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_thermal_write_cooling_mode(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *m = file->private_data;
- struct acpi_thermal *tz = m->private;
- int result = 0;
- char mode_string[12] = { '\0' };
-
-
- if (!tz || (count > sizeof(mode_string) - 1))
- return -EINVAL;
-
- if (!tz->flags.cooling_mode)
- return -ENODEV;
-
- if (copy_from_user(mode_string, buffer, count))
- return -EFAULT;
-
- mode_string[count] = '\0';
-
- result = acpi_thermal_set_cooling_mode(tz,
- simple_strtoul(mode_string, NULL,
- 0));
- if (result)
- return result;
-
- acpi_thermal_check(tz);
-
- return count;
-}
-
-static int acpi_thermal_polling_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_thermal *tz = seq->private;
-
-
- if (!tz)
- goto end;
-
- if (!tz->thermal_zone->polling_delay) {
- seq_puts(seq, "<polling disabled>\n");
- goto end;
- }
-
- seq_printf(seq, "polling frequency: %d seconds\n",
- (tz->thermal_zone->polling_delay / 1000));
-
- end:
- return 0;
-}
-
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_thermal_polling_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
-{
- if (!tz)
- return -EINVAL;
-
- /* Convert value to deci-seconds */
- tz->polling_frequency = seconds * 10;
-
- tz->thermal_zone->polling_delay = seconds * 1000;
-
- if (tz->tz_enabled)
- thermal_zone_device_update(tz->thermal_zone);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Polling frequency set to %lu seconds\n",
- tz->polling_frequency/10));
-
- return 0;
-}
-
-static ssize_t
-acpi_thermal_write_polling(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *m = file->private_data;
- struct acpi_thermal *tz = m->private;
- int result = 0;
- char polling_string[12] = { '\0' };
- int seconds = 0;
-
-
- if (!tz || (count > sizeof(polling_string) - 1))
- return -EINVAL;
-
- if (copy_from_user(polling_string, buffer, count))
- return -EFAULT;
-
- polling_string[count] = '\0';
-
- seconds = simple_strtoul(polling_string, NULL, 0);
-
- result = acpi_thermal_set_polling(tz, seconds);
- if (result)
- return result;
-
- acpi_thermal_check(tz);
-
- return count;
-}
-
-static int acpi_thermal_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
-
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_thermal_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_thermal_state_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'temperature' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_thermal_temp_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'trip_points' [R] */
- entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
- S_IRUGO,
- acpi_device_dir(device),
- &acpi_thermal_trip_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'cooling_mode' [R/W] */
- entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_thermal_cooling_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
-
- /* 'polling_frequency' [R/W] */
- entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device),
- &acpi_thermal_polling_fops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_thermal_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_THERMAL_FILE_STATE,
- acpi_device_dir(device));
- remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static inline int acpi_thermal_add_fs(struct acpi_device *device) { return 0; }
-static inline int acpi_thermal_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1428,17 +1015,11 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- result = acpi_thermal_add_fs(device);
- if (result)
- goto unregister_thermal_zone;
-
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
goto end;
-unregister_thermal_zone:
- thermal_zone_device_unregister(tz->thermal_zone);
free_memory:
kfree(tz);
end:
@@ -1454,7 +1035,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
tz = acpi_driver_data(device);
- acpi_thermal_remove_fs(device);
acpi_thermal_unregister_thermal_zone(tz);
mutex_destroy(&tz->lock);
kfree(tz);
@@ -1580,19 +1160,9 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
-#ifdef CONFIG_ACPI_PROCFS
- acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir);
- if (!acpi_thermal_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_thermal_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
+ if (result < 0)
return -ENODEV;
- }
return 0;
}
@@ -1602,10 +1172,6 @@ static void __exit acpi_thermal_exit(void)
acpi_bus_unregister_driver(&acpi_thermal_driver);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
-
return;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 67dec0c675aa..5cd0228d2daa 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -30,8 +30,6 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/backlight.h>
#include <linux/thermal.h>
@@ -152,9 +150,6 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
-#ifdef CONFIG_ACPI_PROCFS
- struct proc_dir_entry *dir;
-#endif
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -210,108 +205,6 @@ struct acpi_video_device {
struct output_device *output_dev;
};
-#ifdef CONFIG_ACPI_PROCFS
-/* bus */
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_ROM_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_ROM_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_bus_POST_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_POST_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_POST(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_POST_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_POST_open_fs,
- .read = seq_read,
- .write = acpi_video_bus_write_POST,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_DOS(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_DOS_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_bus_DOS_open_fs,
- .read = seq_read,
- .write = acpi_video_bus_write_DOS,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/* device */
-static int acpi_video_device_info_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_device_info_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_state_open_fs(struct inode *inode,
- struct file *file);
-static ssize_t acpi_video_device_write_state(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_state_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_state_open_fs,
- .read = seq_read,
- .write = acpi_video_device_write_state,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_brightness_open_fs(struct inode *inode,
- struct file *file);
-static ssize_t acpi_video_device_write_brightness(struct file *file,
- const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_brightness_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_brightness_open_fs,
- .read = seq_read,
- .write = acpi_video_device_write_brightness,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int acpi_video_device_EDID_open_fs(struct inode *inode,
- struct file *file);
-static const struct file_operations acpi_video_device_EDID_fops = {
- .owner = THIS_MODULE,
- .open = acpi_video_device_EDID_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS */
-
static const char device_decode[][30] = {
"motherboard VGA device",
"PCI VGA device",
@@ -1111,646 +1004,6 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_video_dir;
-
-/* video devices */
-
-static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
-
-
- if (!dev)
- goto end;
-
- seq_printf(seq, "device_id: 0x%04x\n", (u32) dev->device_id);
- seq_printf(seq, "type: ");
- if (dev->flags.crt)
- seq_printf(seq, "CRT\n");
- else if (dev->flags.lcd)
- seq_printf(seq, "LCD\n");
- else if (dev->flags.tvout)
- seq_printf(seq, "TVOUT\n");
- else if (dev->flags.dvi)
- seq_printf(seq, "DVI\n");
- else
- seq_printf(seq, "UNKNOWN\n");
-
- seq_printf(seq, "known by bios: %s\n", dev->flags.bios ? "yes" : "no");
-
- end:
- return 0;
-}
-
-static int
-acpi_video_device_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_info_seq_show,
- PDE(inode)->data);
-}
-
-static int
-acpi_video_device_query(struct acpi_video_device *device,
- unsigned long long *state)
-{
- int status;
-
- status = acpi_evaluate_integer(device->dev->handle, "_DGS",
- NULL, state);
-
- return status;
-}
-
-static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
-{
- int status;
- struct acpi_video_device *dev = seq->private;
- unsigned long long state;
-
-
- if (!dev)
- goto end;
-
- status = acpi_video_device_get_state(dev, &state);
- seq_printf(seq, "state: ");
- if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02llx\n", state);
- else
- seq_printf(seq, "<not supported>\n");
-
- status = acpi_video_device_query(dev, &state);
- seq_printf(seq, "query: ");
- if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02llx\n", state);
- else
- seq_printf(seq, "<not supported>\n");
-
- end:
- return 0;
-}
-
-static int
-acpi_video_device_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_state_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_state(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_device *dev = m->private;
- char str[12] = { 0 };
- u32 state = 0;
-
-
- if (!dev || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- state = simple_strtoul(str, NULL, 0);
- state &= ((1ul << 31) | (1ul << 30) | (1ul << 0));
-
- status = acpi_video_device_set_state(dev, state);
-
- if (status)
- return -EFAULT;
-
- return count;
-}
-
-static int
-acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
- int i;
-
-
- if (!dev || !dev->brightness) {
- seq_printf(seq, "<not supported>\n");
- return 0;
- }
-
- seq_printf(seq, "levels: ");
- for (i = 2; i < dev->brightness->count; i++)
- seq_printf(seq, " %d", dev->brightness->levels[i]);
- seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
-
- return 0;
-}
-
-static int
-acpi_video_device_brightness_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_brightness_seq_show,
- PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_brightness(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- struct seq_file *m = file->private_data;
- struct acpi_video_device *dev = m->private;
- char str[5] = { 0 };
- unsigned int level = 0;
- int i;
-
-
- if (!dev || !dev->brightness || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- level = simple_strtoul(str, NULL, 0);
-
- if (level > 100)
- return -EFAULT;
-
- /* validate through the list of available levels */
- for (i = 2; i < dev->brightness->count; i++)
- if (level == dev->brightness->levels[i]) {
- if (!acpi_video_device_lcd_set_level(dev, level))
- return count;
- break;
- }
-
- return -EINVAL;
-}
-
-static int acpi_video_device_EDID_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_device *dev = seq->private;
- int status;
- int i;
- union acpi_object *edid = NULL;
-
-
- if (!dev)
- goto out;
-
- status = acpi_video_device_EDID(dev, &edid, 128);
- if (ACPI_FAILURE(status)) {
- status = acpi_video_device_EDID(dev, &edid, 256);
- }
-
- if (ACPI_FAILURE(status)) {
- goto out;
- }
-
- if (edid && edid->type == ACPI_TYPE_BUFFER) {
- for (i = 0; i < edid->buffer.length; i++)
- seq_putc(seq, edid->buffer.pointer[i]);
- }
-
- out:
- if (!edid)
- seq_printf(seq, "<not supported>\n");
- else
- kfree(edid);
-
- return 0;
-}
-
-static int
-acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_device_EDID_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_device_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry, *device_dir;
- struct acpi_video_device *vid_dev;
-
- vid_dev = acpi_driver_data(device);
- if (!vid_dev)
- return -ENODEV;
-
- device_dir = proc_mkdir(acpi_device_bid(device),
- vid_dev->video->dir);
- if (!device_dir)
- return -ENOMEM;
-
- /* 'info' [R] */
- entry = proc_create_data("info", S_IRUGO, device_dir,
- &acpi_video_device_info_fops, acpi_driver_data(device));
- if (!entry)
- goto err_remove_dir;
-
- /* 'state' [R/W] */
- entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_device_state_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_info;
-
- /* 'brightness' [R/W] */
- entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_device_brightness_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_state;
-
- /* 'EDID' [R] */
- entry = proc_create_data("EDID", S_IRUGO, device_dir,
- &acpi_video_device_EDID_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_brightness;
-
- acpi_device_dir(device) = device_dir;
-
- return 0;
-
- err_remove_brightness:
- remove_proc_entry("brightness", device_dir);
- err_remove_state:
- remove_proc_entry("state", device_dir);
- err_remove_info:
- remove_proc_entry("info", device_dir);
- err_remove_dir:
- remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
- return -ENOMEM;
-}
-
-static int acpi_video_device_remove_fs(struct acpi_device *device)
-{
- struct acpi_video_device *vid_dev;
- struct proc_dir_entry *device_dir;
-
- vid_dev = acpi_driver_data(device);
- if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
- return -ENODEV;
-
- device_dir = acpi_device_dir(device);
- if (device_dir) {
- remove_proc_entry("info", device_dir);
- remove_proc_entry("state", device_dir);
- remove_proc_entry("brightness", device_dir);
- remove_proc_entry("EDID", device_dir);
- remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-
-/* video bus */
-static int acpi_video_bus_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- if (!video)
- goto end;
-
- seq_printf(seq, "Switching heads: %s\n",
- video->flags.multihead ? "yes" : "no");
- seq_printf(seq, "Video ROM: %s\n",
- video->flags.rom ? "yes" : "no");
- seq_printf(seq, "Device to be POSTed on boot: %s\n",
- video->flags.post ? "yes" : "no");
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_info_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_bus_ROM_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- if (!video)
- goto end;
-
- printk(KERN_INFO PREFIX "Please implement %s\n", __func__);
- seq_printf(seq, "<TODO>\n");
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_POST_options(struct acpi_video_bus *video,
- unsigned long long *options)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_VPO",
- NULL, options);
- *options &= 3;
-
- return status;
-}
-
-static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
- unsigned long long options;
- int status;
-
-
- if (!video)
- goto end;
-
- status = acpi_video_bus_POST_options(video, &options);
- if (ACPI_SUCCESS(status)) {
- if (!(options & 1)) {
- printk(KERN_WARNING PREFIX
- "The motherboard VGA device is not listed as a possible POST device.\n");
- printk(KERN_WARNING PREFIX
- "This indicates a BIOS bug. Please contact the manufacturer.\n");
- }
- printk(KERN_WARNING "%llx\n", options);
- seq_printf(seq, "can POST: <integrated video>");
- if (options & 2)
- seq_printf(seq, " <PCI video>");
- if (options & 4)
- seq_printf(seq, " <AGP video>");
- seq_putc(seq, '\n');
- } else
- seq_printf(seq, "<not supported>\n");
- end:
- return 0;
-}
-
-static int
-acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_POST_info_seq_show,
- PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
-{
- int status;
-
- status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
-
- return status;
-}
-
-static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
- int status;
- unsigned long long id;
-
-
- if (!video)
- goto end;
-
- status = acpi_video_bus_get_POST(video, &id);
- if (!ACPI_SUCCESS(status)) {
- seq_printf(seq, "<not supported>\n");
- goto end;
- }
- seq_printf(seq, "device POSTed is <%s>\n", device_decode[id & 3]);
-
- end:
- return 0;
-}
-
-static int acpi_video_bus_DOS_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_video_bus *video = seq->private;
-
-
- seq_printf(seq, "DOS setting: <%d>\n", video->dos_setting);
-
- return 0;
-}
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_POST_seq_show,
- PDE(inode)->data);
-}
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
-{
- int status;
- unsigned long long tmp;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
-
-
- arg0.integer.value = option;
-
- status = acpi_evaluate_integer(video->device->handle, "_SPD",
- &args, &tmp);
- if (ACPI_SUCCESS(status))
- status = tmp ? (-EINVAL) : (AE_OK);
-
- return status;
-}
-
-static ssize_t
-acpi_video_bus_write_POST(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_bus *video = m->private;
- char str[12] = { 0 };
- unsigned long long opt, options;
-
-
- if (!video || count >= sizeof(str))
- return -EINVAL;
-
- status = acpi_video_bus_POST_options(video, &options);
- if (!ACPI_SUCCESS(status))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- opt = strtoul(str, NULL, 0);
- if (opt > 3)
- return -EFAULT;
-
- /* just in case an OEM 'forgot' the motherboard... */
- options |= 1;
-
- if (options & (1ul << opt)) {
- status = acpi_video_bus_set_POST(video, opt);
- if (!ACPI_SUCCESS(status))
- return -EFAULT;
-
- }
-
- return count;
-}
-
-static ssize_t
-acpi_video_bus_write_DOS(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * data)
-{
- int status;
- struct seq_file *m = file->private_data;
- struct acpi_video_bus *video = m->private;
- char str[12] = { 0 };
- unsigned long opt;
-
-
- if (!video || count >= sizeof(str))
- return -EINVAL;
-
- if (copy_from_user(str, buffer, count))
- return -EFAULT;
-
- str[count] = 0;
- opt = strtoul(str, NULL, 0);
- if (opt > 7)
- return -EFAULT;
-
- status = acpi_video_bus_DOS(video, opt & 0x3, (opt & 0x4) >> 2);
-
- if (!ACPI_SUCCESS(status))
- return -EFAULT;
-
- return count;
-}
-
-static int acpi_video_bus_add_fs(struct acpi_device *device)
-{
- struct acpi_video_bus *video = acpi_driver_data(device);
- struct proc_dir_entry *device_dir;
- struct proc_dir_entry *entry;
-
- device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
- if (!device_dir)
- return -ENOMEM;
-
- /* 'info' [R] */
- entry = proc_create_data("info", S_IRUGO, device_dir,
- &acpi_video_bus_info_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_dir;
-
- /* 'ROM' [R] */
- entry = proc_create_data("ROM", S_IRUGO, device_dir,
- &acpi_video_bus_ROM_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_info;
-
- /* 'POST_info' [R] */
- entry = proc_create_data("POST_info", S_IRUGO, device_dir,
- &acpi_video_bus_POST_info_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_rom;
-
- /* 'POST' [R/W] */
- entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_bus_POST_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_post_info;
-
- /* 'DOS' [R/W] */
- entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
- device_dir,
- &acpi_video_bus_DOS_fops,
- acpi_driver_data(device));
- if (!entry)
- goto err_remove_post;
-
- video->dir = acpi_device_dir(device) = device_dir;
- return 0;
-
- err_remove_post:
- remove_proc_entry("POST", device_dir);
- err_remove_post_info:
- remove_proc_entry("POST_info", device_dir);
- err_remove_rom:
- remove_proc_entry("ROM", device_dir);
- err_remove_info:
- remove_proc_entry("info", device_dir);
- err_remove_dir:
- remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
- return -ENOMEM;
-}
-
-static int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *device_dir = acpi_device_dir(device);
-
- if (device_dir) {
- remove_proc_entry("info", device_dir);
- remove_proc_entry("ROM", device_dir);
- remove_proc_entry("POST_info", device_dir);
- remove_proc_entry("POST", device_dir);
- remove_proc_entry("DOS", device_dir);
- remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#else
-static inline int acpi_video_device_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_device_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_bus_add_fs(struct acpi_device *device)
-{
- return 0;
-}
-static inline int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
- return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1877,8 +1130,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
- acpi_video_device_add_fs(device);
-
return 0;
}
@@ -2181,8 +1432,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
if (!device || !device->video)
return -ENOENT;
- acpi_video_device_remove_fs(device->dev);
-
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
@@ -2466,10 +1715,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_free_video;
- error = acpi_video_bus_add_fs(device);
- if (error)
- goto err_free_video;
-
mutex_init(&video->device_list_lock);
INIT_LIST_HEAD(&video->video_device_list);
@@ -2522,7 +1767,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_stop_devices(video);
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
- acpi_video_bus_remove_fs(device);
err_free_video:
kfree(video);
device->driver_data = NULL;
@@ -2544,7 +1788,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
acpi_video_bus_stop_devices(video);
acpi_video_bus_put_devices(video);
- acpi_video_bus_remove_fs(device);
input_unregister_device(video->input);
kfree(video->attached_array);
@@ -2584,17 +1827,9 @@ int acpi_video_register(void)
return 0;
}
-#ifdef CONFIG_ACPI_PROCFS
- acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
- if (!acpi_video_dir)
- return -ENODEV;
-#endif
-
result = acpi_bus_register_driver(&acpi_video_bus);
- if (result < 0) {
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ if (result < 0)
return -ENODEV;
- }
/*
* When the acpi_video_bus is loaded successfully, increase
@@ -2617,10 +1852,6 @@ void acpi_video_unregister(void)
}
acpi_bus_unregister_driver(&acpi_video_bus);
-#ifdef CONFIG_ACPI_PROCFS
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
-#endif
-
register_count = 0;
return;
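
/*
 * The ACPI video /proc interface removed above follows the common seq_file
 * pattern: a show callback wired up through single_open(), exposed with
 * proc_create_data(), and unwound with remove_proc_entry() on error.  A
 * minimal sketch of that pattern follows; it is not part of this patch and
 * the example_* identifiers and structure layout are hypothetical.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct example_dev {
	int state;
};

static int example_state_show(struct seq_file *seq, void *offset)
{
	struct example_dev *dev = seq->private;	/* data from proc_create_data() */

	seq_printf(seq, "state: %d\n", dev->state);
	return 0;
}

static int example_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_state_show, PDE(inode)->data);
}

static const struct file_operations example_state_fops = {
	.owner   = THIS_MODULE,
	.open    = example_state_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* in the driver's init path, under an existing proc directory "dir": */
/*   proc_create_data("state", S_IRUGO, dir, &example_state_fops, dev); */
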
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 80f9f3659e4d..97c5898cd76e 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
if (readl(&eprom->magic) != ENI155_MAGIC) {
printk("\n");
- printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
- "magic - expected 0x%x, got 0x%x\n",dev->number,
- ENI155_MAGIC,(unsigned) readl(&eprom->magic));
+ printk(KERN_ERR DEV_LABEL
+ "(itf %d): bad magic - expected 0x%x, got 0x%x\n",
+ dev->number, ENI155_MAGIC,
+ (unsigned)readl(&eprom->magic));
error = -EINVAL;
goto unmap;
}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ee53558b452f..ce012a9c6201 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -160,6 +160,18 @@ static ssize_t node_read_numastat(struct sys_device * dev,
}
static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static ssize_t node_read_vmstat(struct sys_device *dev,
+ struct sysdev_attribute *attr, char *buf)
+{
+ int nid = dev->id;
+ return sprintf(buf,
+ "nr_written %lu\n"
+ "nr_dirtied %lu\n",
+ node_page_state(nid, NR_WRITTEN),
+ node_page_state(nid, NR_DIRTIED));
+}
+static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
static ssize_t node_read_distance(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
@@ -243,6 +255,7 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
+ sysdev_create_file(&node->sysdev, &attr_vmstat);
scan_unevictable_register_node(node);
@@ -267,6 +280,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
+ sysdev_remove_file(&node->sysdev, &attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1dd8676d7f55..126ca492dd08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -503,7 +503,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
* the resume will actually succeed.
*/
if (dev->power.no_callbacks && !parent && dev->parent) {
- spin_lock(&dev->parent->power.lock);
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
if (dev->parent->power.disable_depth > 0
|| dev->parent->power.ignore_children
|| dev->parent->power.runtime_status == RPM_ACTIVE) {
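
/*
 * The runtime PM change above takes the parent's power.lock while the
 * child's lock is already held; both locks share a lock class, so
 * spin_lock_nested() with SINGLE_DEPTH_NESTING tells lockdep the nesting
 * is intentional rather than a self-deadlock.  A minimal sketch of the
 * annotation, not part of this patch; names are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_node {
	spinlock_t lock;
	struct example_node *parent;
};

static void example_check_parent(struct example_node *node)
{
	spin_lock(&node->lock);
	if (node->parent) {
		/* same lock class as node->lock: annotate the nesting */
		spin_lock_nested(&node->parent->lock, SINGLE_DEPTH_NESTING);
		/* ... read or update parent state ... */
		spin_unlock(&node->parent->lock);
	}
	spin_unlock(&node->lock);
}
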
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index f21c237a9e5e..541e18879965 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -4,12 +4,14 @@
* block device routines
*/
+#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
@@ -207,7 +209,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
spin_lock_irqsave(&d->lock, flags);
if ((d->flags & DEVFL_UP) == 0) {
- printk(KERN_INFO "aoe: device %ld.%d is not up\n",
+ pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
spin_unlock_irqrestore(&d->lock, flags);
mempool_free(buf, d->bufpool);
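
/*
 * pr_info_ratelimited() keeps the message above from flooding the log when
 * requests keep arriving for a device that is not up.  A minimal sketch of
 * the idiom, not part of this patch; names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void example_reject_io(long major, int minor)
{
	/* emitted at most a few times per interval, the rest are dropped */
	pr_info_ratelimited("example: device %ld.%d is not up\n",
			    major, minor);
}
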
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 0849280bfc1c..6b5110a47458 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -102,6 +102,7 @@ aoedev_freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
+ cancel_work_sync(&d->work);
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
@@ -135,7 +136,6 @@ aoedev_flush(const char __user *str, size_t cnt)
all = !strncmp(buf, "all", 3);
}
- flush_scheduled_work();
spin_lock_irqsave(&devlist_lock, flags);
dd = &devlist;
while ((d = *dd)) {
@@ -257,8 +257,6 @@ aoedev_exit(void)
struct aoedev *d;
ulong flags;
- flush_scheduled_work();
-
while ((d = devlist)) {
devlist = d->next;
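
/*
 * Replacing flush_scheduled_work() with cancel_work_sync() above scopes the
 * wait to the device's own work item instead of draining the shared
 * workqueue.  A minimal sketch of the teardown idiom, not part of this
 * patch; example_dev is hypothetical.
 */
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct work;
	/* ... other per-device state ... */
};

static void example_free_dev(struct example_dev *d)
{
	/* waits for a running instance and removes a pending one,
	 * touching only this device's work item */
	cancel_work_sync(&d->work);
	/* the rest of the device can be torn down safely now */
}
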
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index f09e6df15aa7..2cc4dda46279 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -66,11 +66,6 @@ MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static DEFINE_MUTEX(cciss_mutex);
-static int cciss_allow_hpsa;
-module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_allow_hpsa,
- "Prevent cciss driver from accessing hardware known to be "
- " supported by the hpsa driver");
#include "cciss_cmd.h"
#include "cciss.h"
@@ -98,19 +93,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{0,}
};
@@ -138,24 +120,9 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
-/* controllers below this line are also supported by the hpsa driver. */
-#define HPSA_BOUNDARY 0x3223103C
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
- {0x3241103C, "Smart Array P212", &SA5_access},
- {0x3243103C, "Smart Array P410", &SA5_access},
- {0x3245103C, "Smart Array P410i", &SA5_access},
- {0x3247103C, "Smart Array P411", &SA5_access},
- {0x3249103C, "Smart Array P812", &SA5_access},
- {0x324A103C, "Smart Array P712m", &SA5_access},
- {0x324B103C, "Smart Array P711m", &SA5_access},
- {0x3350103C, "Smart Array", &SA5_access},
- {0x3351103C, "Smart Array", &SA5_access},
- {0x3352103C, "Smart Array", &SA5_access},
- {0x3353103C, "Smart Array", &SA5_access},
- {0x3354103C, "Smart Array", &SA5_access},
- {0x3355103C, "Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -1184,6 +1151,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |=
copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
@@ -3970,9 +3938,6 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
subsystem_vendor_id;
for (i = 0; i < ARRAY_SIZE(products); i++) {
- /* Stand aside for hpsa driver on request */
- if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
- return -ENODEV;
if (*board_id == products[i].board_id)
return i;
}
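
/*
 * The memset() added to cciss_ioctl32_big_passthru() above clears the
 * on-stack structure before individual fields are copied in, so padding and
 * untouched members cannot leak kernel stack contents if the structure is
 * later copied back to userspace.  A minimal sketch of the idiom, not part
 * of this patch; the structure and names are hypothetical.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_cmd {
	u32 lun;
	u32 flags;
	u8  reserved[8];	/* would hold stale stack data if not cleared */
};

static int example_fill_cmd(struct example_cmd *cmd,
			    const struct example_cmd __user *ucmd)
{
	memset(cmd, 0, sizeof(*cmd));		/* no uninitialized bytes left */
	if (copy_from_user(&cmd->lun, &ucmd->lun, sizeof(cmd->lun)))
		return -EFAULT;
	if (copy_from_user(&cmd->flags, &ucmd->flags, sizeof(cmd->flags)))
		return -EFAULT;
	return 0;
}
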
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index c5dfe6486cf3..25c7a73c5062 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2982,7 +2982,7 @@ static int drbd_create_mempools(void)
drbd_ee_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
- if (drbd_request_mempool == NULL)
+ if (drbd_ee_mempool == NULL)
goto Enomem;
/* drbd's page pool */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6c48b3545f84..1e5284ef65fa 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
else
memcpy(raw_buf, loop_buf, size);
- kunmap_atomic(raw_buf, KM_USER0);
kunmap_atomic(loop_buf, KM_USER1);
+ kunmap_atomic(raw_buf, KM_USER0);
cond_resched();
return 0;
}
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
for (i = 0; i < size; i++)
*out++ = *in++ ^ key[(i & 511) % keysize];
- kunmap_atomic(raw_buf, KM_USER0);
kunmap_atomic(loop_buf, KM_USER1);
+ kunmap_atomic(raw_buf, KM_USER0);
cond_resched();
return 0;
}
@@ -1049,9 +1049,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (bdev)
invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
+ loop_sysfs_exit(lo);
if (bdev) {
bd_set_size(bdev, 0);
- loop_sysfs_exit(lo);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
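
/*
 * Atomic kmaps are per-CPU, stack-like slots, so they must be released in
 * the reverse order they were taken; the loop.c hunks above restore that
 * LIFO order.  A minimal sketch using the two-argument kmap_atomic()
 * interface of this kernel series, not part of this patch; names are
 * hypothetical.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_page(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst, KM_USER0);
	void *s = kmap_atomic(src, KM_USER1);

	memcpy(d, s, PAGE_SIZE);

	/* release in reverse order of mapping: last mapped, first unmapped */
	kunmap_atomic(s, KM_USER1);
	kunmap_atomic(d, KM_USER0);
}
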
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index dcd4cfcf4126..a22e3f895947 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -80,8 +80,10 @@ static void do_z2_request(struct request_queue *q)
int err = 0;
if (start + len > z2ram_size) {
- printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
- blk_rq_pos(req), blk_rq_cur_sectors(req));
+ pr_err(DEVICE_NAME ": bad access: block=%llu, "
+ "count=%u\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req));
err = -EIO;
goto done;
}
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 627f542827c7..8eb56e273e75 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
+obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 120490949997..5259065f3c79 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -121,11 +121,6 @@ struct agp_bridge_driver {
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
-
- int (*agp_map_page)(struct page *page, dma_addr_t *ret);
- void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
- int (*agp_map_memory)(struct agp_memory *mem);
- void (*agp_unmap_memory)(struct agp_memory *mem);
};
struct agp_bridge_data {
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b6b1568314c8..b1b4362bc648 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
- if (type != 0 || mem->type != 0)
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
unsigned long __iomem *cur_gatt;
unsigned long addr;
- if (type != 0 || mem->type != 0)
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ee4f855611b6..f27d0d0816d3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
}
bridge->scratch_page_page = page;
- if (bridge->driver->agp_map_page) {
- if (bridge->driver->agp_map_page(page,
- &bridge->scratch_page_dma)) {
- dev_err(&bridge->dev->dev,
- "unable to dma-map scratch page\n");
- rc = -ENOMEM;
- goto err_out_nounmap;
- }
- } else {
- bridge->scratch_page_dma = page_to_phys(page);
- }
+ bridge->scratch_page_dma = page_to_phys(page);
bridge->scratch_page = bridge->driver->mask_memory(bridge,
bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return 0;
err_out:
- if (bridge->driver->needs_scratch_page &&
- bridge->driver->agp_unmap_page) {
- bridge->driver->agp_unmap_page(bridge->scratch_page_page,
- bridge->scratch_page_dma);
- }
-err_out_nounmap:
if (bridge->driver->needs_scratch_page) {
void *va = page_address(bridge->scratch_page_page);
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
bridge->driver->needs_scratch_page) {
void *va = page_address(bridge->scratch_page_page);
- if (bridge->driver->agp_unmap_page)
- bridge->driver->agp_unmap_page(bridge->scratch_page_page,
- bridge->scratch_page_dma);
-
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64255cef8a7d..4956f1c8f9d5 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
curr->is_flushed = true;
}
- if (curr->bridge->driver->agp_map_memory) {
- ret_val = curr->bridge->driver->agp_map_memory(curr);
- if (ret_val)
- return ret_val;
- }
ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
if (ret_val != 0)
return ret_val;
- if (curr->bridge->driver->agp_unmap_memory)
- curr->bridge->driver->agp_unmap_memory(curr);
-
curr->is_bound = false;
curr->pg_start = 0;
spin_lock(&curr->bridge->mapped_lock);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index cd18493c9527..e72f49d52202 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,9 +12,6 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int find_gmch(u16 device)
-{
- struct pci_dev *gmch_device;
-
- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
- if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
- device, gmch_device);
- }
-
- if (!gmch_device)
- return 0;
-
- intel_private.pcidev = gmch_device;
- return 1;
-}
-
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
* which one should be used if a gmch_chip_id is present.
*/
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
unsigned int chip_id;
- unsigned int gmch_chip_id;
char *name;
const struct agp_bridge_driver *driver;
- const struct agp_bridge_driver *gmch_driver;
} intel_agp_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
- NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
- &intel_815_driver, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
- &intel_830mp_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
- &intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
- NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
- NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
- NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
- "GM45", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
- "Eaglelake", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
- "Q45/Q43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
- "G45/G43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
- "B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
- "B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
- "G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
- "HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
- { 0, 0, NULL, NULL, NULL }
+ { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+ { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+ { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+ { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+ { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+ { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+ { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+ { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+ { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+ { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+ { 0, NULL, NULL }
};
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge)
-{
- int i, mask;
-
- bridge->driver = NULL;
-
- for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_agp_chipsets[i].gmch_driver;
- break;
- }
- }
-
- if (!bridge->driver)
- return 0;
-
- bridge->dev_private_data = &intel_private;
- bridge->dev = pdev;
-
- dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
- if (bridge->driver->mask_memory == intel_gen6_mask_memory)
- mask = 40;
- else if (bridge->driver->mask_memory == intel_i965_mask_memory)
- mask = 36;
- else
- mask = 32;
-
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
-
- return 1;
-}
-
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
}
- if (intel_agp_chipsets[i].name == NULL) {
+ if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (!bridge->driver) {
- if (cap_ptr)
- dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
- agp_put_bridge(bridge);
- return -ENODEV;
- }
-
bridge->dev = pdev;
bridge->dev_private_data = NULL;
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
agp_remove_bridge(bridge);
- if (intel_private.pcidev)
- pci_dev_put(intel_private.pcidev);
+ intel_gmch_remove(pdev);
agp_put_bridge(bridge);
}
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index d09b1ab7e8ab..90539df02504 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -215,44 +215,7 @@
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
- IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 75e0a3497888..6b6760ea2435 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
* /fairy-tale-mode off
*/
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -23,11 +35,12 @@
*/
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
#endif
/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-static struct gatt_mask intel_gen6_masks[] =
-{
- {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
- .type = INTEL_AGP_UNCACHED_MEMORY },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+ unsigned int gen : 8;
+ unsigned int is_g33 : 1;
+ unsigned int is_pineview : 1;
+ unsigned int is_ironlake : 1;
+ unsigned int dma_mask_size : 8;
+ /* Chipset specific GTT setup */
+ int (*setup)(void);
+ /* This should undo anything done in ->setup() save the unmapping
+ * of the mmio register file, that's done in the generic code. */
+ void (*cleanup)(void);
+ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ /* Flags is a more or less chipset specific opaque value.
+ * For chipsets that need to support old ums (non-gem) code, this
+ * needs to be identical to the various supported agp memory types! */
+ bool (*check_flags)(unsigned int flags);
+ void (*chipset_flush)(void);
};
static struct _intel_private {
+ struct intel_gtt base;
+ const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
+ struct pci_dev *bridge_dev;
u8 __iomem *registers;
+ phys_addr_t gtt_bus_addr;
+ phys_addr_t gma_bus_addr;
+ phys_addr_t pte_bus_addr;
u32 __iomem *gtt; /* I915G */
int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
@@ -88,23 +105,14 @@ static struct _intel_private {
struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
+ struct page *scratch_page;
+ dma_addr_t scratch_page_dma;
} intel_private;
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN intel_private.driver->gen
+#define IS_G33 intel_private.driver->is_g33
+#define IS_PINEVIEW intel_private.driver->is_pineview
+#define IS_IRONLAKE intel_private.driver->is_ironlake
static void intel_agp_free_sglist(struct agp_memory *mem)
{
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
struct scatterlist *sg;
int i;
+ if (mem->sg_list)
+ return 0; /* already mapped (e.g. for resume) */
+
DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
intel_agp_free_sglist(mem);
}
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
struct aper_size_info_fixed *values;
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
return 0;
}
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
iounmap(intel_private.registers);
}
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
return;
}
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
atomic_dec(&agp_bridge->current_memory_agp);
}
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
- if (type_mask == AGP_USER_UNCACHED_MEMORY)
- return INTEL_AGP_UNCACHED_MEMORY;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC_MLC;
- else /* set 'normal'/'cached' to LLC by default */
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
return addr | bridge->driver->masks[type].mask;
}
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
{
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+ return -EINVAL;
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+ intel_private.scratch_page_dma = page_to_phys(page);
+
+ intel_private.scratch_page = page;
+
+ return 0;
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
{64, 16384, 5},
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
{512, 131072, 7},
};
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
{
u16 gmch_ctrl;
- int gtt_entries = 0;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
+ unsigned int overhead_entries, stolen_entries;
+ unsigned int stolen_size = 0;
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+ overhead_entries = 0;
+ else
+ overhead_entries = intel_private.base.gtt_mappable_entries
+ / 1024;
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, ignore it in stolen gtt entries counting. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
+ overhead_entries += 1; /* BIOS popup */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
+ stolen_size = KB(512);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I830_GMCH_GMS_LOCAL:
rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
MB(ddt[I830_RDRAM_DDT(rdct)]);
local = 1;
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
- } else if (IS_SNB) {
+ } else if (INTEL_GTT_GEN == 6) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
+ stolen_size = MB(64);
break;
case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
+ stolen_size = MB(96);
break;
case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
+ stolen_size = MB(128);
break;
case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
+ stolen_size = MB(160);
break;
case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
+ stolen_size = MB(192);
break;
case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
+ stolen_size = MB(224);
break;
case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
+ stolen_size = MB(256);
break;
case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
+ stolen_size = MB(288);
break;
case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
+ stolen_size = MB(320);
break;
case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
+ stolen_size = MB(352);
break;
case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
+ stolen_size = MB(384);
break;
case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
+ stolen_size = MB(416);
break;
case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
+ stolen_size = MB(448);
break;
case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
+ stolen_size = MB(480);
break;
case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
+ stolen_size = MB(512);
break;
}
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
+ stolen_size = MB(4);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
+ stolen_size = MB(16);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(48);
break;
case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(64);
break;
case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(128);
break;
case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(256);
break;
case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(96);
break;
case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(160);
break;
case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(224);
break;
case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(352);
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
}
- if (!local && gtt_entries > intel_max_stolen) {
- dev_info(&agp_bridge->dev->dev,
+
+ if (!local && stolen_size > intel_max_stolen) {
+ dev_info(&intel_private.bridge_dev->dev,
"detected %dK stolen memory, trimming to %dK\n",
- gtt_entries / KB(1), intel_max_stolen / KB(1));
- gtt_entries = intel_max_stolen / KB(4);
- } else if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
+ stolen_size / KB(1), intel_max_stolen / KB(1));
+ stolen_size = intel_max_stolen;
+ } else if (stolen_size > 0) {
+ dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+ stolen_size / KB(1), local ? "local" : "stolen");
} else {
- dev_info(&agp_bridge->dev->dev,
+ dev_info(&intel_private.bridge_dev->dev,
"no pre-allocated video memory detected\n");
- gtt_entries = 0;
+ stolen_size = 0;
+ }
+
+ stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+ return stolen_entries;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
+ return size/4;
+ } else if (INTEL_GTT_GEN == 6) {
+ u16 snb_gmch_ctl;
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ size = MB(2);
+ break;
+ }
+ return size/4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return intel_private.base.gtt_mappable_entries;
+ }
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+ unsigned int aperture_size;
+
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(intel_private.pcidev, 2);
}
- intel_private.gtt_entries = gtt_entries;
+ return aperture_size >> PAGE_SHIFT;
}
-static void intel_i830_fini_flush(void)
+static void intel_gtt_teardown_scratch_page(void)
+{
+ set_pages_wb(intel_private.scratch_page, 1);
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(intel_private.scratch_page);
+ __free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+ intel_private.driver->cleanup();
+
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+
+ intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+ u32 gtt_map_size;
+ int ret;
+
+ ret = intel_private.driver->setup();
+ if (ret != 0)
+ return ret;
+
+ intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+ dev_info(&intel_private.bridge_dev->dev,
+ "detected gtt size: %dK total, %dK mappable\n",
+ intel_private.base.gtt_total_entries * 4,
+ intel_private.base.gtt_mappable_entries * 4);
+
+ gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (!intel_private.gtt) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
+
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
+ if (intel_private.base.gtt_stolen_entries == 0) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ ret = intel_gtt_setup_scratch_page();
+ if (ret != 0) {
+ intel_gtt_cleanup();
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+ unsigned int aper_size;
+ int i;
+
+ aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+ / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_fake_agp_sizes[i].size) {
+ agp_bridge->current_size =
+ (void *) (intel_fake_agp_sizes + i);
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+static void i830_cleanup(void)
{
kunmap(intel_private.i8xx_page);
intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
__free_page(intel_private.i8xx_page);
intel_private.i8xx_page = NULL;
@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
if (intel_private.i8xx_page)
return;
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL);
if (!intel_private.i8xx_page)
return;
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
+ i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
* that buffer out, we just fill 1KB and clflush it out, on the assumption
* that it'll push whatever was in there out. It appears to work.
*/
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+static void i830_chipset_flush(void)
{
unsigned int *pg = intel_private.i8xx_flush_page;
@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
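+/* Latch the graphics aperture base address and set the GMCH and page-table enable bits. */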
+static void intel_enable_gtt(void)
+{
+ u32 gma_addr;
+ u16 gmch_ctrl;
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
+ if (INTEL_GTT_GEN == 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.registers);
+ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+}
+
+static int i830_setup(void)
+{
+ u32 reg_addr;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
return -ENOMEM;
- }
- agp_bridge->gatt_table = NULL;
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.pte_bus_addr =
+ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- agp_bridge->gatt_bus_addr = temp;
+ intel_i830_setup_flush();
return 0;
}
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
return 0;
}
-static int intel_i830_fetch_size(void)
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
return 0;
}
-static int intel_i830_configure(void)
+static int intel_fake_agp_configure(void)
{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
int i;
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+ intel_enable_gtt();
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
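+ /* Point every GTT entry above the stolen range at the scratch page. */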
+ for (i = intel_private.base.gtt_stolen_entries;
+ i < intel_private.base.gtt_total_entries; i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
global_cache_flush();
- intel_i830_setup_flush();
return 0;
}
-static void intel_i830_cleanup(void)
+static bool i830_check_flags(unsigned int flags)
{
- iounmap(intel_private.registers);
+ switch (flags) {
+ case 0:
+ case AGP_PHYS_MEMORY:
+ case AGP_USER_CACHED_MEMORY:
+ case AGP_USER_MEMORY:
+ return true;
+ }
+
+ return false;
}
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
{
- int i, j, num_entries;
- void *temp;
+ struct scatterlist *sg;
+ unsigned int len, m;
+ int i, j;
+
+ j = pg_start;
+
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
+ for_each_sg(sg_list, sg, sg_len, i) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ j++;
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j;
int ret = -EINVAL;
- int mask_type;
if (mem->page_count == 0)
goto out;
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
+ pg_start, intel_private.base.gtt_stolen_entries);
dev_info(&intel_private.pcidev->dev,
"trying to insert into local/stolen memory\n");
goto out_err;
}
- if ((pg_start + mem->page_count) > num_entries)
+ if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
goto out_err;
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
if (type != mem->type)
goto out_err;
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
+ if (!intel_private.driver->check_flags(type))
goto out_err;
if (!mem->is_flushed)
global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
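+ /* With the PCI DMA API on gen3 and later, dma-map the pages and insert them from the scatterlist; otherwise write the raw physical addresses. */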
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ ret = intel_agp_map_memory(mem);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
+ pg_start, type);
+ } else {
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ dma_addr_t addr = page_to_phys(mem->pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, type);
+ }
+ readl(intel_private.gtt+j-1);
}
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
out:
ret = 0;
@@ -982,29 +1042,39 @@ out_err:
return ret;
}
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
{
int i;
if (mem->page_count == 0)
return 0;
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_info(&intel_private.pcidev->dev,
"trying to disable local/stolen memory\n");
return -EINVAL;
}
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
+ intel_agp_unmap_memory(mem);
+
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ readl(intel_private.gtt+i-1);
return 0;
}
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
+{
+ intel_private.driver->chipset_flush();
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+ int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
static int intel_alloc_chipset_flush_resource(void)
{
int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
+ pcibios_align_resource, intel_private.bridge_dev);
return ret;
}
@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
int ret;
u32 temp;
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
if (!(temp & 0x1)) {
intel_alloc_chipset_flush_resource();
intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
temp &= ~1;
@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
u32 temp_hi, temp_lo;
int ret;
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
if (!(temp_lo & 0x1)) {
intel_alloc_chipset_flush_resource();
intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
upper_32_bits(intel_private.ifp_resource.start));
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
u64 l64;
@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
if (intel_private.ifp_resource.start)
return;
- if (IS_SNB)
+ if (INTEL_GTT_GEN == 6)
return;
/* setup a resource for this object */
@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
intel_private.ifp_resource.flags = IORESOURCE_MEM;
/* Setup chipset flush for 915 */
- if (IS_I965 || IS_G33 || IS_G4X) {
+ if (IS_G33 || INTEL_GTT_GEN >= 4) {
intel_i965_g33_setup_chipset_flush();
} else {
intel_i915_setup_chipset_flush();
@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
"can't ioremap flush page - no chipset flushing\n");
}
-static int intel_i9xx_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i9xx_setup_flush();
-
- return 0;
-}
-
-static void intel_i915_cleanup(void)
+static void i9xx_cleanup(void)
{
if (intel_private.i9xx_flush_page)
iounmap(intel_private.i9xx_flush_page);
@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
release_resource(&intel_private.ifp_resource);
intel_private.ifp_resource.start = 0;
intel_private.resource_valid = 0;
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
}
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+static void i9xx_chipset_flush(void)
{
if (intel_private.i9xx_flush_page)
writel(1, intel_private.i9xx_flush_page);
}
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void i965_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i915 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
-
- out:
- ret = 0;
- out_err:
- mem->is_flushed = true;
- return ret;
+ /* Shift high bits down */
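+ /* The i965 supports 36-bit physical addresses; bits 32-35 are folded into PTE bits 4-7. */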
+ addr |= (addr >> 28) & 0xf0;
+ writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
}
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static bool gen6_check_flags(unsigned int flags)
{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
- readl(intel_private.gtt+i-1);
-
- return 0;
+ return true;
}
-/* Return the aperture size by just checking the resource length. The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
- int aper_size; /* size in megabytes */
- int i;
-
- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
- for (i = 0; i < num_sizes; i++) {
- if (aper_size == intel_i830_sizes[i].size) {
- agp_bridge->current_size = intel_i830_sizes + i;
- return aper_size;
- }
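+ /* Choose the cacheability bits for this PTE; the GFDT flag is carried over when requested. */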
+ if (type_mask == AGP_USER_UNCACHED_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
}
- return 0;
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
}
-static int intel_i915_get_gtt_size(void)
+static void gen6_cleanup(void)
{
- int size;
-
- if (IS_G33) {
- u16 gmch_ctrl;
-
- /* G33's GTT size defined in gmch_ctrl */
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- size = 512;
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- size = 1024;
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- size = 8*1024;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & I830_GMCH_GMS_MASK));
- size = 512;
- }
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size();
- }
-
- return KB(size);
}
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+static int i9xx_setup(void)
{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
- int gtt_map_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+ u32 reg_addr;
- gtt_map_size = intel_i915_get_gtt_size();
-
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
-
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
+ reg_addr &= 0xfff80000;
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
+ intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (!intel_private.registers)
return -ENOMEM;
- }
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xf0;
-
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
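+ /* Gen3 reports the GTT base in the PTEADDR config register; later generations place it at a fixed offset from the MMIO base. */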
+ if (INTEL_GTT_GEN == 3) {
+ u32 gtt_addr;
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
- u16 snb_gmch_ctl;
-
- switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
- case PCI_DEVICE_ID_INTEL_Q45_HB:
- case PCI_DEVICE_ID_INTEL_G45_HB:
- case PCI_DEVICE_ID_INTEL_G41_HB:
- case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
- *gtt_offset = *gtt_size = MB(2);
- break;
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
- *gtt_offset = MB(2);
+ pci_read_config_dword(intel_private.pcidev,
+ I915_PTEADDR, &gtt_addr);
+ intel_private.gtt_bus_addr = gtt_addr;
+ } else {
+ u32 gtt_offset;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- *gtt_size = MB(0);
+ switch (INTEL_GTT_GEN) {
+ case 5:
+ case 6:
+ gtt_offset = MB(2);
break;
- case SNB_GTT_SIZE_1M:
- *gtt_size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- *gtt_size = MB(2);
+ case 4:
+ default:
+ gtt_offset = KB(512);
break;
}
- break;
- default:
- *gtt_offset = *gtt_size = KB(512);
+ intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
- int gtt_offset, gtt_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
- temp &= 0xfff00000;
-
- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+ intel_private.pte_bus_addr =
+ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_size / 4;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
- return -ENOMEM;
- }
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
+ intel_i9xx_setup_flush();
return 0;
}
@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
.cleanup = intel_i810_cleanup,
.mask_memory = intel_i810_mask_memory,
.masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
+ .agp_enable = intel_fake_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_830_driver = {
+static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i830_configure,
- .fetch_size = intel_i830_fetch_size,
- .cleanup = intel_i830_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
+ .aperture_sizes = intel_fake_agp_sizes,
+ .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
+ .configure = intel_fake_agp_configure,
+ .fetch_size = intel_fake_agp_fetch_size,
+ .cleanup = intel_gtt_cleanup,
+ .agp_enable = intel_fake_agp_enable,
.cache_flush = global_cache_flush,
- .create_gatt_table = intel_i830_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i830_insert_entries,
- .remove_memory = intel_i830_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
+ .create_gatt_table = intel_fake_agp_create_gatt_table,
+ .free_gatt_table = intel_fake_agp_free_gatt_table,
+ .insert_memory = intel_fake_agp_insert_entries,
+ .remove_memory = intel_fake_agp_remove_entries,
+ .alloc_by_type = intel_fake_agp_alloc_by_type,
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
+ .chipset_flush = intel_fake_agp_chipset_flush,
};
-static const struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i81x_gtt_driver = {
+ .gen = 1,
+ .dma_mask_size = 32,
};
-
-static const struct agp_bridge_driver intel_i965_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+ .gen = 2,
+ .setup = i830_setup,
+ .cleanup = i830_cleanup,
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i830_chipset_flush,
};
-
-static const struct agp_bridge_driver intel_gen6_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_gen6_mask_memory,
- .masks = intel_gen6_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i915_gtt_driver = {
+ .gen = 3,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ /* i945 is the last gpu to need phys mem (for overlay and cursors). */
+ .write_entry = i830_write_entry,
+ .dma_mask_size = 32,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+ .gen = 3,
+ .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+ .gen = 3,
+ .is_pineview = 1, .is_g33 = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+ .gen = 4,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+ .gen = 5,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+ .gen = 5,
+ .is_ironlake = 1,
+ .setup = i9xx_setup,
+ .cleanup = i9xx_cleanup,
+ .write_entry = i965_write_entry,
+ .dma_mask_size = 36,
+ .check_flags = i830_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver sandybridge_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = gen6_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
};
-static const struct agp_bridge_driver intel_g33_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i9xx_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
+/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
+ * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
+ * which one should be used if a gmch_chip_id is present.
+ */
+static const struct intel_gtt_driver_description {
+ unsigned int gmch_chip_id;
+ char *name;
+ const struct agp_bridge_driver *gmch_driver;
+ const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+ &i81x_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82854_IG, "854",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_82865_IG, "865",
+ &intel_fake_agp_driver, &i8xx_gtt_driver},
+ { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+ &intel_fake_agp_driver, &i915_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+ &intel_fake_agp_driver, &i965_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+ &intel_fake_agp_driver, &g33_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+ &intel_fake_agp_driver, &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+ &intel_fake_agp_driver, &pineview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+ &intel_fake_agp_driver, &g4x_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+ { 0, NULL, NULL }
};
+
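+/* Find the integrated graphics PCI device for this chipset id, preferring the function-0 instance. */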
+static int find_gmch(u16 device)
+{
+ struct pci_dev *gmch_device;
+
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+ device, gmch_device);
+ }
+
+ if (!gmch_device)
+ return 0;
+
+ intel_private.pcidev = gmch_device;
+ return 1;
+}
+
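+/* Match the graphics device against the chipset table, set its DMA mask and bring up the GTT; the old i810 driver keeps its own setup path. */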
+int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i, mask;
+ bridge->driver = NULL;
+
+ for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+ if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ bridge->driver =
+ intel_gtt_chipsets[i].gmch_driver;
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver)
+ return 0;
+
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ intel_private.bridge_dev = pci_dev_get(pdev);
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+ mask = intel_private.driver->dma_mask_size;
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask %d-bit failed!\n", mask);
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(mask));
+
+ if (bridge->driver == &intel_810_driver)
+ return 1;
+
+ if (intel_gtt_init() != 0)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+struct intel_gtt *intel_gtt_get(void)
+{
+ return &intel_private.base;
+}
+EXPORT_SYMBOL(intel_gtt_get);
+
+void intel_gmch_remove(struct pci_dev *pdev)
+{
+ if (intel_private.pcidev)
+ pci_dev_put(intel_private.pcidev);
+ if (intel_private.bridge_dev)
+ pci_dev_put(intel_private.bridge_dev);
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 1c129211302d..17e380f5f818 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -358,8 +358,12 @@ parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
bridge->dev = fake_bridge_dev;
error = agp_add_bridge(bridge);
+ if (error)
+ goto fail;
+ return 0;
fail:
+ kfree(fake_bridge_dev);
return error;
}
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index e7ba774beda6..25373df1dcf8 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -566,6 +566,7 @@ static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_
struct mailbox mailbox;
/* Got a packet for us */
+ memset(&st_loc, 0, sizeof(st_loc));
ret = do_ac_read(i, buf, &st_loc, &mailbox);
spin_unlock_irqrestore(&apbs[i].mutex, flags);
set_current_state(TASK_RUNNING);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index a4eee324eb1e..55b8667f739f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -32,12 +32,12 @@
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
+#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>
@@ -81,13 +81,13 @@ static cycle_t read_hpet(struct clocksource *cs)
}
static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = CLOCKSOURCE_MASK(64),
- .mult = 0, /* to be calculated */
- .shift = 10,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 0, /* to be calculated */
+ .shift = 10,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif
@@ -465,6 +465,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
if (irq) {
unsigned long irq_flags;
+ if (devp->hd_flags & HPET_SHARED_IRQ) {
+ /*
+ * To prevent the interrupt handler from seeing an
+ * unwanted interrupt status bit, program the timer
+ * so that it will not fire in the near future ...
+ */
+ writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+ &timer->hpet_config);
+ write_counter(read_counter(&hpet->hpet_mc),
+ &timer->hpet_compare);
+ /* ... and clear any left-over status. */
+ isr = 1 << (devp - devp->hd_hpets->hp_dev);
+ writel(isr, &hpet->hpet_isr);
+ }
+
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
irq_flags = devp->hd_flags & HPET_SHARED_IRQ
? IRQF_SHARED : IRQF_DISABLED;
@@ -581,11 +596,10 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
break;
case HPET_INFO:
{
+ memset(info, 0, sizeof(*info));
if (devp->hd_ireqfreq)
info->hi_ireqfreq =
hpet_time_div(hpetp, devp->hd_ireqfreq);
- else
- info->hi_ireqfreq = 0;
info->hi_flags =
readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
info->hi_hpet = hpetp->hp_which;
@@ -811,7 +825,7 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpets *hpetp;
size_t siz;
struct hpet __iomem *hpet;
- static struct hpets *last = NULL;
+ static struct hpets *last;
unsigned long period;
unsigned long long temp;
u32 remainder;
@@ -1000,6 +1014,8 @@ static int hpet_acpi_add(struct acpi_device *device)
return -ENODEV;
if (!data.hd_address || !data.hd_nirqs) {
+ if (data.hd_address)
+ iounmap(data.hd_address);
printk("%s: no address or irqs in _CRS\n", __func__);
return -ENODEV;
}
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 3afd62e856eb..e9cba13ee800 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -713,7 +713,6 @@ static int khvcd(void *unused)
struct hvc_struct *hp;
set_freezable();
- __set_current_state(TASK_RUNNING);
do {
poll_mask = 0;
hvc_kicked = 0;
diff --git a/drivers/char/hvc_tile.c b/drivers/char/hvc_tile.c
index c4efb55cbc03..7a84a0595477 100644
--- a/drivers/char/hvc_tile.c
+++ b/drivers/char/hvc_tile.c
@@ -61,7 +61,8 @@ console_initcall(hvc_tile_console_init);
static int __init hvc_tile_init(void)
{
- hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
- return 0;
+ struct hvc_struct *s;
+ s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+ return IS_ERR(s) ? PTR_ERR(s) : 0;
}
device_initcall(hvc_tile_init);
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 60446f82a3fc..6b8e6d18a8e6 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -74,7 +74,8 @@ static int __write_console(const char *data, int len)
wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
- notify_daemon();
+ if (sent)
+ notify_daemon();
return sent;
}
diff --git a/drivers/char/ip2/Makefile b/drivers/char/ip2/Makefile
index bc397d92b499..7b78e0dfc5b0 100644
--- a/drivers/char/ip2/Makefile
+++ b/drivers/char/ip2/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_COMPUTONE) += ip2.o
-ip2-objs := ip2main.o
+ip2-y := ip2main.o
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index eb8a1a8c188e..16a93648d54e 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ipmi drivers.
#
-ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1fc8876af1f5..2aa3977aae5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -916,7 +916,7 @@ static struct ipmi_smi_watcher smi_watcher =
.smi_gone = ipmi_smi_gone,
};
-static __init int init_ipmi_devintf(void)
+static int __init init_ipmi_devintf(void)
{
int rv;
@@ -954,7 +954,7 @@ static __init int init_ipmi_devintf(void)
}
module_init(init_ipmi_devintf);
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
struct ipmi_reg_list *entry, *entry2;
mutex_lock(&reg_list_mutex);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4f3f8c9ec262..2fe72f8edf44 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4442,13 +4442,13 @@ static int ipmi_init_msghandler(void)
return 0;
}
-static __init int ipmi_init_msghandler_mod(void)
+static int __init ipmi_init_msghandler_mod(void)
{
ipmi_init_msghandler();
return 0;
}
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
int count;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e537610d2f09..035da9e64a17 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1665,6 +1665,17 @@ static int check_hotmod_int_op(const char *curr, const char *option,
return 0;
}
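+/* Central allocator so every probe path gets an smi_info with its spinlocks already initialised. */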
+static struct smi_info *smi_info_alloc(void)
+{
+ struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info) {
+ spin_lock_init(&info->si_lock);
+ spin_lock_init(&info->msg_lock);
+ }
+ return info;
+}
+
static int hotmod_handler(const char *val, struct kernel_param *kp)
{
char *str = kstrdup(val, GFP_KERNEL);
@@ -1779,7 +1790,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
if (op == HM_ADD) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
rv = -ENOMEM;
goto out;
@@ -1835,7 +1846,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
return rv;
}
-static __devinit void hardcode_find_bmc(void)
+static void __devinit hardcode_find_bmc(void)
{
int i;
struct smi_info *info;
@@ -1844,7 +1855,7 @@ static __devinit void hardcode_find_bmc(void)
if (!ports[i] && !addrs[i])
continue;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
@@ -2018,7 +2029,7 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static __devinit int try_init_spmi(struct SPMITable *spmi)
+static int __devinit try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
@@ -2027,7 +2038,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
return -ENODEV;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
@@ -2101,7 +2112,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
return 0;
}
-static __devinit void spmi_find_bmc(void)
+static void __devinit spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2137,7 +2148,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
if (!acpi_dev)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
@@ -2314,11 +2325,11 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
return 0;
}
-static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
@@ -2425,7 +2436,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
@@ -2566,7 +2577,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
dev_err(&dev->dev,
@@ -3001,7 +3012,7 @@ static __devinitdata struct ipmi_default_vals
{ .port = 0 }
};
-static __devinit void default_find_bmc(void)
+static void __devinit default_find_bmc(void)
{
struct smi_info *info;
int i;
@@ -3013,7 +3024,7 @@ static __devinit void default_find_bmc(void)
if (check_legacy_ioport(ipmi_defaults[i].port))
continue;
#endif
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
@@ -3138,9 +3149,6 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
- spin_lock_init(&(new_smi->si_lock));
- spin_lock_init(&(new_smi->msg_lock));
-
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
@@ -3304,7 +3312,7 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
-static __devinit int init_ipmi_si(void)
+static int __devinit init_ipmi_si(void)
{
int i;
char *str;
@@ -3517,7 +3525,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean);
}
-static __exit void cleanup_ipmi_si(void)
+static void __exit cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e985b1c2730e..1256454b2d43 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -876,6 +876,10 @@ static int memory_open(struct inode *inode, struct file *filp)
if (dev->dev_info)
filp->f_mapping->backing_dev_info = dev->dev_info;
+ /* Is /dev/mem or /dev/kmem ? */
+ if (dev->dev_info == &directly_mappable_cdev_bdi)
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+
if (dev->fops->open)
return dev->fops->open(inode, filp);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c070b53984e4..e6d75627c6c8 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -176,9 +176,9 @@ static void mmtimer_setup_int_2(int cpu, u64 expires)
* in order to ensure that the setup succeeds in a deterministic time frame.
* It will check if the interrupt setup succeeded.
*/
-static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
+ u64 *set_completion_time)
{
-
switch (comparator) {
case 0:
mmtimer_setup_int_0(cpu, expires);
@@ -191,7 +191,8 @@ static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
break;
}
/* We might've missed our expiration time */
- if (rtc_time() <= expires)
+ *set_completion_time = rtc_time();
+ if (*set_completion_time <= expires)
return 1;
/*
@@ -227,6 +228,8 @@ static int mmtimer_disable_int(long nasid, int comparator)
#define TIMER_OFF 0xbadcabLL /* Timer is not setup */
#define TIMER_SET 0 /* Comparator is set for this timer */
+#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
+
/* There is one of these for each timer */
struct mmtimer {
struct rb_node list;
@@ -242,6 +245,11 @@ struct mmtimer_node {
};
static struct mmtimer_node *timers;
+static unsigned mmtimer_interval_retry_increment =
+ MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
+module_param(mmtimer_interval_retry_increment, uint, 0644);
+MODULE_PARM_DESC(mmtimer_interval_retry_increment,
+ "RTC ticks to add to expiration on interval retry (default 40)");
/*
* Add a new mmtimer struct to the node's mmtimer list.
@@ -289,7 +297,8 @@ static void mmtimer_set_next_timer(int nodeid)
struct mmtimer_node *n = &timers[nodeid];
struct mmtimer *x;
struct k_itimer *t;
- int o;
+ u64 expires, exp, set_completion_time;
+ int i;
restart:
if (n->next == NULL)
@@ -300,7 +309,8 @@ restart:
if (!t->it.mmtimer.incr) {
/* Not an interval timer */
if (!mmtimer_setup(x->cpu, COMPARATOR,
- t->it.mmtimer.expires)) {
+ t->it.mmtimer.expires,
+ &set_completion_time)) {
/* Late setup, fire now */
tasklet_schedule(&n->tasklet);
}
@@ -308,14 +318,23 @@ restart:
}
/* Interval timer */
- o = 0;
- while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
- unsigned long e, e1;
- struct rb_node *next;
- t->it.mmtimer.expires += t->it.mmtimer.incr << o;
- t->it_overrun += 1 << o;
- o++;
- if (o > 20) {
+ i = 0;
+ expires = exp = t->it.mmtimer.expires;
+ while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
+ &set_completion_time)) {
+ int to;
+
+ i++;
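+ /* Aim the next attempt past the time the previous setup completed, backing off further on each retry. */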
+ expires = set_completion_time +
+ mmtimer_interval_retry_increment + (1 << i);
+ /* Calculate overruns as we go. */
+ to = ((u64)(expires - exp) / t->it.mmtimer.incr);
+ if (to) {
+ t->it_overrun += to;
+ t->it.mmtimer.expires += t->it.mmtimer.incr * to;
+ exp = t->it.mmtimer.expires;
+ }
+ if (i > 20) {
printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
t->it.mmtimer.clock = TIMER_OFF;
n->next = rb_next(&x->list);
@@ -323,21 +342,6 @@ restart:
kfree(x);
goto restart;
}
-
- e = t->it.mmtimer.expires;
- next = rb_next(&x->list);
-
- if (next == NULL)
- continue;
-
- e1 = rb_entry(next, struct mmtimer, list)->
- timer->it.mmtimer.expires;
- if (e > e1) {
- n->next = next;
- rb_erase(&x->list, &n->timer_head);
- mmtimer_add_list(x);
- goto restart;
- }
}
}
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index 754c9e2058ed..26b4fce217b6 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -6,10 +6,10 @@
obj-$(CONFIG_MWAVE) += mwave.o
-mwave-objs := mwavedd.o smapi.o tp3780i.o 3780i.o
+mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
# To have the mwave driver disable other uarts if necessary
# EXTRA_CFLAGS += -DMWAVE_FUTZ_WITH_OTHER_DEVICES
# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-EXTRA_CFLAGS += -DMW_TRACE
+ccflags-y := -DMW_TRACE
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 463df27494bd..dd9d75351cd6 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -303,6 +303,7 @@ static void mxser_enable_must_enchance_mode(unsigned long baseio)
outb(oldlcr, baseio + UART_LCR);
}
+#ifdef CONFIG_PCI
static void mxser_disable_must_enchance_mode(unsigned long baseio)
{
u8 oldlcr;
@@ -317,6 +318,7 @@ static void mxser_disable_must_enchance_mode(unsigned long baseio)
outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
+#endif
static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
{
@@ -388,6 +390,7 @@ static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
outb(oldlcr, baseio + UART_LCR);
}
+#ifdef CONFIG_PCI
static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
{
u8 oldlcr;
@@ -404,6 +407,7 @@ static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
*pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
outb(oldlcr, baseio + UART_LCR);
}
+#endif
static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
{
diff --git a/drivers/char/pcmcia/ipwireless/Makefile b/drivers/char/pcmcia/ipwireless/Makefile
index b71eb593643d..db80873d7f20 100644
--- a/drivers/char/pcmcia/ipwireless/Makefile
+++ b/drivers/char/pcmcia/ipwireless/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_IPWIRELESS) += ipwireless.o
-ipwireless-objs := hardware.o main.o network.o tty.o
+ipwireless-y := hardware.o main.o network.o tty.o
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 723152d978a9..f176dbaeb15a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -613,6 +613,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPGETTIME:
to_jiffies = pp->pdev->timeout;
+ memset(&par_timeout, 0, sizeof(par_timeout));
par_timeout.tv_sec = to_jiffies / HZ;
par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
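
The memset() added above closes a small information leak: struct timeval can contain padding, and copy_to_user() would otherwise copy those uninitialized bytes to the caller. A hedged sketch of the general pattern (the struct and field names here are hypothetical, not from ppdev):

    /* Zero the whole reply before filling known fields, so padding
     * never carries stale kernel stack contents to userspace.
     */
    struct example_reply r;

    memset(&r, 0, sizeof(r));
    r.field_a = compute_a();     /* hypothetical fields */
    r.field_b = compute_b();
    if (copy_to_user(ubuf, &r, sizeof(r)))
            return -EFAULT;
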
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 74f00b5ffa36..73dcb0ee41fd 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -25,6 +25,8 @@
#include <linux/time.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/ramoops.h>
#define RAMOOPS_KERNMSG_HDR "===="
#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
@@ -91,11 +93,17 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
cxt->count = (cxt->count + 1) % cxt->max_count;
}
-static int __init ramoops_init(void)
+static int __init ramoops_probe(struct platform_device *pdev)
{
+ struct ramoops_platform_data *pdata = pdev->dev.platform_data;
struct ramoops_context *cxt = &oops_cxt;
int err = -EINVAL;
+ if (pdata) {
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ }
+
if (!mem_size) {
printk(KERN_ERR "ramoops: invalid size specification");
goto fail3;
@@ -142,7 +150,7 @@ fail3:
return err;
}
-static void __exit ramoops_exit(void)
+static int __exit ramoops_remove(struct platform_device *pdev)
{
struct ramoops_context *cxt = &oops_cxt;
@@ -151,8 +159,26 @@ static void __exit ramoops_exit(void)
iounmap(cxt->virt_addr);
release_mem_region(cxt->phys_addr, cxt->size);
+ return 0;
}
+static struct platform_driver ramoops_driver = {
+ .remove = __exit_p(ramoops_remove),
+ .driver = {
+ .name = "ramoops",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ramoops_init(void)
+{
+ return platform_driver_probe(&ramoops_driver, ramoops_probe);
+}
+
+static void __exit ramoops_exit(void)
+{
+ platform_driver_unregister(&ramoops_driver);
+}
module_init(ramoops_init);
module_exit(ramoops_exit);
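
With the driver converted to a platform driver, a board can hand the reserved memory region to ramoops through platform data instead of module parameters. A hedged sketch of board code under that assumption (the "ramoops" device name and the mem_size/mem_address fields come from the hunk; the address and size values are made up):

    static struct ramoops_platform_data board_ramoops_data = {
            .mem_size    = 1024 * 1024,     /* example: 1 MiB carve-out */
            .mem_address = 0x8f000000,      /* example reserved address */
    };

    static struct platform_device board_ramoops_device = {
            .name = "ramoops",
            .id   = -1,
            .dev  = {
                    .platform_data = &board_ramoops_data,
            },
    };

    /* registered from the board's init code:
     *      platform_device_register(&board_ramoops_device);
     */
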
diff --git a/drivers/char/rio/Makefile b/drivers/char/rio/Makefile
index 2d1c5a7cba7d..1661875883fb 100644
--- a/drivers/char/rio/Makefile
+++ b/drivers/char/rio/Makefile
@@ -8,5 +8,5 @@
obj-$(CONFIG_RIO) += rio.o
-rio-objs := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
+rio-y := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
rioparam.o rioroute.o riotable.o riotty.o
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 7c79d243acc9..86308830ac42 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -2345,7 +2345,7 @@ static int __init rp_init(void)
ret = tty_register_driver(rocket_driver);
if (ret < 0) {
printk(KERN_ERR "Couldn't install tty RocketPort driver\n");
- goto err_tty;
+ goto err_controller;
}
#ifdef ROCKET_DEBUG_OPEN
@@ -2380,6 +2380,9 @@ static int __init rp_init(void)
return 0;
err_ttyu:
tty_unregister_driver(rocket_driver);
+err_controller:
+ if (controller)
+ release_region(controller, 4);
err_tty:
put_tty_driver(rocket_driver);
err:
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1746d91205f7..d01fffeac951 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -301,6 +301,8 @@ struct slgt_info {
unsigned int rx_pio;
unsigned int if_mode;
unsigned int base_clock;
+ unsigned int xsync;
+ unsigned int xctrl;
/* device status */
@@ -405,6 +407,8 @@ static MGSL_PARAMS default_params = {
#define TDCSR 0x94 /* tx DMA control/status */
#define RDDAR 0x98 /* rx DMA descriptor address */
#define TDDAR 0x9c /* tx DMA descriptor address */
+#define XSR 0x40 /* extended sync pattern */
+#define XCR 0x44 /* extended control */
#define RXIDLE BIT14
#define RXBREAK BIT14
@@ -517,6 +521,10 @@ static int set_interface(struct slgt_info *info, int if_mode);
static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
+static int get_xsync(struct slgt_info *info, int __user *if_mode);
+static int set_xsync(struct slgt_info *info, int if_mode);
+static int get_xctrl(struct slgt_info *info, int __user *if_mode);
+static int set_xctrl(struct slgt_info *info, int if_mode);
/*
* driver functions
@@ -1056,6 +1064,14 @@ static int ioctl(struct tty_struct *tty, struct file *file,
return get_gpio(info, argp);
case MGSL_IOCWAITGPIO:
return wait_gpio(info, argp);
+ case MGSL_IOCGXSYNC:
+ return get_xsync(info, argp);
+ case MGSL_IOCSXSYNC:
+ return set_xsync(info, (int)arg);
+ case MGSL_IOCGXCTRL:
+ return get_xctrl(info, argp);
+ case MGSL_IOCSXCTRL:
+ return set_xctrl(info, (int)arg);
}
mutex_lock(&info->port.mutex);
switch (cmd) {
@@ -1132,6 +1148,7 @@ static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *us
struct MGSL_PARAMS32 tmp_params;
DBGINFO(("%s get_params32\n", info->device_name));
+ memset(&tmp_params, 0, sizeof(tmp_params));
tmp_params.mode = (compat_ulong_t)info->params.mode;
tmp_params.loopback = info->params.loopback;
tmp_params.flags = info->params.flags;
@@ -1212,12 +1229,16 @@ static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
case MGSL_IOCSGPIO:
case MGSL_IOCGGPIO:
case MGSL_IOCWAITGPIO:
+ case MGSL_IOCGXSYNC:
+ case MGSL_IOCGXCTRL:
case MGSL_IOCSTXIDLE:
case MGSL_IOCTXENABLE:
case MGSL_IOCRXENABLE:
case MGSL_IOCTXABORT:
case TIOCMIWAIT:
case MGSL_IOCSIF:
+ case MGSL_IOCSXSYNC:
+ case MGSL_IOCSXCTRL:
rc = ioctl(tty, file, cmd, arg);
break;
}
@@ -1617,6 +1638,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
+ memset(&new_line, 0, sizeof(new_line));
+
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
@@ -1958,6 +1981,7 @@ static void bh_handler(struct work_struct *work)
case MGSL_MODE_RAW:
case MGSL_MODE_MONOSYNC:
case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
while(rx_get_buf(info));
break;
}
@@ -2357,26 +2381,27 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
- spin_lock(&info->lock);
-
while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
info->irq_occurred = true;
for(i=0; i < info->port_count ; i++) {
if (info->port_array[i] == NULL)
continue;
+ spin_lock(&info->port_array[i]->lock);
if (gsr & (BIT8 << i))
isr_serial(info->port_array[i]);
if (gsr & (BIT16 << (i*2)))
isr_rdma(info->port_array[i]);
if (gsr & (BIT17 << (i*2)))
isr_tdma(info->port_array[i]);
+ spin_unlock(&info->port_array[i]->lock);
}
}
if (info->gpio_present) {
unsigned int state;
unsigned int changed;
+ spin_lock(&info->lock);
while ((changed = rd_reg32(info, IOSR)) != 0) {
DBGISR(("%s iosr=%08x\n", info->device_name, changed));
/* read latched state of GPIO signals */
@@ -2388,22 +2413,24 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
isr_gpio(info->port_array[i], changed, state);
}
}
+ spin_unlock(&info->lock);
}
for(i=0; i < info->port_count ; i++) {
struct slgt_info *port = info->port_array[i];
-
- if (port && (port->port.count || port->netcount) &&
+ if (port == NULL)
+ continue;
+ spin_lock(&port->lock);
+ if ((port->port.count || port->netcount) &&
port->pending_bh && !port->bh_running &&
!port->bh_requested) {
DBGISR(("%s bh queued\n", port->device_name));
schedule_work(&port->task);
port->bh_requested = true;
}
+ spin_unlock(&port->lock);
}
- spin_unlock(&info->lock);
-
DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
return IRQ_HANDLED;
}
@@ -2883,6 +2910,69 @@ static int set_interface(struct slgt_info *info, int if_mode)
return 0;
}
+static int get_xsync(struct slgt_info *info, int __user *xsync)
+{
+ DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
+ if (put_user(info->xsync, xsync))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended sync pattern (1 to 4 bytes) for extended sync mode
+ *
+ * sync pattern is contained in least significant bytes of value
+ * most significant byte of sync pattern is oldest (1st sent/detected)
+ */
+static int set_xsync(struct slgt_info *info, int xsync)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xsync=%x\n", info->device_name, xsync));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xsync = xsync;
+ wr_reg32(info, XSR, xsync);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
+static int get_xctrl(struct slgt_info *info, int __user *xctrl)
+{
+ DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
+ if (put_user(info->xctrl, xctrl))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * set extended control options
+ *
+ * xctrl[31:19] reserved, must be zero
+ * xctrl[18:17] extended sync pattern length in bytes
+ * 00 = 1 byte in xsr[7:0]
+ * 01 = 2 bytes in xsr[15:0]
+ * 10 = 3 bytes in xsr[23:0]
+ * 11 = 4 bytes in xsr[31:0]
+ * xctrl[16] 1 = enable terminal count, 0=disabled
+ * xctrl[15:0] receive terminal count for fixed length packets
+ * value is count minus one (0 = 1 byte packet)
+ * when terminal count is reached, receiver
+ * automatically returns to hunt mode and receive
+ * FIFO contents are flushed to DMA buffers with
+ * end of frame (EOF) status
+ */
+static int set_xctrl(struct slgt_info *info, int xctrl)
+{
+ unsigned long flags;
+
+ DBGINFO(("%s set_xctrl=%x\n", info->device_name, xctrl));
+ spin_lock_irqsave(&info->lock, flags);
+ info->xctrl = xctrl;
+ wr_reg32(info, XCR, xctrl);
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+}
+
/*
* set general purpose IO pin state and direction
*
@@ -2906,7 +2996,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
info->device_name, gpio.state, gpio.smask,
gpio.dir, gpio.dmask));
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
if (gpio.dmask) {
data = rd_reg32(info, IODR);
data |= gpio.dmask & gpio.dir;
@@ -2919,7 +3009,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
data &= ~(gpio.smask & ~gpio.state);
wr_reg32(info, IOVR, data);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
return 0;
}
@@ -3020,7 +3110,7 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
return -EINVAL;
init_cond_wait(&wait, gpio.smask);
- spin_lock_irqsave(&info->lock, flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
/* enable interrupts for watched pins */
wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
/* get current pin states */
@@ -3032,20 +3122,20 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
} else {
/* wait for target state */
add_cond_wait(&info->gpio_wait_q, &wait);
- spin_unlock_irqrestore(&info->lock, flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
schedule();
if (signal_pending(current))
rc = -ERESTARTSYS;
else
gpio.state = wait.data;
- spin_lock_irqsave(&info->lock, flags);
+ spin_lock_irqsave(&info->port_array[0]->lock, flags);
remove_cond_wait(&info->gpio_wait_q, &wait);
}
/* disable all GPIO interrupts if no waiting processes */
if (info->gpio_wait_q == NULL)
wr_reg32(info, IOER, 0);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
rc = -EFAULT;
@@ -3578,7 +3668,6 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
/* copy resource information from first port to others */
for (i = 1; i < port_count; ++i) {
- port_array[i]->lock = port_array[0]->lock;
port_array[i]->irq_level = port_array[0]->irq_level;
port_array[i]->reg_addr = port_array[0]->reg_addr;
alloc_dma_bufs(port_array[i]);
@@ -3763,7 +3852,9 @@ module_exit(slgt_exit);
#define CALC_REGADDR() \
unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
if (addr >= 0x80) \
- reg_addr += (info->port_num) * 32;
+ reg_addr += (info->port_num) * 32; \
+ else if (addr >= 0x40) \
+ reg_addr += (info->port_num) * 16;
static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
{
@@ -4182,7 +4273,13 @@ static void sync_mode(struct slgt_info *info)
/* TCR (tx control)
*
- * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
* 12..10 encoding
* 09 CRC enable
* 08 CRC32
@@ -4197,6 +4294,9 @@ static void sync_mode(struct slgt_info *info)
val = BIT2;
switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
case MGSL_MODE_BISYNC: val |= BIT15; break;
case MGSL_MODE_RAW: val |= BIT13; break;
@@ -4251,7 +4351,13 @@ static void sync_mode(struct slgt_info *info)
/* RCR (rx control)
*
- * 15..13 mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
+ * 15..13 mode
+ * 000=HDLC/SDLC
+ * 001=raw bit synchronous
+ * 010=asynchronous/isochronous
+ * 011=monosync byte synchronous
+ * 100=bisync byte synchronous
+ * 101=xsync byte synchronous
* 12..10 encoding
* 09 CRC enable
* 08 CRC32
@@ -4263,6 +4369,9 @@ static void sync_mode(struct slgt_info *info)
val = 0;
switch(info->params.mode) {
+ case MGSL_MODE_XSYNC:
+ val |= BIT15 + BIT13;
+ break;
case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
case MGSL_MODE_BISYNC: val |= BIT15; break;
case MGSL_MODE_RAW: val |= BIT13; break;
@@ -4679,6 +4788,7 @@ static bool rx_get_buf(struct slgt_info *info)
switch(info->params.mode) {
case MGSL_MODE_MONOSYNC:
case MGSL_MODE_BISYNC:
+ case MGSL_MODE_XSYNC:
/* ignore residue in byte synchronous modes */
if (desc_residue(info->rbufs[i]))
count--;
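
The new MGSL_IOCSXSYNC and MGSL_IOCSXCTRL ioctls take the raw register values laid out in the set_xctrl() comment above. A hedged userspace sketch composing those bits (the ioctl names and bit layout come from the patch; the device node and the 0x7e7e pattern are examples only):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/synclink.h>     /* MGSL_IOCSXSYNC, MGSL_IOCSXCTRL */

    /* Program a 2-byte sync pattern and 64-byte fixed receive frames. */
    int fd = open("/dev/ttySLG0", O_RDWR);  /* example device node */
    int xsync = 0x7e7e;                     /* pattern in the low bytes */
    int xctrl = (1 << 17)                   /* sync length = 2 bytes */
              | (1 << 16)                   /* enable terminal count */
              | (64 - 1);                   /* count is length minus one */

    ioctl(fd, MGSL_IOCSXSYNC, xsync);
    ioctl(fd, MGSL_IOCSXCTRL, xctrl);
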
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 38df8c19e74c..6b68a0fb4611 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
struct kbd_struct * kbd;
unsigned int console;
unsigned char ucval;
+ unsigned int uival;
void __user *up = (void __user *)arg;
int i, perm;
int ret = 0;
@@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
break;
case KDGETMODE:
- ucval = vc->vc_mode;
+ uival = vc->vc_mode;
goto setint;
case KDMAPDISP:
@@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
break;
case KDGKBMODE:
- ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
+ uival = ((kbd->kbdmode == VC_RAW) ? K_RAW :
(kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW :
(kbd->kbdmode == VC_UNICODE) ? K_UNICODE :
K_XLATE);
@@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
break;
case KDGKBMETA:
- ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
+ uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
setint:
- ret = put_user(ucval, (int __user *)arg);
+ ret = put_user(uival, (int __user *)arg);
break;
case KDGETKEYCODE:
@@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
for (i = 0; i < MAX_NR_CONSOLES; ++i)
if (! VT_IS_IN_USE(i))
break;
- ucval = i < MAX_NR_CONSOLES ? (i+1) : -1;
+ uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
goto setint;
/*
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 210338ea222f..81270d221e5a 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,48 +31,6 @@
#include <linux/connector.h>
#include <linux/delay.h>
-
-/*
- * This job is sent to the kevent workqueue.
- * While no event is once sent to any callback, the connector workqueue
- * is not created to avoid a useless waiting kernel task.
- * Once the first event is received, we create this dedicated workqueue which
- * is necessary because the flow of data can be high and we don't want
- * to encumber keventd with that.
- */
-static void cn_queue_create(struct work_struct *work)
-{
- struct cn_queue_dev *dev;
-
- dev = container_of(work, struct cn_queue_dev, wq_creation);
-
- dev->cn_queue = create_singlethread_workqueue(dev->name);
- /* If we fail, we will use keventd for all following connector jobs */
- WARN_ON(!dev->cn_queue);
-}
-
-/*
- * Queue a data sent to a callback.
- * If the connector workqueue is already created, we queue the job on it.
- * Otherwise, we queue the job to kevent and queue the connector workqueue
- * creation too.
- */
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
-{
- struct cn_queue_dev *pdev = cbq->pdev;
-
- if (likely(pdev->cn_queue))
- return queue_work(pdev->cn_queue, work);
-
- /* Don't create the connector workqueue twice */
- if (atomic_inc_return(&pdev->wq_requested) == 1)
- schedule_work(&pdev->wq_creation);
- else
- atomic_dec(&pdev->wq_requested);
-
- return schedule_work(work);
-}
-
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
- /* The first jobs have been sent to kevent, flush them too */
- flush_scheduled_work();
- if (cbq->pdev->cn_queue)
- flush_workqueue(cbq->pdev->cn_queue);
-
+ flush_workqueue(cbq->pdev->cn_queue);
kfree(cbq);
}
@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
- init_waitqueue_head(&dev->wq_created);
dev->nls = nls;
- INIT_WORK(&dev->wq_creation, cn_queue_create);
+ dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
+ if (!dev->cn_queue) {
+ kfree(dev);
+ return NULL;
+ }
return dev;
}
@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
- long timeout;
- DEFINE_WAIT(wait);
-
- /* Flush the first pending jobs queued on kevent */
- flush_scheduled_work();
-
- /* If the connector workqueue creation is still pending, wait for it */
- prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
- timeout = schedule_timeout(HZ * 2);
- if (!timeout && !dev->cn_queue)
- WARN_ON(1);
- }
- finish_wait(&dev->wq_created, &wait);
- if (dev->cn_queue) {
- flush_workqueue(dev->cn_queue);
- destroy_workqueue(dev->cn_queue);
- }
+ flush_workqueue(dev->cn_queue);
+ destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 1d48f40342cb..e16c3fa8d2e3 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
- if (queue_cn_work(__cbq, &__cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__cbq->work))
err = 0;
else
err = -EINVAL;
@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
d->callback = __cbq->data.callback;
d->free = __new_cbq;
- __new_cbq->pdev = __cbq->pdev;
-
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
- if (queue_cn_work(__new_cbq,
- &__new_cbq->work))
+ if (queue_work(dev->cbdev->cn_queue,
+ &__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);
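
The connector no longer bounces its first jobs through keventd and lazily creates a workqueue; cn_queue_alloc_dev() allocates an ordered workqueue up front and both call sites queue on it directly. A minimal sketch of that simplified lifecycle, using only the APIs visible in the hunks (the "example_wq" name and some_work are arbitrary placeholders):

    struct workqueue_struct *wq;

    wq = alloc_ordered_workqueue("example_wq", 0);
    if (!wq)
            return -ENOMEM;

    /* wherever queue_cn_work() used to be called;
     * some_work is an INIT_WORK()-initialized work_struct */
    queue_work(wq, &some_work);

    /* teardown mirrors cn_queue_free_dev(): */
    flush_workqueue(wq);
    destroy_workqueue(wq);
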
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199dcb9f0b83..c63a43823744 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,8 +918,8 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
+ if (!cpu_online(j))
+ continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7b5093664e49..c631f27a3dcc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,6 +30,8 @@
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
@@ -82,6 +84,7 @@ struct cpu_dbs_info_s {
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
int cpu;
unsigned int sample_type:1;
/*
@@ -108,10 +111,12 @@ static struct dbs_tuners {
unsigned int up_threshold;
unsigned int down_differential;
unsigned int ignore_nice;
+ unsigned int sampling_down_factor;
unsigned int powersave_bias;
unsigned int io_is_busy;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
@@ -259,6 +264,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
+show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -340,6 +346,29 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t store_sampling_down_factor(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input, j;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_down_factor = input;
+
+ /* Reset down sampling multiplier in case it was active */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ dbs_info->rate_mult = 1;
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -401,6 +430,7 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
+define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
@@ -409,6 +439,7 @@ static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
+ &sampling_down_factor.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
&io_is_busy.attr,
@@ -562,6 +593,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Check for frequency increase */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ this_dbs_info->rate_mult =
+ dbs_tuners_ins.sampling_down_factor;
dbs_freq_increase(policy, policy->max);
return;
}
@@ -584,6 +619,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ /* No longer fully busy, reset rate_mult */
+ this_dbs_info->rate_mult = 1;
+
if (freq_next < policy->min)
freq_next = policy->min;
@@ -607,7 +645,8 @@ static void do_dbs_timer(struct work_struct *work)
int sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
if (num_online_cpus() > 1)
delay -= jiffies % delay;
@@ -711,6 +750,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
}
this_dbs_info->cpu = cpu;
+ this_dbs_info->rate_mult = 1;
ondemand_powersave_bias_init_cpu(cpu);
/*
* Start the timerschedule work, when this governor
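
sampling_down_factor stretches the sampling interval while a CPU is already running at its maximum frequency, so the governor does not immediately probe for an opportunity to scale back down. A sketch of the effective delay computed in do_dbs_timer(), with example numbers (the tunable is exposed through the ondemand governor's sysfs directory):

    /* With sampling_rate = 10000 us and sampling_down_factor = 10, a fully
     * loaded CPU is re-evaluated roughly every 100 ms instead of every 10 ms.
     */
    unsigned int sampling_rate = 10000;  /* microseconds, example value */
    unsigned int rate_mult = 10;         /* == sampling_down_factor at max speed */
    int delay = usecs_to_jiffies(sampling_rate * rate_mult);

    if (num_online_cpus() > 1)
            delay -= jiffies % delay;    /* keep CPUs sampling on the same jiffy */
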
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 0eac3da566ba..a84250a5dd51 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1467,7 +1467,7 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return -EINVAL;
while (size) {
- copy = min(drest, min(size, dst->length));
+ copy = min3(drest, size, dst->length);
size -= copy;
drest -= copy;
@@ -1729,7 +1729,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
return -EINVAL;
while (size) {
- copy = min(srest, min(dst->length, size));
+ copy = min3(srest, dst->length, size);
daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
memcpy(daddr + dst->offset + offset, saddr, copy);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..79d1542f31c0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,15 +46,22 @@ config INTEL_MID_DMAC
If unsure, say N.
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
bool
+config AMBA_PL08X
+ bool "ARM PrimeCell PL080 or PL081 support"
+ depends on ARM_AMBA && EXPERIMENTAL
+ select DMA_ENGINE
+ help
+ Platform has a PL08x DMAC device
+ which can provide DMA engine support
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
select DCA
- select ASYNC_TX_DISABLE_CHANNEL_SWITCH
select ASYNC_TX_DISABLE_PQ_VAL_DMA
select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
@@ -69,6 +76,7 @@ config INTEL_IOP_ADMA
tristate "Intel IOP ADMA support"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
Enable support for the Intel(R) IOP Series RAID engines.
@@ -93,6 +101,7 @@ config FSL_DMA
tristate "Freescale Elo and Elo Plus DMA support"
depends on FSL_SOC
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Freescale Elo and Elo Plus DMA controllers.
The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -109,6 +118,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
depends on 440SPe || 440SP
select DMA_ENGINE
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
Enable support for the AMCC PPC440SPe RAID engines.
@@ -195,6 +206,22 @@ config PCH_DMA
help
Enable support for the Topcliff PCH DMA engine.
+config IMX_SDMA
+ tristate "i.MX SDMA support"
+ depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ select DMA_ENGINE
+ help
+ Support the i.MX SDMA engine. This engine is integrated into
+ Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+ tristate "i.MX DMA support"
+ depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+ select DMA_ENGINE
+ help
+ Support the i.MX DMA engine. This engine is integrated into
+ Freescale i.MX1/21/27 chips.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..a8a84f4587f2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644
index 000000000000..b605cc9ac3a2
--- /dev/null
+++ b/drivers/dma/amba-pl08x.c
@@ -0,0 +1,2167 @@
+/*
+ * Copyright (c) 2006 ARM Ltd.
+ * Copyright (c) 2010 ST-Ericsson SA
+ *
+ * Author: Peter Pearse <peter.pearse@arm.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is in this distribution in the
+ * file called COPYING.
+ *
+ * Documentation: ARM DDI 0196G == PL080
+ * Documentation: ARM DDI 0218E == PL081
+ *
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
+ * any channel.
+ *
+ * The PL080 has 8 channels available for simultaneous use, and the PL081
+ * has only two channels. So on these DMA controllers the number of channels
+ * and the number of incoming DMA signals are two totally different things.
+ * It is usually not possible to handle all physical signals at once,
+ * so a multiplexing scheme with possible denial of use is necessary.
+ *
+ * The PL080 has a dual bus master, PL081 has a single master.
+ *
+ * Memory to peripheral transfer may be visualized as
+ * Get data from memory to DMAC
+ * Until no data left
+ * On burst request from peripheral
+ * Destination burst from DMAC to peripheral
+ * Clear burst request
+ * Raise terminal count interrupt
+ *
+ * For peripherals with a FIFO:
+ * Source burst size == half the depth of the peripheral FIFO
+ * Destination burst size == the depth of the peripheral FIFO
+ *
+ * (Bursts are irrelevant for mem to mem transfers - there are no burst
+ * signals, the DMA controller will simply facilitate its AHB master.)
+ *
+ * ASSUMES default (little) endianness for DMA transfers
+ *
+ * Only DMAC flow control is implemented
+ *
+ * Global TODO:
+ * - Break out common code from arch/arm/mach-s3c64xx and share
+ */
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/amba/bus.h>
+#include <linux/dmaengine.h>
+#include <linux/amba/pl08x.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/hardware/pl080.h>
+#include <asm/dma.h>
+#include <asm/mach/dma.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+
+#define DRIVER_NAME "pl08xdmac"
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL08x derivatives
+ * @name: the name of this specific variant
+ * @channels: the number of channels available in this variant
+ * @dualmaster: whether this version supports dual AHB masters
+ * or not.
+ */
+struct vendor_data {
+ char *name;
+ u8 channels;
+ bool dualmaster;
+};
+
+/*
+ * PL08X private data structures
+ * An LLI struct - see pl08x TRM
+ * Note that next uses bit[0] as a bus bit,
+ * start & end do not - their bus bit info
+ * is in cctl
+ */
+struct lli {
+ dma_addr_t src;
+ dma_addr_t dst;
+ dma_addr_t next;
+ u32 cctl;
+};
+
+/**
+ * struct pl08x_driver_data - the local state holder for the PL08x
+ * @slave: slave engine for this instance
+ * @memcpy: memcpy engine for this instance
+ * @base: virtual memory base (remapped) for the PL08x
+ * @adev: the corresponding AMBA (PrimeCell) bus entry
+ * @vd: vendor data for this PL08x variant
+ * @pd: platform data passed in from the platform/machine
+ * @phy_chans: array of data for the physical channels
+ * @pool: a pool for the LLI descriptors
+ * @pool_ctr: counter of LLIs in the pool
+ * @lock: a spinlock for this struct
+ */
+struct pl08x_driver_data {
+ struct dma_device slave;
+ struct dma_device memcpy;
+ void __iomem *base;
+ struct amba_device *adev;
+ struct vendor_data *vd;
+ struct pl08x_platform_data *pd;
+ struct pl08x_phy_chan *phy_chans;
+ struct dma_pool *pool;
+ int pool_ctr;
+ spinlock_t lock;
+};
+
+/*
+ * PL08X specific defines
+ */
+
+/*
+ * Memory boundaries: the manual for PL08x says that the controller
+ * cannot read past a 1KiB boundary, so these defines are used to
+ * create transfer LLIs that do not cross such boundaries.
+ */
+#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
+#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
+
+/* Minimum period between work queue runs */
+#define PL08X_WQ_PERIODMIN 20
+
+/* Size (bytes) of each LLI buffer allocated for one transfer */
+#define PL08X_LLI_TSFR_SIZE 0x2000
+
+/* Maximum number of times we call dma_pool_alloc on this pool without freeing */
+#define PL08X_MAX_ALLOCS 0x40
+#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define PL08X_ALIGN 8
+
+static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct pl08x_dma_chan, chan);
+}
+
+/*
+ * Physical channel handling
+ */
+
+/* Whether a certain channel is busy or not */
+static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
+{
+ unsigned int val;
+
+ val = readl(ch->base + PL080_CH_CONFIG);
+ return val & PL080_CONFIG_ACTIVE;
+}
+
+/*
+ * Set the initial DMA register values i.e. those for the first LLI
+ * The next lli pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed
+ */
+static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(ch))
+ ;
+
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=%08x, cdst=%08x, "
+ "cctl=%08x, clli=%08x, ccfg=%08x\n",
+ ch->id,
+ ch->csrc,
+ ch->cdst,
+ ch->cctl,
+ ch->clli,
+ ch->ccfg);
+
+ writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
+ writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
+ writel(ch->clli, ch->base + PL080_CH_LLI);
+ writel(ch->cctl, ch->base + PL080_CH_CONTROL);
+ writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
+}
+
+static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_channel_data *cd = plchan->cd;
+ struct pl08x_phy_chan *phychan = plchan->phychan;
+ struct pl08x_txd *txd = plchan->at;
+
+ /* Copy the basic control register calculated at transfer config */
+ phychan->csrc = txd->csrc;
+ phychan->cdst = txd->cdst;
+ phychan->clli = txd->clli;
+ phychan->cctl = txd->cctl;
+
+ /* Assign the signal to the proper control registers */
+ phychan->ccfg = cd->ccfg;
+ phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
+ phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
+ /* If it wasn't set from AMBA, ignore it */
+ if (txd->direction == DMA_TO_DEVICE)
+ /* Select signal as destination */
+ phychan->ccfg |=
+ (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
+ else if (txd->direction == DMA_FROM_DEVICE)
+ /* Select signal as source */
+ phychan->ccfg |=
+ (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
+ /* Always enable error interrupts */
+ phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
+ /* Always enable terminal interrupts */
+ phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
+}
+
+/*
+ * Enable the DMA channel
+ * Assumes all other configuration bits have been set
+ * as desired before this code is called
+ */
+static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /*
+ * Do not access config register until channel shows as disabled
+ */
+ while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
+ ;
+
+ /*
+ * Do not access config register until channel shows as inactive
+ */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
+ val = readl(ch->base + PL080_CH_CONFIG);
+
+ writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+}
+
+/*
+ * Overall DMAC remains enabled always.
+ *
+ * Disabling individual channels could lose data.
+ *
+ * Disable the peripheral DMA after disabling the DMAC
+ * in order to allow the DMAC FIFO to drain, and
+ * hence allow the channel to show inactive
+ *
+ */
+static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /* Set the HALT bit and wait for the FIFO to drain */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val |= PL080_CONFIG_HALT;
+ writel(val, ch->base + PL080_CH_CONFIG);
+
+ /* Wait for channel inactive */
+ while (pl08x_phy_channel_busy(ch))
+ ;
+}
+
+static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ /* Clear the HALT bit */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val &= ~PL080_CONFIG_HALT;
+ writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+
+/* Stops the channel */
+static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+{
+ u32 val;
+
+ pl08x_pause_phy_chan(ch);
+
+ /* Disable channel */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val &= ~PL080_CONFIG_ENABLE;
+ val &= ~PL080_CONFIG_ERR_IRQ_MASK;
+ val &= ~PL080_CONFIG_TC_IRQ_MASK;
+ writel(val, ch->base + PL080_CH_CONFIG);
+}
+
+static inline u32 get_bytes_in_cctl(u32 cctl)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+/* The channel should be paused when calling this */
+static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_phy_chan *ch;
+ struct pl08x_txd *txdi = NULL;
+ struct pl08x_txd *txd;
+ unsigned long flags;
+ u32 bytes = 0;
+
+ spin_lock_irqsave(&plchan->lock, flags);
+
+ ch = plchan->phychan;
+ txd = plchan->at;
+
+ /*
+ * Next follow the LLIs to get the number of pending bytes in the
+ * currently active transaction.
+ */
+ if (ch && txd) {
+ struct lli *llis_va = txd->llis_va;
+ struct lli *llis_bus = (struct lli *) txd->llis_bus;
+ u32 clli = readl(ch->base + PL080_CH_LLI);
+
+ /* First get the bytes in the current active LLI */
+ bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
+
+ if (clli) {
+ int i = 0;
+
+ /* Forward to the LLI pointed to by clli */
+ while ((clli != (u32) &(llis_bus[i])) &&
+ (i < MAX_NUM_TSFR_LLIS))
+ i++;
+
+ while (clli) {
+ bytes += get_bytes_in_cctl(llis_va[i].cctl);
+ /*
+ * A clli of 0x00000000 will terminate the
+ * LLI list
+ */
+ clli = llis_va[i].next;
+ i++;
+ }
+ }
+ }
+
+ /* Sum up all queued transactions */
+ if (!list_empty(&plchan->desc_list)) {
+ list_for_each_entry(txdi, &plchan->desc_list, node) {
+ bytes += txdi->len;
+ }
+
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ return bytes;
+}
+
+/*
+ * Allocate a physical channel for a virtual channel
+ */
+static struct pl08x_phy_chan *
+pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *virt_chan)
+{
+ struct pl08x_phy_chan *ch = NULL;
+ unsigned long flags;
+ int i;
+
+ /*
+ * Try to locate a physical channel to be used for
+ * this transfer. If all are taken return NULL and
+ * the requester will have to cope by using some fallback
+ * PIO mode or retrying later.
+ */
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ ch = &pl08x->phy_chans[i];
+
+ spin_lock_irqsave(&ch->lock, flags);
+
+ if (!ch->serving) {
+ ch->serving = virt_chan;
+ ch->signal = -1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ if (i == pl08x->vd->channels) {
+ /* No physical channel available, cope with it */
+ return NULL;
+ }
+
+ return ch;
+}
+
+static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *ch)
+{
+ unsigned long flags;
+
+ /* Stop the channel and clear its interrupts */
+ pl08x_stop_phy_chan(ch);
+ writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
+ writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+
+ /* Mark it as free */
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->serving = NULL;
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+/*
+ * LLI handling
+ */
+
+static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
+{
+ switch (coded) {
+ case PL080_WIDTH_8BIT:
+ return 1;
+ case PL080_WIDTH_16BIT:
+ return 2;
+ case PL080_WIDTH_32BIT:
+ return 4;
+ default:
+ break;
+ }
+ BUG();
+ return 0;
+}
+
+static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
+ u32 tsize)
+{
+ u32 retbits = cctl;
+
+ /* Remove all src, dst and transfersize bits */
+ retbits &= ~PL080_CONTROL_DWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_SWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+ /* Then set the bits according to the parameters */
+ switch (srcwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ switch (dstwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
+ return retbits;
+}
+
+/*
+ * Autoselect a master bus to use for the transfer
+ * this prefers the destination bus if both available
+ * if fixed address on one bus the other will be chosen
+ */
+void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
+ struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
+ struct pl08x_bus_data **sbus, u32 cctl)
+{
+ if (!(cctl & PL080_CONTROL_DST_INCR)) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else {
+ if (dst_bus->buswidth == 4) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else if (src_bus->buswidth == 4) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else if (dst_bus->buswidth == 2) {
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ } else if (src_bus->buswidth == 2) {
+ *mbus = src_bus;
+ *sbus = dst_bus;
+ } else {
+ /* src_bus->buswidth == 1 */
+ *mbus = dst_bus;
+ *sbus = src_bus;
+ }
+ }
+}
+
+/*
+ * Fills in one LLI for a certain transfer descriptor
+ * and advance the counter
+ */
+int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd, int num_llis, int len,
+ u32 cctl, u32 *remainder)
+{
+ struct lli *llis_va = txd->llis_va;
+ struct lli *llis_bus = (struct lli *) txd->llis_bus;
+
+ BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
+
+ llis_va[num_llis].cctl = cctl;
+ llis_va[num_llis].src = txd->srcbus.addr;
+ llis_va[num_llis].dst = txd->dstbus.addr;
+
+ /*
+ * On versions with dual masters, you can optionally AND on
+ * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
+ * in new LLIs with that controller, but we always try to
+ * choose AHB1 to point into memory. The idea is to have AHB2
+ * fixed on the peripheral and AHB1 messing around in the
+ * memory. So we don't manipulate this bit currently.
+ */
+
+ llis_va[num_llis].next =
+ (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ txd->srcbus.addr += len;
+ if (cctl & PL080_CONTROL_DST_INCR)
+ txd->dstbus.addr += len;
+
+ *remainder -= len;
+
+ return num_llis + 1;
+}
+
+/*
+ * Return number of bytes to fill to boundary, or len
+ */
+static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+{
+ u32 boundary;
+
+ boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
+ << PL08X_BOUNDARY_SHIFT;
+
+ if (boundary < addr + len)
+ return boundary - addr;
+ else
+ return len;
+}
+
+/*
+ * This fills in the table of LLIs for the transfer descriptor
+ * Note that we assume we never have to change the burst sizes
+ * Return 0 for error
+ */
+static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd)
+{
+ struct pl08x_channel_data *cd = txd->cd;
+ struct pl08x_bus_data *mbus, *sbus;
+ u32 remainder;
+ int num_llis = 0;
+ u32 cctl;
+ int max_bytes_per_lli;
+ int total_bytes = 0;
+ struct lli *llis_va;
+ struct lli *llis_bus;
+
+ if (!txd) {
+ dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
+ return 0;
+ }
+
+ txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
+ &txd->llis_bus);
+ if (!txd->llis_va) {
+ dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
+ return 0;
+ }
+
+ pl08x->pool_ctr++;
+
+ /*
+ * Initialize bus values for this transfer
+ * from the passed optimal values
+ */
+ if (!cd) {
+ dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
+ return 0;
+ }
+
+ /* Get the default CCTL from the platform data */
+ cctl = cd->cctl;
+
+ /*
+ * On the PL080 we have two bus masters and we
+ * should select one for source and one for
+ * destination. We try to use AHB2 for the
+ * bus which does not increment (typically the
+ * peripheral) else we just choose something.
+ */
+ cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
+ if (pl08x->vd->dualmaster) {
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ /* Source increments, use AHB2 for destination */
+ cctl |= PL080_CONTROL_DST_AHB2;
+ else if (cctl & PL080_CONTROL_DST_INCR)
+ /* Destination increments, use AHB2 for source */
+ cctl |= PL080_CONTROL_SRC_AHB2;
+ else
+ /* Just pick something, source AHB1 dest AHB2 */
+ cctl |= PL080_CONTROL_DST_AHB2;
+ }
+
+ /* Find maximum width of the source bus */
+ txd->srcbus.maxwidth =
+ pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
+ PL080_CONTROL_SWIDTH_SHIFT);
+
+ /* Find maximum width of the destination bus */
+ txd->dstbus.maxwidth =
+ pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
+ PL080_CONTROL_DWIDTH_SHIFT);
+
+ /* Set up the bus widths to the maximum */
+ txd->srcbus.buswidth = txd->srcbus.maxwidth;
+ txd->dstbus.buswidth = txd->dstbus.maxwidth;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
+ __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+
+
+ /*
+ * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
+ */
+ max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+ PL080_CONTROL_TRANSFER_SIZE_MASK;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s max bytes per lli = %d\n",
+ __func__, max_bytes_per_lli);
+
+ /* We need to count this down to zero */
+ remainder = txd->len;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s remainder = %d\n",
+ __func__, remainder);
+
+ /*
+ * Choose bus to align to
+ * - prefers destination bus if both available
+ * - if fixed address on one bus chooses other
+ * - modifies cctl to choose an appropriate master
+ */
+ pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
+ &mbus, &sbus, cctl);
+
+
+ /*
+ * The lowest bit of the LLI register
+ * is also used to indicate which master to
+ * use for reading the LLIs.
+ */
+
+ if (txd->len < mbus->buswidth) {
+ /*
+ * Less than a bus width available
+ * - send as single bytes
+ */
+ while (remainder) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s single byte LLIs for a transfer of "
+ "less than a bus width (remain %08x)\n",
+ __func__, remainder);
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ num_llis =
+ pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
+ cctl, &remainder);
+ total_bytes++;
+ }
+ } else {
+ /*
+ * Make one byte LLIs until master bus is aligned
+ * - slave will then be aligned also
+ */
+ while ((mbus->addr) % (mbus->buswidth)) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s adjustment lli for less than bus width "
+ "(remain %08x)\n",
+ __func__, remainder);
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ num_llis = pl08x_fill_lli_for_desc
+ (pl08x, txd, num_llis, 1, cctl, &remainder);
+ total_bytes++;
+ }
+
+ /*
+ * Master now aligned
+ * - if slave is not then we must set its width down
+ */
+ if (sbus->addr % sbus->buswidth) {
+ dev_dbg(&pl08x->adev->dev,
+ "%s set down bus width to one byte\n",
+ __func__);
+
+ sbus->buswidth = 1;
+ }
+
+ /*
+ * Make largest possible LLIs until less than one bus
+ * width left
+ */
+ while (remainder > (mbus->buswidth - 1)) {
+ int lli_len, target_len;
+ int tsize;
+ int odd_bytes;
+
+ /*
+ * If enough left try to send max possible,
+ * otherwise try to send the remainder
+ */
+ target_len = remainder;
+ if (remainder > max_bytes_per_lli)
+ target_len = max_bytes_per_lli;
+
+ /*
+ * Set bus lengths for incrementing buses
+ * to number of bytes which fill to next memory
+ * boundary
+ */
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ txd->srcbus.fill_bytes =
+ pl08x_pre_boundary(
+ txd->srcbus.addr,
+ remainder);
+ else
+ txd->srcbus.fill_bytes =
+ max_bytes_per_lli;
+
+ if (cctl & PL080_CONTROL_DST_INCR)
+ txd->dstbus.fill_bytes =
+ pl08x_pre_boundary(
+ txd->dstbus.addr,
+ remainder);
+ else
+ txd->dstbus.fill_bytes =
+ max_bytes_per_lli;
+
+ /*
+ * Find the nearest
+ */
+ lli_len = min(txd->srcbus.fill_bytes,
+ txd->dstbus.fill_bytes);
+
+ BUG_ON(lli_len > remainder);
+
+ if (lli_len <= 0) {
+ dev_err(&pl08x->adev->dev,
+ "%s lli_len is %d, <= 0\n",
+ __func__, lli_len);
+ return 0;
+ }
+
+ if (lli_len == target_len) {
+ /*
+ * Can send what we wanted
+ */
+ /*
+ * Maintain alignment
+ */
+ lli_len = (lli_len/mbus->buswidth) *
+ mbus->buswidth;
+ odd_bytes = 0;
+ } else {
+ /*
+ * So now we know how many bytes to transfer
+ * to get to the nearest boundary
+ * The next lli will go past the boundary
+ * - however we may be working to a boundary
+ * on the slave bus
+ * We need to ensure the master stays aligned
+ */
+ odd_bytes = lli_len % mbus->buswidth;
+ /*
+ * - and that we are working in multiples
+ * of the bus widths
+ */
+ lli_len -= odd_bytes;
+
+ }
+
+ if (lli_len) {
+ /*
+ * Check against minimum bus alignment:
+ * Calculate actual transfer size in relation
+ * to bus width and get a maximum remainder of
+ * the smallest bus width - 1
+ */
+ /* FIXME: use round_down()? */
+ tsize = lli_len / min(mbus->buswidth,
+ sbus->buswidth);
+ lli_len = tsize * min(mbus->buswidth,
+ sbus->buswidth);
+
+ if (target_len != lli_len) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+ __func__, target_len, lli_len, txd->len);
+ }
+
+ cctl = pl08x_cctl_bits(cctl,
+ txd->srcbus.buswidth,
+ txd->dstbus.buswidth,
+ tsize);
+
+ dev_vdbg(&pl08x->adev->dev,
+ "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
+ __func__, lli_len, remainder);
+ num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
+ num_llis, lli_len, cctl,
+ &remainder);
+ total_bytes += lli_len;
+ }
+
+
+ if (odd_bytes) {
+ /*
+ * Creep past the boundary,
+ * maintaining master alignment
+ */
+ int j;
+ for (j = 0; (j < mbus->buswidth)
+ && (remainder); j++) {
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundardy, single byte (remain %08x)\n",
+ __func__, remainder);
+ num_llis =
+ pl08x_fill_lli_for_desc(pl08x,
+ txd, num_llis, 1,
+ cctl, &remainder);
+ total_bytes++;
+ }
+ }
+ }
+
+ /*
+ * Send any odd bytes
+ */
+ if (remainder < 0) {
+ dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
+ __func__, remainder);
+ return 0;
+ }
+
+ while (remainder) {
+ cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundardy, single odd byte (remain %d)\n",
+ __func__, remainder);
+ num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
+ 1, cctl, &remainder);
+ total_bytes++;
+ }
+ }
+ if (total_bytes != txd->len) {
+ dev_err(&pl08x->adev->dev,
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+ __func__, total_bytes, txd->len);
+ return 0;
+ }
+
+ if (num_llis >= MAX_NUM_TSFR_LLIS) {
+ dev_err(&pl08x->adev->dev,
+ "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+ __func__, (u32) MAX_NUM_TSFR_LLIS);
+ return 0;
+ }
+ /*
+ * Decide whether this is a loop or a terminated transfer
+ */
+ llis_va = txd->llis_va;
+ llis_bus = (struct lli *) txd->llis_bus;
+
+ if (cd->circular_buffer) {
+ /*
+ * Loop the circular buffer so that the next element
+ * points back to the beginning of the LLI.
+ */
+ llis_va[num_llis - 1].next =
+ (dma_addr_t)((unsigned int)&(llis_bus[0]));
+ } else {
+ /*
+ * On non-circular buffers, the final LLI terminates
+ * the LLI.
+ */
+ llis_va[num_llis - 1].next = 0;
+ /*
+ * The final LLI element shall also fire an interrupt
+ */
+ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ }
+
+ /* Now store the channel register values */
+ txd->csrc = llis_va[0].src;
+ txd->cdst = llis_va[0].dst;
+ if (num_llis > 1)
+ txd->clli = llis_va[0].next;
+ else
+ txd->clli = 0;
+
+ txd->cctl = llis_va[0].cctl;
+ /* ccfg will be set at physical channel allocation time */
+
+#ifdef VERBOSE_DEBUG
+ {
+ int i;
+
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+ i,
+ &llis_va[i],
+ llis_va[i].src,
+ llis_va[i].dst,
+ llis_va[i].cctl,
+ llis_va[i].next
+ );
+ }
+ }
+#endif
+
+ return num_llis;
+}
+
+/* You should call this with the struct pl08x lock held */
+static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
+ struct pl08x_txd *txd)
+{
+ if (!txd)
+ dev_err(&pl08x->adev->dev,
+ "%s no descriptor to free\n",
+ __func__);
+
+ /* Free the LLI */
+ dma_pool_free(pl08x->pool, txd->llis_va,
+ txd->llis_bus);
+
+ pl08x->pool_ctr--;
+
+ kfree(txd);
+}
+
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_txd *txdi = NULL;
+ struct pl08x_txd *next;
+
+ if (!list_empty(&plchan->desc_list)) {
+ list_for_each_entry_safe(txdi,
+ next, &plchan->desc_list, node) {
+ list_del(&txdi->node);
+ pl08x_free_txd(pl08x, txdi);
+ }
+
+ }
+}
+
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void pl08x_free_chan_resources(struct dma_chan *chan)
+{
+}
+
+/*
+ * This should be called with the channel plchan->lock held
+ */
+static int prep_phy_channel(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_phy_chan *ch;
+ int ret;
+
+ /* Check if we already have a channel */
+ if (plchan->phychan)
+ return 0;
+
+ ch = pl08x_get_phy_channel(pl08x, plchan);
+ if (!ch) {
+ /* No physical channel available, cope with it */
+ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+ return -EBUSY;
+ }
+
+ /*
+ * OK we have a physical channel: for memcpy() this is all we
+ * need, but for slaves the physical signals may be muxed!
+ * Can the platform allow us to use this channel?
+ */
+ if (plchan->slave &&
+ ch->signal < 0 &&
+ pl08x->pd->get_signal) {
+ ret = pl08x->pd->get_signal(plchan);
+ if (ret < 0) {
+ dev_dbg(&pl08x->adev->dev,
+ "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
+ ch->id, plchan->name);
+ /* Release physical channel & return */
+ pl08x_put_phy_channel(pl08x, ch);
+ return -EBUSY;
+ }
+ ch->signal = ret;
+ }
+
+ dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
+ ch->id,
+ ch->signal,
+ plchan->name);
+
+ plchan->phychan = ch;
+
+ return 0;
+}
+
+static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+
+ atomic_inc(&plchan->last_issued);
+ tx->cookie = atomic_read(&plchan->last_issued);
+ /* This unlock follows the lock in the prep() function */
+ spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+
+ return tx->cookie;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *retval = NULL;
+
+ return retval;
+}
+
+/*
+ * Code accessing dma_async_is_complete() in a tight loop
+ * may give problems - could schedule where indicated.
+ * If slaves are relying on interrupts to signal completion this
+ * function must not be called with interrupts disabled
+ */
+static enum dma_status
+pl08x_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+ u32 bytesleft = 0;
+
+ last_used = atomic_read(&plchan->last_issued);
+ last_complete = plchan->lc;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS) {
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return ret;
+ }
+
+ /*
+ * schedule(); could be inserted here
+ */
+
+ /*
+ * This cookie not complete yet
+ */
+ last_used = atomic_read(&plchan->last_issued);
+ last_complete = plchan->lc;
+
+ /* Get number of bytes left in the active transactions and queue */
+ bytesleft = pl08x_getbytes_chan(plchan);
+
+ dma_set_tx_state(txstate, last_complete, last_used,
+ bytesleft);
+
+ if (plchan->state == PL08X_CHAN_PAUSED)
+ return DMA_PAUSED;
+
+ /* Whether waiting or running, we're in progress */
+ return DMA_IN_PROGRESS;
+}
+
+/* PrimeCell DMA extension */
+struct burst_table {
+ int burstwords;
+ u32 reg;
+};
+
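+/*
+ * Burst sizes supported by the PL08x, largest first, so that
+ * dma_set_runtime_config() below can pick the first entry that does
+ * not exceed the requested maxburst.
+ */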
+static const struct burst_table burst_sizes[] = {
+ {
+ .burstwords = 256,
+ .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 128,
+ .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 64,
+ .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 32,
+ .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 16,
+ .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 8,
+ .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 4,
+ .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+ {
+ .burstwords = 1,
+ .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ },
+};
+
+static void dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_channel_data *cd = plchan->cd;
+ enum dma_slave_buswidth addr_width;
+ u32 maxburst;
+ u32 cctl = 0;
+ /* Mask out all except src and dst channel */
+ u32 ccfg = cd->ccfg & 0x000003DEU;
+ int i = 0;
+
+ /* Transfer direction */
+ plchan->runtime_direction = config->direction;
+ if (config->direction == DMA_TO_DEVICE) {
+ plchan->runtime_addr = config->dst_addr;
+ cctl |= PL080_CONTROL_SRC_INCR;
+ ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr_width = config->dst_addr_width;
+ maxburst = config->dst_maxburst;
+ } else if (config->direction == DMA_FROM_DEVICE) {
+ plchan->runtime_addr = config->src_addr;
+ cctl |= PL080_CONTROL_DST_INCR;
+ ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ addr_width = config->src_addr_width;
+ maxburst = config->src_maxburst;
+ } else {
+ dev_err(&pl08x->adev->dev,
+ "bad runtime_config: alien transfer direction\n");
+ return;
+ }
+
+ switch (addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
+ (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
+ break;
+ default:
+ dev_err(&pl08x->adev->dev,
+ "bad runtime_config: alien address width\n");
+ return;
+ }
+
+ /*
+ * Now decide on a maxburst:
+ * If this channel will only request single transfers, set
+ * this down to ONE element.
+ */
+ if (plchan->cd->single) {
+ cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
+ (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ } else {
+ while (i < ARRAY_SIZE(burst_sizes)) {
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+ i++;
+ }
+ cctl |= burst_sizes[i].reg;
+ }
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ cctl &= ~PL080_CONTROL_PROT_MASK;
+ cctl |= PL080_CONTROL_PROT_SYS;
+
+ /* Modify the default channel data to fit PrimeCell request */
+ cd->cctl = cctl;
+ cd->ccfg = ccfg;
+
+ dev_dbg(&pl08x->adev->dev,
+ "configured channel %s (%s) for %s, data width %d, "
+ "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
+ dma_chan_name(chan), plchan->name,
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ addr_width,
+ maxburst,
+ cctl, ccfg);
+}
+
+/*
+ * Slave transactions callback to the slave device to allow
+ * synchronization of slave DMA signals with the DMAC enable
+ */
+static void pl08x_issue_pending(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ unsigned long flags;
+
+ spin_lock_irqsave(&plchan->lock, flags);
+ /* Something is already active */
+ if (plchan->at) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
+ return;
+ }
+
+ /* Didn't get a physical channel so waiting for it ... */
+ if (plchan->state == PL08X_CHAN_WAITING) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
+ return;
+ }
+
+ /* Take the first element in the queue and execute it */
+ if (!list_empty(&plchan->desc_list)) {
+ struct pl08x_txd *next;
+
+ next = list_first_entry(&plchan->desc_list,
+ struct pl08x_txd,
+ node);
+ list_del(&next->node);
+ plchan->at = next;
+ plchan->state = PL08X_CHAN_RUNNING;
+
+ /* Configure the physical channel for the active txd */
+ pl08x_config_phychan_for_txd(plchan);
+ pl08x_set_cregs(pl08x, plchan->phychan);
+ pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+}
+
+static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
+ struct pl08x_txd *txd)
+{
+ int num_llis;
+ struct pl08x_driver_data *pl08x = plchan->host;
+ int ret;
+
+ num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
+
+ if (!num_llis)
+ return -EINVAL;
+
+ spin_lock_irqsave(&plchan->lock, plchan->lockflags);
+
+ /*
+ * If this device is not using a circular buffer then
+ * queue this new descriptor for transfer.
+ * The descriptor for a circular buffer continues
+ * to be used until the channel is freed.
+ */
+ if (txd->cd->circular_buffer)
+ dev_err(&pl08x->adev->dev,
+ "%s attempting to queue a circular buffer\n",
+ __func__);
+ else
+ list_add_tail(&txd->node,
+ &plchan->desc_list);
+
+ /*
+ * See if we already have a physical channel allocated,
+ * else this is the time to try to get one.
+ */
+ ret = prep_phy_channel(plchan, txd);
+ if (ret) {
+ /*
+ * No physical channel available: memcpy transfers will be
+ * stacked up until a channel becomes available to handle
+ * them. Slave transfers, however, may have been denied due
+ * to platform channel muxing restrictions; since there is
+ * no guarantee that this will ever be resolved, and since
+ * the signal must be acquired AFTER acquiring the physical
+ * channel, we NACK them with -EBUSY here. The drivers can
+ * always retry the prep() call if they are keen on doing
+ * the transfer using DMA.
+ */
+ if (plchan->slave) {
+ pl08x_free_txd_list(pl08x, plchan);
+ spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+ return -EBUSY;
+ }
+ /* Do this memcpy whenever there is a channel ready */
+ plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting = txd;
+ } else
+ /*
+ * Else we're all set, paused and ready to roll,
+ * status will switch to PL08X_CHAN_RUNNING when
+ * we call issue_pending(). If there is something
+ * running on the channel already we don't change
+ * its state.
+ */
+ if (plchan->state == PL08X_CHAN_IDLE)
+ plchan->state = PL08X_CHAN_PAUSED;
+
+ /*
+ * Notice that we leave plchan->lock locked on purpose:
+ * it will be unlocked in the subsequent tx_submit()
+ * call. This is a consequence of the current API.
+ */
+
+ return 0;
+}
+
+/*
+ * Initialize a descriptor to be used by memcpy submit
+ */
+static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret;
+
+ txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ if (!txd) {
+ dev_err(&pl08x->adev->dev,
+ "%s no memory for descriptor\n", __func__);
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&txd->tx, chan);
+ txd->direction = DMA_NONE;
+ txd->srcbus.addr = src;
+ txd->dstbus.addr = dest;
+
+ /* Set platform data for m2m */
+ txd->cd = &pl08x->pd->memcpy_channel;
+ /* Both to be incremented or the code will break */
+ txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ txd->tx.callback = NULL;
+ txd->tx.callback_param = NULL;
+ txd->len = len;
+
+ INIT_LIST_HEAD(&txd->node);
+ ret = pl08x_prep_channel_resources(plchan, txd);
+ if (ret)
+ return NULL;
+ /*
+ * NB: the channel lock is held at this point so tx_submit()
+ * must be called in direct succession.
+ */
+
+ return &txd->tx;
+}
+
+struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret;
+
+ /*
+ * Current implementation ASSUMES only one sg
+ */
+ if (sg_len != 1) {
+ dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
+ __func__);
+ BUG();
+ }
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sgl->length, plchan->name);
+
+ txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ if (!txd) {
+ dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&txd->tx, chan);
+
+ if (direction != plchan->runtime_direction)
+ dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
+ "the direction configured for the PrimeCell\n",
+ __func__);
+
+ /*
+ * Set up addresses, the PrimeCell configured address
+ * will take precedence since this may configure the
+ * channel target address dynamically at runtime.
+ */
+ txd->direction = direction;
+ if (direction == DMA_TO_DEVICE) {
+ txd->srcbus.addr = sgl->dma_address;
+ if (plchan->runtime_addr)
+ txd->dstbus.addr = plchan->runtime_addr;
+ else
+ txd->dstbus.addr = plchan->cd->addr;
+ } else if (direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_addr)
+ txd->srcbus.addr = plchan->runtime_addr;
+ else
+ txd->srcbus.addr = plchan->cd->addr;
+ txd->dstbus.addr = sgl->dma_address;
+ } else {
+ dev_err(&pl08x->adev->dev,
+ "%s direction unsupported\n", __func__);
+ return NULL;
+ }
+ txd->cd = plchan->cd;
+ txd->tx.tx_submit = pl08x_tx_submit;
+ txd->tx.callback = NULL;
+ txd->tx.callback_param = NULL;
+ txd->len = sgl->length;
+ INIT_LIST_HEAD(&txd->node);
+
+ ret = pl08x_prep_channel_resources(plchan, txd);
+ if (ret)
+ return NULL;
+ /*
+ * NB: the channel lock is held at this point so tx_submit()
+ * must be called in direct succession.
+ */
+
+ return &txd->tx;
+}
+
+static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Controls applicable to inactive channels */
+ if (cmd == DMA_SLAVE_CONFIG) {
+ dma_set_runtime_config(chan,
+ (struct dma_slave_config *)
+ arg);
+ return 0;
+ }
+
+ /*
+ * Anything succeeds on channels with no physical allocation and
+ * no queued transfers.
+ */
+ spin_lock_irqsave(&plchan->lock, flags);
+ if (!plchan->phychan && !plchan->at) {
+ spin_unlock_irqrestore(&plchan->lock, flags);
+ return 0;
+ }
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ plchan->state = PL08X_CHAN_IDLE;
+
+ if (plchan->phychan) {
+ pl08x_stop_phy_chan(plchan->phychan);
+
+ /*
+ * Mark physical channel as free and free any slave
+ * signal
+ */
+ if ((plchan->phychan->signal >= 0) &&
+ pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ plchan->phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ plchan->phychan = NULL;
+ }
+ /* Stop any pending tasklet */
+ tasklet_disable(&plchan->tasklet);
+ /* Dequeue jobs and free LLIs */
+ if (plchan->at) {
+ pl08x_free_txd(pl08x, plchan->at);
+ plchan->at = NULL;
+ }
+ /* Dequeue jobs not yet fired as well */
+ pl08x_free_txd_list(pl08x, plchan);
+ break;
+ case DMA_PAUSE:
+ pl08x_pause_phy_chan(plchan->phychan);
+ plchan->state = PL08X_CHAN_PAUSED;
+ break;
+ case DMA_RESUME:
+ pl08x_resume_phy_chan(plchan->phychan);
+ plchan->state = PL08X_CHAN_RUNNING;
+ break;
+ default:
+ /* Unknown command */
+ ret = -ENXIO;
+ break;
+ }
+
+ spin_unlock_irqrestore(&plchan->lock, flags);
+
+ return ret;
+}
+
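+/*
+ * A client would normally hand this filter to dma_request_channel() to
+ * claim a virtual channel by its platform bus_id. A minimal sketch,
+ * assuming a hypothetical slave channel named "uart0_tx":
+ *
+ *   dma_cap_zero(mask);
+ *   dma_cap_set(DMA_SLAVE, mask);
+ *   chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
+ */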
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ char *name = chan_id;
+
+ /* Match on the channel name supplied in the platform data */
+ if (!strcmp(plchan->name, name))
+ return true;
+
+ return false;
+}
+
+/*
+ * Just check that the device is there and active
+ * TODO: turn this bit on/off depending on the number of
+ * physical channels actually used, if it is zero... well
+ * shut it off. That will save some power. Cut the clock
+ * at the same time.
+ */
+static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
+{
+ u32 val;
+
+ val = readl(pl08x->base + PL080_CONFIG);
+ val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
+ /* We implicitly clear bit 1 and that means little-endian mode */
+ val |= PL080_CONFIG_ENABLE;
+ writel(val, pl08x->base + PL080_CONFIG);
+}
+
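+/*
+ * Per-channel tasklet: complete the active descriptor, invoke the client
+ * callback, then either start the next queued descriptor or release the
+ * physical channel and hand it to any memcpy transfer that was left
+ * waiting for one.
+ */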
+static void pl08x_tasklet(unsigned long data)
+{
+ struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
+ struct pl08x_phy_chan *phychan;
+ struct pl08x_driver_data *pl08x;
+
+ BUG_ON(!plchan);
+
+ phychan = plchan->phychan;
+ pl08x = plchan->host;
+
+ spin_lock(&plchan->lock);
+
+ if (plchan->at) {
+ dma_async_tx_callback callback =
+ plchan->at->tx.callback;
+ void *callback_param =
+ plchan->at->tx.callback_param;
+
+ /*
+ * Update last completed
+ */
+ plchan->lc =
+ (plchan->at->tx.cookie);
+
+ /*
+ * Callback to signal completion
+ */
+ if (callback)
+ callback(callback_param);
+
+ /*
+ * Device callbacks should NOT clear
+ * the current transaction on the channel
+ * Linus: sometimes they should?
+ */
+ if (!plchan->at)
+ BUG();
+
+ /*
+ * Free the descriptor if it's not for a device
+ * using a circular buffer
+ */
+ if (!plchan->at->cd->circular_buffer) {
+ pl08x_free_txd(pl08x, plchan->at);
+ plchan->at = NULL;
+ }
+ /*
+ * else descriptor for circular
+ * buffers only freed when
+ * client has disabled dma
+ */
+ }
+ /*
+ * If a new descriptor is queued, set it up
+ * plchan->at is NULL here
+ */
+ if (!list_empty(&plchan->desc_list)) {
+ struct pl08x_txd *next;
+
+ next = list_first_entry(&plchan->desc_list,
+ struct pl08x_txd,
+ node);
+ list_del(&next->node);
+ plchan->at = next;
+ /* Configure the physical channel for the next txd */
+ pl08x_config_phychan_for_txd(plchan);
+ pl08x_set_cregs(pl08x, plchan->phychan);
+ pl08x_enable_phy_chan(pl08x, plchan->phychan);
+ } else {
+ struct pl08x_dma_chan *waiting = NULL;
+
+ /*
+ * No more jobs, so free up the physical channel
+ * Free any allocated signal on slave transfers too
+ */
+ if ((phychan->signal >= 0) && pl08x->pd->put_signal) {
+ pl08x->pd->put_signal(plchan);
+ phychan->signal = -1;
+ }
+ pl08x_put_phy_channel(pl08x, phychan);
+ plchan->phychan = NULL;
+ plchan->state = PL08X_CHAN_IDLE;
+
+ /*
+ * And NOW, before anyone else can grab that freed-up
+ * physical channel, see if there is some memcpy
+ * pending that needs to start because it was stacked
+ * up while we were hogging the physical channels
+ * with data.
+ */
+ list_for_each_entry(waiting, &pl08x->memcpy.channels,
+ chan.device_node) {
+ if (waiting->state == PL08X_CHAN_WAITING &&
+ waiting->waiting != NULL) {
+ int ret;
+
+ /* This should REALLY not fail now */
+ ret = prep_phy_channel(waiting,
+ waiting->waiting);
+ BUG_ON(ret);
+ waiting->state = PL08X_CHAN_RUNNING;
+ waiting->waiting = NULL;
+ pl08x_issue_pending(&waiting->chan);
+ break;
+ }
+ }
+ }
+
+ spin_unlock(&plchan->lock);
+}
+
+static irqreturn_t pl08x_irq(int irq, void *dev)
+{
+ struct pl08x_driver_data *pl08x = dev;
+ u32 mask = 0;
+ u32 val;
+ int i;
+
+ val = readl(pl08x->base + PL080_ERR_STATUS);
+ if (val) {
+ /*
+ * An error interrupt (on one or more channels)
+ */
+ dev_err(&pl08x->adev->dev,
+ "%s error interrupt, register value 0x%08x\n",
+ __func__, val);
+ /*
+ * Simply clear ALL PL08X error interrupts,
+ * regardless of channel and cause
+ * FIXME: should be 0x00000003 on PL081 really.
+ */
+ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ }
+ val = readl(pl08x->base + PL080_INT_STATUS);
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ if ((1 << i) & val) {
+ /* Locate physical channel */
+ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
+ struct pl08x_dma_chan *plchan = phychan->serving;
+
+ /* Schedule tasklet on this channel */
+ tasklet_schedule(&plchan->tasklet);
+
+ mask |= (1 << i);
+ }
+ }
+ /*
+ * Clear only the terminal interrupts on channels we processed
+ */
+ writel(mask, pl08x->base + PL080_TC_CLEAR);
+
+ return mask ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Initialise the DMAC memcpy/slave channels.
+ * Make a local wrapper to hold required data
+ */
+static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
+ struct dma_device *dmadev,
+ unsigned int channels,
+ bool slave)
+{
+ struct pl08x_dma_chan *chan;
+ int i;
+
+ INIT_LIST_HEAD(&dmadev->channels);
+ /*
+ * Register as many memcpy channels as we have physical
+ * channels; we won't always be able to use all of them but
+ * the code will have to cope with that situation.
+ */
+ for (i = 0; i < channels; i++) {
+ chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(&pl08x->adev->dev,
+ "%s no memory for channel\n", __func__);
+ return -ENOMEM;
+ }
+
+ chan->host = pl08x;
+ chan->state = PL08X_CHAN_IDLE;
+
+ if (slave) {
+ chan->slave = true;
+ chan->name = pl08x->pd->slave_channels[i].bus_id;
+ chan->cd = &pl08x->pd->slave_channels[i];
+ } else {
+ chan->cd = &pl08x->pd->memcpy_channel;
+ chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
+ if (!chan->name) {
+ kfree(chan);
+ return -ENOMEM;
+ }
+ }
+ dev_info(&pl08x->adev->dev,
+ "initialize virtual channel \"%s\"\n",
+ chan->name);
+
+ chan->chan.device = dmadev;
+ atomic_set(&chan->last_issued, 0);
+ chan->lc = atomic_read(&chan->last_issued);
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->desc_list);
+ tasklet_init(&chan->tasklet, pl08x_tasklet,
+ (unsigned long) chan);
+
+ list_add_tail(&chan->chan.device_node, &dmadev->channels);
+ }
+ dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
+ i, slave ? "slave" : "memcpy");
+ return i;
+}
+
+static void pl08x_free_virtual_channels(struct dma_device *dmadev)
+{
+ struct pl08x_dma_chan *chan = NULL;
+ struct pl08x_dma_chan *next;
+
+ list_for_each_entry_safe(chan,
+ next, &dmadev->channels, chan.device_node) {
+ list_del(&chan->chan.device_node);
+ kfree(chan);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
+{
+ switch (state) {
+ case PL08X_CHAN_IDLE:
+ return "idle";
+ case PL08X_CHAN_RUNNING:
+ return "running";
+ case PL08X_CHAN_PAUSED:
+ return "paused";
+ case PL08X_CHAN_WAITING:
+ return "waiting";
+ default:
+ break;
+ }
+ return "UNKNOWN STATE";
+}
+
+static int pl08x_debugfs_show(struct seq_file *s, void *data)
+{
+ struct pl08x_driver_data *pl08x = s->private;
+ struct pl08x_dma_chan *chan;
+ struct pl08x_phy_chan *ch;
+ unsigned long flags;
+ int i;
+
+ seq_printf(s, "PL08x physical channels:\n");
+ seq_printf(s, "CHANNEL:\tUSER:\n");
+ seq_printf(s, "--------\t-----\n");
+ for (i = 0; i < pl08x->vd->channels; i++) {
+ struct pl08x_dma_chan *virt_chan;
+
+ ch = &pl08x->phy_chans[i];
+
+ spin_lock_irqsave(&ch->lock, flags);
+ virt_chan = ch->serving;
+
+ seq_printf(s, "%d\t\t%s\n",
+ ch->id, virt_chan ? virt_chan->name : "(none)");
+
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ seq_printf(s, "\nPL08x virtual memcpy channels:\n");
+ seq_printf(s, "CHANNEL:\tSTATE:\n");
+ seq_printf(s, "--------\t------\n");
+ list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+ seq_printf(s, "%s\t\t%s\n", chan->name,
+ pl08x_state_str(chan->state));
+ }
+
+ seq_printf(s, "\nPL08x virtual slave channels:\n");
+ seq_printf(s, "CHANNEL:\tSTATE:\n");
+ seq_printf(s, "--------\t------\n");
+ list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+ seq_printf(s, "%s\t\t%s\n", chan->name,
+ pl08x_state_str(chan->state));
+ }
+
+ return 0;
+}
+
+static int pl08x_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pl08x_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations pl08x_debugfs_operations = {
+ .open = pl08x_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+ /* Expose a simple debugfs interface to view all channels */
+ (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
+ NULL, pl08x,
+ &pl08x_debugfs_operations);
+}
+
+#else
+static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
+{
+}
+#endif
+
+static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct pl08x_driver_data *pl08x;
+ struct vendor_data *vd = id->data;
+ int ret = 0;
+ int i;
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret)
+ return ret;
+
+ /* Create the driver state holder */
+ pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+ if (!pl08x) {
+ ret = -ENOMEM;
+ goto out_no_pl08x;
+ }
+
+ /* Initialize memcpy engine */
+ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
+ pl08x->memcpy.dev = &adev->dev;
+ pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+ pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
+ pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
+ pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+ pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
+ pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
+ pl08x->memcpy.device_control = pl08x_control;
+
+ /* Initialize slave engine */
+ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ pl08x->slave.dev = &adev->dev;
+ pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
+ pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
+ pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
+ pl08x->slave.device_tx_status = pl08x_dma_tx_status;
+ pl08x->slave.device_issue_pending = pl08x_issue_pending;
+ pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_control = pl08x_control;
+
+ /* Get the platform data */
+ pl08x->pd = dev_get_platdata(&adev->dev);
+ if (!pl08x->pd) {
+ dev_err(&adev->dev, "no platform data supplied\n");
+ ret = -EINVAL;
+ goto out_no_platdata;
+ }
+
+ /* Assign useful pointers to the driver state */
+ pl08x->adev = adev;
+ pl08x->vd = vd;
+
+ /* A DMA memory pool for LLIs, align on 1-byte boundary */
+ pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
+ PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ if (!pl08x->pool) {
+ ret = -ENOMEM;
+ goto out_no_lli_pool;
+ }
+
+ spin_lock_init(&pl08x->lock);
+
+ pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (!pl08x->base) {
+ ret = -ENOMEM;
+ goto out_no_ioremap;
+ }
+
+ /* Turn on the PL08x */
+ pl08x_ensure_on(pl08x);
+
+ /*
+ * Attach the interrupt handler
+ */
+ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
+
+ ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
+ vd->name, pl08x);
+ if (ret) {
+ dev_err(&adev->dev, "%s failed to request interrupt %d\n",
+ __func__, adev->irq[0]);
+ goto out_no_irq;
+ }
+
+ /* Initialize physical channels */
+ pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+ GFP_KERNEL);
+ if (!pl08x->phy_chans) {
+ dev_err(&adev->dev, "%s failed to allocate "
+ "physical channel holders\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out_no_phychans;
+ }
+
+ for (i = 0; i < vd->channels; i++) {
+ struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
+
+ ch->id = i;
+ ch->base = pl08x->base + PL080_Cx_BASE(i);
+ spin_lock_init(&ch->lock);
+ ch->serving = NULL;
+ ch->signal = -1;
+ dev_info(&adev->dev,
+ "physical channel %d is %s\n", i,
+ pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+ }
+
+ /* Register as many memcpy channels as there are physical channels */
+ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
+ pl08x->vd->channels, false);
+ if (ret <= 0) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to enumerate memcpy channels - %d\n",
+ __func__, ret);
+ goto out_no_memcpy;
+ }
+ pl08x->memcpy.chancnt = ret;
+
+ /* Register slave channels */
+ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
+ pl08x->pd->num_slave_channels,
+ true);
+ if (ret <= 0) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to enumerate slave channels - %d\n",
+ __func__, ret);
+ goto out_no_slave;
+ }
+ pl08x->slave.chancnt = ret;
+
+ ret = dma_async_device_register(&pl08x->memcpy);
+ if (ret) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to register memcpy as an async device - %d\n",
+ __func__, ret);
+ goto out_no_memcpy_reg;
+ }
+
+ ret = dma_async_device_register(&pl08x->slave);
+ if (ret) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to register slave as an async device - %d\n",
+ __func__, ret);
+ goto out_no_slave_reg;
+ }
+
+ amba_set_drvdata(adev, pl08x);
+ init_pl08x_debugfs(pl08x);
+ dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n",
+ vd->name, adev->res.start);
+ return 0;
+
+out_no_slave_reg:
+ dma_async_device_unregister(&pl08x->memcpy);
+out_no_memcpy_reg:
+ pl08x_free_virtual_channels(&pl08x->slave);
+out_no_slave:
+ pl08x_free_virtual_channels(&pl08x->memcpy);
+out_no_memcpy:
+ kfree(pl08x->phy_chans);
+out_no_phychans:
+ free_irq(adev->irq[0], pl08x);
+out_no_irq:
+ iounmap(pl08x->base);
+out_no_ioremap:
+ dma_pool_destroy(pl08x->pool);
+out_no_lli_pool:
+out_no_platdata:
+ kfree(pl08x);
+out_no_pl08x:
+ amba_release_regions(adev);
+ return ret;
+}
+
+/* The PL080 has 8 channels and the PL081 has just 2 */
+static struct vendor_data vendor_pl080 = {
+ .name = "PL080",
+ .channels = 8,
+ .dualmaster = true,
+};
+
+static struct vendor_data vendor_pl081 = {
+ .name = "PL081",
+ .channels = 2,
+ .dualmaster = false,
+};
+
+static struct amba_id pl08x_ids[] = {
+ /* PL080 */
+ {
+ .id = 0x00041080,
+ .mask = 0x000fffff,
+ .data = &vendor_pl080,
+ },
+ /* PL081 */
+ {
+ .id = 0x00041081,
+ .mask = 0x000fffff,
+ .data = &vendor_pl081,
+ },
+ /* Nomadik 8815 PL080 variant */
+ {
+ .id = 0x00280880,
+ .mask = 0x00ffffff,
+ .data = &vendor_pl080,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl08x_amba_driver = {
+ .drv.name = DRIVER_NAME,
+ .id_table = pl08x_ids,
+ .probe = pl08x_probe,
+};
+
+static int __init pl08x_init(void)
+{
+ int retval;
+ retval = amba_driver_register(&pl08x_amba_driver);
+ if (retval)
+ printk(KERN_WARNING DRIVER_NAME
+ ": failed to register as an amba device (%d)\n",
+ retval);
+ return retval;
+}
+subsys_initcall(pl08x_init);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index ae2b8714d190..a6656834f0ff 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-subsys_initcall(coh901318_init);
+arch_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9d31d5eb95c1..8bcb15fb959d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
+ BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+ !device->device_prep_dma_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
+ BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+ !device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
@@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->dev);
/* note: this only matters in the
- * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+ * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
@@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
- #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
#endif
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cea08bed9cf9..286c3ac6bdcc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,9 +35,10 @@
#include <linux/dmapool.h>
#include <linux/of_platform.h>
-#include <asm/fsldma.h>
#include "fsldma.h"
+static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+
static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev, "No free memory for link descriptor\n");
+ dev_err(chan->dev, msg_ld_oom);
return NULL;
}
@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(chan->dev,
- "No free memory for link descriptor\n");
+ dev_err(chan->dev, msg_ld_oom);
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
@@ -583,223 +583,205 @@ fail:
return NULL;
}
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
- struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
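+/**
+ * fsl_dma_prep_sg - prepare descriptors for a DMA_SG transaction
+ * @dchan: DMA channel
+ * @dst_sg: destination scatterlist
+ * @dst_nents: number of entries in @dst_sg
+ * @src_sg: source scatterlist
+ * @src_nents: number of entries in @src_sg
+ * @flags: DMAEngine flags
+ *
+ * Prepare a memory-to-memory scatter/gather copy: both lists are walked
+ * in lockstep and the largest possible link descriptor is emitted for
+ * each overlapping chunk, bounded by FSL_DMA_BCR_MAX_CNT.
+ */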
+static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
{
- struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
- struct fsl_dma_slave *slave;
- size_t copy;
-
- int i;
- struct scatterlist *sg;
- size_t sg_used;
- size_t hw_used;
- struct fsl_dma_hw_addr *hw;
- dma_addr_t dma_dst, dma_src;
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ size_t dst_avail, src_avail;
+ dma_addr_t dst, src;
+ size_t len;
- if (!dchan)
+ /* basic sanity checks */
+ if (dst_nents == 0 || src_nents == 0)
return NULL;
- if (!dchan->private)
+ if (dst_sg == NULL || src_sg == NULL)
return NULL;
- chan = to_fsl_chan(dchan);
- slave = dchan->private;
+ /*
+ * TODO: should we check that both scatterlists have the same
+ * number of bytes in total? Is that really an error?
+ */
- if (list_empty(&slave->addresses))
- return NULL;
+ /* get prepared for the loop */
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
- hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
- hw_used = 0;
+ /* run until we are out of scatterlist entries */
+ while (true) {
- /*
- * Build the hardware transaction to copy from the scatterlist to
- * the hardware, or from the hardware to the scatterlist
- *
- * If you are copying from the hardware to the scatterlist and it
- * takes two hardware entries to fill an entire page, then both
- * hardware entries will be coalesced into the same page
- *
- * If you are copying from the scatterlist to the hardware and a
- * single page can fill two hardware entries, then the data will
- * be read out of the page into the first hardware entry, and so on
- */
- for_each_sg(sgl, sg, sg_len, i) {
- sg_used = 0;
-
- /* Loop until the entire scatterlist entry is used */
- while (sg_used < sg_dma_len(sg)) {
-
- /*
- * If we've used up the current hardware address/length
- * pair, we need to load a new one
- *
- * This is done in a while loop so that descriptors with
- * length == 0 will be skipped
- */
- while (hw_used >= hw->length) {
-
- /*
- * If the current hardware entry is the last
- * entry in the list, we're finished
- */
- if (list_is_last(&hw->entry, &slave->addresses))
- goto finished;
-
- /* Get the next hardware address/length pair */
- hw = list_entry(hw->entry.next,
- struct fsl_dma_hw_addr, entry);
- hw_used = 0;
- }
-
- /* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(chan);
- if (!new) {
- dev_err(chan->dev, "No free memory for "
- "link descriptor\n");
- goto fail;
- }
+ /* create the largest transaction possible */
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
+ if (len == 0)
+ goto fetch;
+
+ dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+ src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+ /* allocate and populate the descriptor */
+ new = fsl_dma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, msg_ld_oom);
+ goto fail;
+ }
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the DMA controller limit
- */
- copy = min_t(size_t, sg_dma_len(sg) - sg_used,
- hw->length - hw_used);
- copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
-
- /*
- * DMA_FROM_DEVICE
- * from the hardware to the scatterlist
- *
- * DMA_TO_DEVICE
- * from the scatterlist to the hardware
- */
- if (direction == DMA_FROM_DEVICE) {
- dma_src = hw->address + hw_used;
- dma_dst = sg_dma_address(sg) + sg_used;
- } else {
- dma_src = sg_dma_address(sg) + sg_used;
- dma_dst = hw->address + hw_used;
- }
-
- /* Fill in the descriptor */
- set_desc_cnt(chan, &new->hw, copy);
- set_desc_src(chan, &new->hw, dma_src);
- set_desc_dst(chan, &new->hw, dma_dst);
-
- /*
- * If this is not the first descriptor, chain the
- * current descriptor after the previous descriptor
- */
- if (!first) {
- first = new;
- } else {
- set_desc_next(chan, &prev->hw,
- new->async_tx.phys);
- }
-
- new->async_tx.cookie = 0;
- async_tx_ack(&new->async_tx);
-
- prev = new;
- sg_used += copy;
- hw_used += copy;
-
- /* Insert the link descriptor into the LD ring */
- list_add_tail(&new->node, &first->tx_list);
- }
- }
+ set_desc_cnt(chan, &new->hw, len);
+ set_desc_src(chan, &new->hw, src);
+ set_desc_dst(chan, &new->hw, dst);
-finished:
+ if (!first)
+ first = new;
+ else
+ set_desc_next(chan, &prev->hw, new->async_tx.phys);
- /* All of the hardware address/length pairs had length == 0 */
- if (!first || !new)
- return NULL;
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+ prev = new;
- new->async_tx.flags = flags;
- new->async_tx.cookie = -EBUSY;
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
- /* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(chan, new);
+ /* update metadata */
+ dst_avail -= len;
+ src_avail -= len;
+
+fetch:
+ /* fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+
+ /* no more entries: we're done */
+ if (dst_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_nents--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
- /* Enable extra controller features */
- if (chan->set_src_loop_size)
- chan->set_src_loop_size(chan, slave->src_loop_size);
+ /* fetch the next src scatterlist entry */
+ if (src_avail == 0) {
- if (chan->set_dst_loop_size)
- chan->set_dst_loop_size(chan, slave->dst_loop_size);
+ /* no more entries: we're done */
+ if (src_nents == 0)
+ break;
- if (chan->toggle_ext_start)
- chan->toggle_ext_start(chan, slave->external_start);
+ /* fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
- if (chan->toggle_ext_pause)
- chan->toggle_ext_pause(chan, slave->external_pause);
+ src_nents--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
- if (chan->set_request_count)
- chan->set_request_count(chan, slave->request_count);
+ new->async_tx.flags = flags; /* client is in control of this ack */
+ new->async_tx.cookie = -EBUSY;
+
+ /* Set End-of-link to the last link descriptor of new list */
+ set_ld_eol(chan, new);
return &first->async_tx;
fail:
- /* If first was not set, then we failed to allocate the very first
- * descriptor, and we're done */
if (!first)
return NULL;
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
+ return NULL;
+}
+
+/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
/*
- * First is set, so all of the descriptors we allocated have been added
- * to first->tx_list, INCLUDING "first" itself. Therefore we
- * must traverse the list backwards freeing each descriptor in turn
+ * This operation is not supported on the Freescale DMA controller
*
- * We're re-using variables for the loop, oh well
+ * However, we need to provide the function pointer to allow the
+ * device_control() method to work.
*/
- fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
static int fsl_dma_device_control(struct dma_chan *dchan,
enum dma_ctrl_cmd cmd, unsigned long arg)
{
+ struct dma_slave_config *config;
struct fsldma_chan *chan;
unsigned long flags;
-
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ int size;
if (!dchan)
return -EINVAL;
chan = to_fsl_chan(dchan);
- /* Halt the DMA engine */
- dma_halt(chan);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ /* Halt the DMA engine */
+ dma_halt(chan);
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- /* Remove and free all of the descriptors in the LD queue */
- fsldma_free_desc_list(chan, &chan->ld_pending);
- fsldma_free_desc_list(chan, &chan->ld_running);
+ /* Remove and free all of the descriptors in the LD queue */
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return 0;
+
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+
+ /* make sure the channel supports setting burst size */
+ if (!chan->set_request_count)
+ return -ENXIO;
+
+ /* we set the controller burst size depending on direction */
+ if (config->direction == DMA_TO_DEVICE)
+ size = config->dst_addr_width * config->dst_maxburst;
+ else
+ size = config->src_addr_width * config->src_maxburst;
+
+ chan->set_request_count(chan, size);
+ return 0;
+
+ case FSLDMA_EXTERNAL_START:
+
+ /* make sure the channel supports external start */
+ if (!chan->toggle_ext_start)
+ return -ENXIO;
+
+ chan->toggle_ext_start(chan, arg);
+ return 0;
+
+ default:
+ return -ENXIO;
+ }
return 0;
}
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+ dma_cap_set(DMA_SG, fdev->common.cap_mask);
dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+ fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
new file mode 100644
index 000000000000..f629e4961af5
--- /dev/null
+++ b/drivers/dma/imx-dma.c
@@ -0,0 +1,424 @@
+/*
+ * drivers/dma/imx-dma.c
+ *
+ * This file contains a driver for the Freescale i.MX DMA engine
+ * found on i.MX1/21/27
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/dma-v1.h>
+#include <mach/hardware.h>
+
+struct imxdma_channel {
+ struct imxdma_engine *imxdma;
+ unsigned int channel;
+ unsigned int imxdma_channel;
+
+ enum dma_slave_buswidth word_size;
+ dma_addr_t per_address;
+ u32 watermark_level;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+ int dma_request;
+ struct scatterlist *sg_list;
+};
+
+#define MAX_DMA_CHANNELS 8
+
+struct imxdma_engine {
+ struct device *dev;
+ struct dma_device dma_device;
+ struct imxdma_channel channel[MAX_DMA_CHANNELS];
+};
+
+static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct imxdma_channel, chan);
+}
+
+static void imxdma_handle(struct imxdma_channel *imxdmac)
+{
+ if (imxdmac->desc.callback)
+ imxdmac->desc.callback(imxdmac->desc.callback_param);
+ imxdmac->last_completed = imxdmac->desc.cookie;
+}
+
+static void imxdma_irq_handler(int channel, void *data)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_SUCCESS;
+ imxdma_handle(imxdmac);
+}
+
+static void imxdma_err_handler(int channel, void *data, int error)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_ERROR;
+ imxdma_handle(imxdmac);
+}
+
+static void imxdma_progression(int channel, void *data,
+ struct scatterlist *sg)
+{
+ struct imxdma_channel *imxdmac = data;
+
+ imxdmac->status = DMA_SUCCESS;
+ imxdma_handle(imxdmac);
+}
+
+static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+ int ret;
+ unsigned int mode = 0;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ imxdmac->status = DMA_ERROR;
+ imx_dma_disable(imxdmac->imxdma_channel);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ imxdmac->per_address = dmaengine_cfg->src_addr;
+ imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+ imxdmac->word_size = dmaengine_cfg->src_addr_width;
+ } else {
+ imxdmac->per_address = dmaengine_cfg->dst_addr;
+ imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+ imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+
+ switch (imxdmac->word_size) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ mode = IMX_DMA_MEMSIZE_8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ mode = IMX_DMA_MEMSIZE_16;
+ break;
+ default:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ mode = IMX_DMA_MEMSIZE_32;
+ break;
+ }
+ ret = imx_dma_config_channel(imxdmac->imxdma_channel,
+ mode | IMX_DMA_TYPE_FIFO,
+ IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+ imxdmac->dma_request, 1);
+
+ if (ret)
+ return ret;
+
+ imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
+
+ return 0;
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+static enum dma_status imxdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ dma_cookie_t last_used;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
+ dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
+
+ return ret;
+}
+
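+/*
+ * Cookies are simple increasing sequence numbers; wrap back to 1 on
+ * overflow since negative cookie values are reserved for error codes.
+ */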
+static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
+{
+ dma_cookie_t cookie = imxdma->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ imxdma->chan.cookie = cookie;
+ imxdma->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_irq(&imxdmac->lock);
+
+ cookie = imxdma_assign_cookie(imxdmac);
+
+ imx_dma_enable(imxdmac->imxdma_channel);
+
+ spin_unlock_irq(&imxdmac->lock);
+
+ return cookie;
+}
+
+static int imxdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imx_dma_data *data = chan->private;
+
+ imxdmac->dma_request = data->dma_request;
+
+ dma_async_tx_descriptor_init(&imxdmac->desc, chan);
+ imxdmac->desc.tx_submit = imxdma_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ imxdmac->desc.flags = DMA_CTRL_ACK;
+
+ imxdmac->status = DMA_SUCCESS;
+
+ return 0;
+}
+
+static void imxdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+ imx_dma_disable(imxdmac->imxdma_channel);
+
+ if (imxdmac->sg_list) {
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
+ }
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct scatterlist *sg;
+ int i, ret, dma_length = 0;
+ unsigned int dmamode;
+
+ if (imxdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ imxdmac->status = DMA_IN_PROGRESS;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_length += sg->length;
+ }
+
+ if (direction == DMA_FROM_DEVICE)
+ dmamode = DMA_MODE_READ;
+ else
+ dmamode = DMA_MODE_WRITE;
+
+ ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
+ dma_length, imxdmac->per_address, dmamode);
+ if (ret)
+ return NULL;
+
+ return &imxdmac->desc;
+}
+
+static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+ int i, ret;
+ unsigned int periods = buf_len / period_len;
+ unsigned int dmamode;
+
+ dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
+ __func__, imxdmac->channel, buf_len, period_len);
+
+ if (imxdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+ imxdmac->status = DMA_IN_PROGRESS;
+
+ ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
+ imxdma_progression);
+ if (ret) {
+ dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
+ return NULL;
+ }
+
+ if (imxdmac->sg_list)
+ kfree(imxdmac->sg_list);
+
+ imxdmac->sg_list = kcalloc(periods + 1,
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!imxdmac->sg_list)
+ return NULL;
+
+ sg_init_table(imxdmac->sg_list, periods);
+
+ for (i = 0; i < periods; i++) {
+ imxdmac->sg_list[i].page_link = 0;
+ imxdmac->sg_list[i].offset = 0;
+ imxdmac->sg_list[i].dma_address = dma_addr;
+ imxdmac->sg_list[i].length = period_len;
+ dma_addr += period_len;
+ }
+
+ /* close the loop: the sentinel entry chains back to the first entry */
+ imxdmac->sg_list[periods].offset = 0;
+ imxdmac->sg_list[periods].length = 0;
+ imxdmac->sg_list[periods].page_link =
+ ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+
+ if (direction == DMA_FROM_DEVICE)
+ dmamode = DMA_MODE_READ;
+ else
+ dmamode = DMA_MODE_WRITE;
+
+ ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
+ IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
+ if (ret)
+ return NULL;
+
+ return &imxdmac->desc;
+}
+
+static void imxdma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor
+ */
+}
+
+static int __init imxdma_probe(struct platform_device *pdev)
+{
+ struct imxdma_engine *imxdma;
+ int ret, i;
+
+ imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
+ if (!imxdma)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&imxdma->dma_device.channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
+ DMA_PRIO_MEDIUM);
+ if ((int)imxdmac->imxdma_channel < 0) {
+ ret = -ENODEV;
+ goto err_init;
+ }
+
+ imx_dma_setup_handlers(imxdmac->imxdma_channel,
+ imxdma_irq_handler, imxdma_err_handler, imxdmac);
+
+ imxdmac->imxdma = imxdma;
+ spin_lock_init(&imxdmac->lock);
+
+ dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
+ imxdmac->chan.device = &imxdma->dma_device;
+ imxdmac->chan.chan_id = i;
+ imxdmac->channel = i;
+
+ /* Add the channel to the DMAC list */
+ list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
+ }
+
+ imxdma->dev = &pdev->dev;
+ imxdma->dma_device.dev = &pdev->dev;
+
+ imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
+ imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
+ imxdma->dma_device.device_tx_status = imxdma_tx_status;
+ imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
+ imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
+ imxdma->dma_device.device_control = imxdma_control;
+ imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
+
+ platform_set_drvdata(pdev, imxdma);
+
+ ret = dma_async_device_register(&imxdma->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto err_init;
+ }
+
+ return 0;
+
+err_init:
+ while (--i >= 0) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+ imx_dma_free(imxdmac->imxdma_channel);
+ }
+
+ kfree(imxdma);
+ return ret;
+}
+
+static int __exit imxdma_remove(struct platform_device *pdev)
+{
+ struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
+ int i;
+
+ dma_async_device_unregister(&imxdma->dma_device);
+
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ imx_dma_free(imxdmac->imxdma_channel);
+ }
+
+ kfree(imxdma);
+
+ return 0;
+}
+
+static struct platform_driver imxdma_driver = {
+ .driver = {
+ .name = "imx-dma",
+ },
+ .remove = __exit_p(imxdma_remove),
+};
+
+static int __init imxdma_module_init(void)
+{
+ return platform_driver_probe(&imxdma_driver, imxdma_probe);
+}
+subsys_initcall(imxdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX dma driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
new file mode 100644
index 000000000000..0834323a0599
--- /dev/null
+++ b/drivers/dma/imx-sdma.c
@@ -0,0 +1,1392 @@
+/*
+ * drivers/dma/imx-sdma.c
+ *
+ * This file contains a driver for the Freescale Smart DMA engine
+ *
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * Based on code from Freescale:
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+
+#include <asm/irq.h>
+#include <mach/sdma.h>
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+/* SDMA registers */
+#define SDMA_H_C0PTR 0x000
+#define SDMA_H_INTR 0x004
+#define SDMA_H_STATSTOP 0x008
+#define SDMA_H_START 0x00c
+#define SDMA_H_EVTOVR 0x010
+#define SDMA_H_DSPOVR 0x014
+#define SDMA_H_HOSTOVR 0x018
+#define SDMA_H_EVTPEND 0x01c
+#define SDMA_H_DSPENBL 0x020
+#define SDMA_H_RESET 0x024
+#define SDMA_H_EVTERR 0x028
+#define SDMA_H_INTRMSK 0x02c
+#define SDMA_H_PSW 0x030
+#define SDMA_H_EVTERRDBG 0x034
+#define SDMA_H_CONFIG 0x038
+#define SDMA_ONCE_ENB 0x040
+#define SDMA_ONCE_DATA 0x044
+#define SDMA_ONCE_INSTR 0x048
+#define SDMA_ONCE_STAT 0x04c
+#define SDMA_ONCE_CMD 0x050
+#define SDMA_EVT_MIRROR 0x054
+#define SDMA_ILLINSTADDR 0x058
+#define SDMA_CHN0ADDR 0x05c
+#define SDMA_ONCE_RTB 0x060
+#define SDMA_XTRIG_CONF1 0x070
+#define SDMA_XTRIG_CONF2 0x074
+#define SDMA_CHNENBL0_V2 0x200
+#define SDMA_CHNENBL0_V1 0x080
+#define SDMA_CHNPRI_0 0x100
+
+/*
+ * Buffer descriptor status values.
+ */
+#define BD_DONE 0x01
+#define BD_WRAP 0x02
+#define BD_CONT 0x04
+#define BD_INTR 0x08
+#define BD_RROR 0x10
+#define BD_LAST 0x20
+#define BD_EXTD 0x80
+
+/*
+ * Data Node descriptor status values.
+ */
+#define DND_END_OF_FRAME 0x80
+#define DND_END_OF_XFER 0x40
+#define DND_DONE 0x20
+#define DND_UNUSED 0x01
+
+/*
+ * IPCV2 descriptor status values.
+ */
+#define BD_IPCV2_END_OF_FRAME 0x40
+
+#define IPCV2_MAX_NODES 50
+/*
+ * Error bit set in the CCB status field by the SDMA,
+ * in setbd routine, in case of a transfer error
+ */
+#define DATA_ERROR 0x10000000
+
+/*
+ * Buffer descriptor commands.
+ */
+#define C0_ADDR 0x01
+#define C0_LOAD 0x02
+#define C0_DUMP 0x03
+#define C0_SETCTX 0x07
+#define C0_GETCTX 0x03
+#define C0_SETDM 0x01
+#define C0_SETPM 0x04
+#define C0_GETDM 0x02
+#define C0_GETPM 0x08
+/*
+ * Change endianness indicator in the BD command field
+ */
+#define CHANGE_ENDIANNESS 0x80
+
+/*
+ * Mode/Count of data node descriptors - IPCv2
+ */
+struct sdma_mode_count {
+ u32 count : 16; /* size of the buffer pointed by this BD */
+ u32 status : 8; /* E,R,I,C,W,D status bits stored here */
+ u32 command : 8; /* command mostly used for channel 0 */
+};
+
+/*
+ * Buffer descriptor
+ */
+struct sdma_buffer_descriptor {
+ struct sdma_mode_count mode;
+ u32 buffer_addr; /* address of the buffer described */
+ u32 ext_buffer_addr; /* extended buffer address */
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_channel_control - Channel control Block
+ *
+ * @current_bd_ptr current buffer descriptor processed
+ * @base_bd_ptr first element of buffer descriptor array
+ * @unused padding. The SDMA engine expects an array of 128 byte
+ * control blocks
+ */
+struct sdma_channel_control {
+ u32 current_bd_ptr;
+ u32 base_bd_ptr;
+ u32 unused[2];
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_state_registers - SDMA context for a channel
+ *
+ * @pc: program counter
+ * @t: test bit: status of arithmetic & test instruction
+ * @rpc: return program counter
+ * @sf: source fault while loading data
+ * @spc: loop start program counter
+ * @df: destination fault while storing data
+ * @epc: loop end program counter
+ * @lm: loop mode
+ */
+struct sdma_state_registers {
+ u32 pc :14;
+ u32 unused1: 1;
+ u32 t : 1;
+ u32 rpc :14;
+ u32 unused0: 1;
+ u32 sf : 1;
+ u32 spc :14;
+ u32 unused2: 1;
+ u32 df : 1;
+ u32 epc :14;
+ u32 lm : 2;
+} __attribute__ ((packed));
+
+/**
+ * struct sdma_context_data - sdma context specific to a channel
+ *
+ * @channel_state: channel state bits
+ * @gReg: general registers
+ * @mda: burst dma destination address register
+ * @msa: burst dma source address register
+ * @ms: burst dma status register
+ * @md: burst dma data register
+ * @pda: peripheral dma destination address register
+ * @psa: peripheral dma source address register
+ * @ps: peripheral dma status register
+ * @pd: peripheral dma data register
+ * @ca: CRC polynomial register
+ * @cs: CRC accumulator register
+ * @dda: dedicated core destination address register
+ * @dsa: dedicated core source address register
+ * @ds: dedicated core status register
+ * @dd: dedicated core data register
+ */
+struct sdma_context_data {
+ struct sdma_state_registers channel_state;
+ u32 gReg[8];
+ u32 mda;
+ u32 msa;
+ u32 ms;
+ u32 md;
+ u32 pda;
+ u32 psa;
+ u32 ps;
+ u32 pd;
+ u32 ca;
+ u32 cs;
+ u32 dda;
+ u32 dsa;
+ u32 ds;
+ u32 dd;
+ u32 scratch0;
+ u32 scratch1;
+ u32 scratch2;
+ u32 scratch3;
+ u32 scratch4;
+ u32 scratch5;
+ u32 scratch6;
+ u32 scratch7;
+} __attribute__ ((packed));
+
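+/* one page worth of buffer descriptors is allocated for each channel */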
+#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+
+struct sdma_engine;
+
+/**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+ * @sdma: pointer to the SDMA engine for this channel
+ * @channel: the channel number, matches dmaengine chan_id
+ * @direction: transfer type, needed to select the SDMA script
+ * @peripheral_type: peripheral type, needed to select the SDMA script
+ * @event_id0: DMA request line (event id)
+ * @event_id1: second event, for channels that use two events
+ * @word_size: peripheral access size
+ * @buf_tail: index of the next buffer descriptor to process
+ * @done: channel completion
+ * @num_bd: number of descriptors currently in use, at most NUM_BD
+ */
+struct sdma_channel {
+ struct sdma_engine *sdma;
+ unsigned int channel;
+ enum dma_data_direction direction;
+ enum sdma_peripheral_type peripheral_type;
+ unsigned int event_id0;
+ unsigned int event_id1;
+ enum dma_slave_buswidth word_size;
+ unsigned int buf_tail;
+ struct completion done;
+ unsigned int num_bd;
+ struct sdma_buffer_descriptor *bd;
+ dma_addr_t bd_phys;
+ unsigned int pc_from_device, pc_to_device;
+ unsigned long flags;
+ dma_addr_t per_address;
+ u32 event_mask0, event_mask1;
+ u32 watermark_level;
+ u32 shp_addr, per_addr;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+ dma_cookie_t last_completed;
+ enum dma_status status;
+};
+
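+/* flag in sdma_channel->flags: the channel runs a cyclic (looped) transfer */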
+#define IMX_DMA_SG_LOOP (1 << 0)
+
+#define MAX_DMA_CHANNELS 32
+#define MXC_SDMA_DEFAULT_PRIORITY 1
+#define MXC_SDMA_MIN_PRIORITY 1
+#define MXC_SDMA_MAX_PRIORITY 7
+
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+ u32 ap_2_ap_addr;
+ u32 ap_2_bp_addr;
+ u32 ap_2_ap_fixed_addr;
+ u32 bp_2_ap_addr;
+ u32 loopback_on_dsp_side_addr;
+ u32 mcu_interrupt_only_addr;
+ u32 firi_2_per_addr;
+ u32 firi_2_mcu_addr;
+ u32 per_2_firi_addr;
+ u32 mcu_2_firi_addr;
+ u32 uart_2_per_addr;
+ u32 uart_2_mcu_addr;
+ u32 per_2_app_addr;
+ u32 mcu_2_app_addr;
+ u32 per_2_per_addr;
+ u32 uartsh_2_per_addr;
+ u32 uartsh_2_mcu_addr;
+ u32 per_2_shp_addr;
+ u32 mcu_2_shp_addr;
+ u32 ata_2_mcu_addr;
+ u32 mcu_2_ata_addr;
+ u32 app_2_per_addr;
+ u32 app_2_mcu_addr;
+ u32 shp_2_per_addr;
+ u32 shp_2_mcu_addr;
+ u32 mshc_2_mcu_addr;
+ u32 mcu_2_mshc_addr;
+ u32 spdif_2_mcu_addr;
+ u32 mcu_2_spdif_addr;
+ u32 asrc_2_mcu_addr;
+ u32 ext_mem_2_ipu_addr;
+ u32 descrambler_addr;
+ u32 dptc_dvfs_addr;
+ u32 utra_addr;
+ u32 ram_code_start_addr;
+};
+
+#define SDMA_FIRMWARE_MAGIC 0x414d4453
+
+/**
+ * struct sdma_firmware_header - Layout of the firmware image
+ *
+ * @magic: "SDMA"
+ * @version_major: increased whenever layout of struct sdma_script_start_addrs
+ * changes.
+ * @version_minor: firmware minor version (for binary compatible changes)
+ * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs: number of script addresses in this image
+ * @ram_code_start: offset of SDMA ram image in this firmware image
+ * @ram_code_size: size of SDMA ram image
+ * @script_addrs: stores the start addresses of the SDMA scripts
+ * (in SDMA memory space)
+ */
+struct sdma_firmware_header {
+ u32 magic;
+ u32 version_major;
+ u32 version_minor;
+ u32 script_addrs_start;
+ u32 num_script_addrs;
+ u32 ram_code_start;
+ u32 ram_code_size;
+};
+
+struct sdma_engine {
+ struct device *dev;
+ struct sdma_channel channel[MAX_DMA_CHANNELS];
+ struct sdma_channel_control *channel_control;
+ void __iomem *regs;
+ unsigned int version;
+ unsigned int num_events;
+ struct sdma_context_data *context;
+ dma_addr_t context_phys;
+ struct dma_device dma_device;
+ struct clk *clk;
+ struct sdma_script_start_addrs *script_addrs;
+};
+
+#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
+
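+/*
+ * Each DMA request event has its own 32-bit channel enable register with
+ * one bit per channel; only the base offset of the register array differs
+ * between SDMA v1 and v2.
+ */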
+static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
+{
+ u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
+
+ return chnenbl0 + event * 4;
+}
+
+static int sdma_config_ownership(struct sdma_channel *sdmac,
+ bool event_override, bool mcu_override, bool dsp_override)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 evt, mcu, dsp;
+
+ if (event_override && mcu_override && dsp_override)
+ return -EINVAL;
+
+ evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
+ mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
+ dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+
+ if (dsp_override)
+ dsp &= ~(1 << channel);
+ else
+ dsp |= (1 << channel);
+
+ if (event_override)
+ evt &= ~(1 << channel);
+ else
+ evt |= (1 << channel);
+
+ if (mcu_override)
+ mcu &= ~(1 << channel);
+ else
+ mcu |= (1 << channel);
+
+ __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
+ __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
+ __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+
+ return 0;
+}
+
+/*
+ * sdma_run_channel - run a channel and wait till it's done
+ */
+static int sdma_run_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int ret;
+
+ init_completion(&sdmac->done);
+
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+
+ ret = wait_for_completion_timeout(&sdmac->done, HZ);
+
+ return ret ? 0 : -ETIMEDOUT;
+}
+
+static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
+ u32 address)
+{
+ struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ void *buf_virt;
+ dma_addr_t buf_phys;
+ int ret;
+
+ buf_virt = dma_alloc_coherent(NULL,
+ size,
+ &buf_phys, GFP_KERNEL);
+ if (!buf_virt)
+ return -ENOMEM;
+
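+ /*
+ * Use the command channel (channel 0) to copy the script from the
+ * coherent bounce buffer into SDMA program memory; the count is
+ * given in 16-bit halfwords, hence size / 2.
+ */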
+ bd0->mode.command = C0_SETPM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.count = size / 2;
+ bd0->buffer_addr = buf_phys;
+ bd0->ext_buffer_addr = address;
+
+ memcpy(buf_virt, buf, size);
+
+ ret = sdma_run_channel(&sdma->channel[0]);
+
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+ return ret;
+}
+
+static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 val;
+ u32 chnenbl = chnenbl_ofs(sdma, event);
+
+ val = __raw_readl(sdma->regs + chnenbl);
+ val |= (1 << channel);
+ __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ u32 chnenbl = chnenbl_ofs(sdma, event);
+ u32 val;
+
+ val = __raw_readl(sdma->regs + chnenbl);
+ val &= ~(1 << channel);
+ __raw_writel(val, sdma->regs + chnenbl);
+}
+
+static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+ struct sdma_buffer_descriptor *bd;
+
+ /*
+ * loop mode. Iterate over descriptors, re-setup them and
+ * call callback function.
+ */
+ while (1) {
+ bd = &sdmac->bd[sdmac->buf_tail];
+
+ if (bd->mode.status & BD_DONE)
+ break;
+
+ if (bd->mode.status & BD_RROR)
+ sdmac->status = DMA_ERROR;
+ else
+ sdmac->status = DMA_SUCCESS;
+
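+ /* hand the descriptor back to the SDMA engine for the next period */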
+ bd->mode.status |= BD_DONE;
+ sdmac->buf_tail++;
+ sdmac->buf_tail %= sdmac->num_bd;
+
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+}
+
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+{
+ struct sdma_buffer_descriptor *bd;
+ int i, error = 0;
+
+ /*
+ * non loop mode. Iterate over all descriptors, collect
+ * errors and call callback function
+ */
+ for (i = 0; i < sdmac->num_bd; i++) {
+ bd = &sdmac->bd[i];
+
+ if (bd->mode.status & (BD_DONE | BD_RROR))
+ error = -EIO;
+ }
+
+ if (error)
+ sdmac->status = DMA_ERROR;
+ else
+ sdmac->status = DMA_SUCCESS;
+
+ if (sdmac->desc.callback)
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ sdmac->last_completed = sdmac->desc.cookie;
+}
+
+static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+{
+ complete(&sdmac->done);
+
+ /* not interested in channel 0 interrupts */
+ if (sdmac->channel == 0)
+ return;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP)
+ sdma_handle_channel_loop(sdmac);
+ else
+ mxc_sdma_handle_channel_normal(sdmac);
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+ struct sdma_engine *sdma = dev_id;
+ u32 stat;
+
+ stat = __raw_readl(sdma->regs + SDMA_H_INTR);
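+ /* writing the pending status bits back acknowledges (clears) them */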
+ __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+
+ while (stat) {
+ int channel = fls(stat) - 1;
+ struct sdma_channel *sdmac = &sdma->channel[channel];
+
+ mxc_sdma_handle_channel(sdmac);
+
+ stat &= ~(1 << channel);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * sets the pc of SDMA script according to the peripheral type
+ */
+static void sdma_get_pc(struct sdma_channel *sdmac,
+ enum sdma_peripheral_type peripheral_type)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int per_2_emi = 0, emi_2_per = 0;
+ /*
+ * These are needed once we start to support transfers between
+ * two peripherals or memory-to-memory transfers
+ */
+ int per_2_per = 0, emi_2_emi = 0;
+
+ sdmac->pc_from_device = 0;
+ sdmac->pc_to_device = 0;
+
+ switch (peripheral_type) {
+ case IMX_DMATYPE_MEMORY:
+ emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
+ break;
+ case IMX_DMATYPE_DSP:
+ emi_2_per = sdma->script_addrs->bp_2_ap_addr;
+ per_2_emi = sdma->script_addrs->ap_2_bp_addr;
+ break;
+ case IMX_DMATYPE_FIRI:
+ per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
+ break;
+ case IMX_DMATYPE_UART:
+ per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+ break;
+ case IMX_DMATYPE_UART_SP:
+ per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ATA:
+ per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
+ break;
+ case IMX_DMATYPE_CSPI:
+ case IMX_DMATYPE_EXT:
+ case IMX_DMATYPE_SSI:
+ per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+ break;
+ case IMX_DMATYPE_SSI_SP:
+ case IMX_DMATYPE_MMC:
+ case IMX_DMATYPE_SDHC:
+ case IMX_DMATYPE_CSPI_SP:
+ case IMX_DMATYPE_ESAI:
+ case IMX_DMATYPE_MSHC_SP:
+ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ASRC:
+ per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
+ case IMX_DMATYPE_MSHC:
+ per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
+ break;
+ case IMX_DMATYPE_CCM:
+ per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
+ break;
+ case IMX_DMATYPE_SPDIF:
+ per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
+ break;
+ case IMX_DMATYPE_IPU_MEMORY:
+ emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+ break;
+ default:
+ break;
+ }
+
+ sdmac->pc_from_device = per_2_emi;
+ sdmac->pc_to_device = emi_2_per;
+}
+
+static int sdma_load_context(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int load_address;
+ struct sdma_context_data *context = sdma->context;
+ struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+ int ret;
+
+ if (sdmac->direction == DMA_FROM_DEVICE) {
+ load_address = sdmac->pc_from_device;
+ } else {
+ load_address = sdmac->pc_to_device;
+ }
+
+ if (load_address < 0)
+ return load_address;
+
+ dev_dbg(sdma->dev, "load_address = %d\n", load_address);
+ dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+ dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
+ dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
+ dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
+ dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+
+ memset(context, 0, sizeof(*context));
+ context->channel_state.pc = load_address;
+
+ /*
+ * Pass the event masks, the peripheral base address and the
+ * watermark level to the script via the channel context.
+ */
+ context->gReg[0] = sdmac->event_mask1;
+ context->gReg[1] = sdmac->event_mask0;
+ context->gReg[2] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ context->gReg[7] = sdmac->watermark_level;
+
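+ /*
+ * Channel contexts are stored back to back in SDMA internal RAM,
+ * starting at address 2048; channel 0 copies this channel's context
+ * there.
+ */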
+ bd0->mode.command = C0_SETDM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.count = sizeof(*context) / 4;
+ bd0->buffer_addr = sdma->context_phys;
+ bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+
+ ret = sdma_run_channel(&sdma->channel[0]);
+
+ return ret;
+}
+
+static void sdma_disable_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+ sdmac->status = DMA_ERROR;
+}
+
+static int sdma_config_channel(struct sdma_channel *sdmac)
+{
+ int ret;
+
+ sdma_disable_channel(sdmac);
+
+ sdmac->event_mask0 = 0;
+ sdmac->event_mask1 = 0;
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
+
+ if (sdmac->event_id0) {
+ if (sdmac->event_id0 > 32)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+ }
+
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+ sdma_config_ownership(sdmac, false, true, true);
+ break;
+ case IMX_DMATYPE_MEMORY:
+ sdma_config_ownership(sdmac, false, true, false);
+ break;
+ default:
+ sdma_config_ownership(sdmac, true, true, false);
+ break;
+ }
+
+ sdma_get_pc(sdmac, sdmac->peripheral_type);
+
+ if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
+ (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+ /* Handle multiple event channels differently */
+ if (sdmac->event_id1) {
+ sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+ if (sdmac->event_id1 > 31)
+ sdmac->watermark_level |= 1 << 31;
+ sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+ if (sdmac->event_id0 > 31)
+ sdmac->watermark_level |= 1 << 30;
+ } else {
+ sdmac->event_mask0 = 1 << sdmac->event_id0;
+ sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+ }
+ /* Watermark Level */
+ sdmac->watermark_level |= sdmac->watermark_level;
+ /* Address */
+ sdmac->shp_addr = sdmac->per_address;
+ } else {
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+
+ ret = sdma_load_context(sdmac);
+
+ return ret;
+}
+
+static int sdma_set_channel_priority(struct sdma_channel *sdmac,
+ unsigned int priority)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+
+ if (priority < MXC_SDMA_MIN_PRIORITY
+ || priority > MXC_SDMA_MAX_PRIORITY) {
+ return -EINVAL;
+ }
+
+ __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+
+ return 0;
+}
+
+static int sdma_request_channel(struct sdma_channel *sdmac)
+{
+ struct sdma_engine *sdma = sdmac->sdma;
+ int channel = sdmac->channel;
+ int ret = -EBUSY;
+
+ sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+ if (!sdmac->bd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(sdmac->bd, 0, PAGE_SIZE);
+
+ sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ clk_enable(sdma->clk);
+
+ sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+
+ init_completion(&sdmac->done);
+
+ sdmac->buf_tail = 0;
+
+ return 0;
+out:
+
+ return ret;
+}
+
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+ __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+}
+
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+{
+ dma_cookie_t cookie = sdma->chan.cookie;
+
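+ /* dma_cookie_t is signed; skip negative values when the counter wraps */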
+ if (++cookie < 0)
+ cookie = 1;
+
+ sdma->chan.cookie = cookie;
+ sdma->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sdma_channel, chan);
+}
+
+static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ dma_cookie_t cookie;
+
+ spin_lock_irq(&sdmac->lock);
+
+ cookie = sdma_assign_cookie(sdmac);
+
+ sdma_enable_channel(sdma, tx->chan->chan_id);
+
+ spin_unlock_irq(&sdmac->lock);
+
+ return cookie;
+}
+
+static int sdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct imx_dma_data *data = chan->private;
+ int prio, ret;
+
+ /* No need to execute this for internal channel 0 */
+ if (chan->chan_id == 0)
+ return 0;
+
+ if (!data)
+ return -EINVAL;
+
+ switch (data->priority) {
+ case DMA_PRIO_HIGH:
+ prio = 3;
+ break;
+ case DMA_PRIO_MEDIUM:
+ prio = 2;
+ break;
+ case DMA_PRIO_LOW:
+ default:
+ prio = 1;
+ break;
+ }
+
+ sdmac->peripheral_type = data->peripheral_type;
+ sdmac->event_id0 = data->dma_request;
+ ret = sdma_set_channel_priority(sdmac, prio);
+ if (ret)
+ return ret;
+
+ ret = sdma_request_channel(sdmac);
+ if (ret)
+ return ret;
+
+ dma_async_tx_descriptor_init(&sdmac->desc, chan);
+ sdmac->desc.tx_submit = sdma_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ sdmac->desc.flags = DMA_CTRL_ACK;
+
+ return 0;
+}
+
+static void sdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+
+ sdma_disable_channel(sdmac);
+
+ if (sdmac->event_id0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+ if (sdmac->event_id1)
+ sdma_event_disable(sdmac, sdmac->event_id1);
+
+ sdmac->event_id0 = 0;
+ sdmac->event_id1 = 0;
+
+ sdma_set_channel_priority(sdmac, 0);
+
+ dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+ clk_disable(sdma->clk);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int ret, i, count;
+ int channel = chan->chan_id;
+ struct scatterlist *sg;
+
+ if (sdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->flags = 0;
+
+ dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
+ sg_len, channel);
+
+ sdmac->direction = direction;
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
+ if (sg_len > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, sg_len, NUM_BD);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ int param;
+
+ bd->buffer_addr = sg->dma_address;
+
+ count = sg->length;
+
+ if (count > 0xffff) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
+ channel, count, 0xffff);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ bd->mode.count = count;
+
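+ /*
+ * For slave transfers the BD command field carries the peripheral
+ * access width: 0 selects 32-bit accesses, otherwise the width in
+ * bytes (1 or 2) is used directly.
+ */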
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ bd->mode.command = 0;
+ else
+ bd->mode.command = sdmac->word_size;
+
+ param = BD_DONE | BD_EXTD | BD_CONT;
+
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
+ param |= BD_INTR;
+ if (i + 1 == sg_len)
+ param |= BD_WRAP;
+ }
+
+ if (i + 1 == sg_len)
+ param |= BD_INTR;
+
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+ i, count, sg->dma_address,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+ }
+
+ sdmac->num_bd = sg_len;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ return &sdmac->desc;
+err_out:
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+ int num_periods = buf_len / period_len;
+ int channel = chan->chan_id;
+ int ret, i = 0, buf = 0;
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+ if (sdmac->status == DMA_IN_PROGRESS)
+ return NULL;
+
+ sdmac->status = DMA_IN_PROGRESS;
+
+ sdmac->flags |= IMX_DMA_SG_LOOP;
+ sdmac->direction = direction;
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
+ if (num_periods > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, num_periods, NUM_BD);
+ goto err_out;
+ }
+
+ if (period_len > 0xffff) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
+ channel, period_len, 0xffff);
+ goto err_out;
+ }
+
+ while (buf < buf_len) {
+ struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+ int param;
+
+ bd->buffer_addr = dma_addr;
+
+ bd->mode.count = period_len;
+
+ if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+ goto err_out;
+ if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+ bd->mode.command = 0;
+ else
+ bd->mode.command = sdmac->word_size;
+
+ param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
+ if (i + 1 == num_periods)
+ param |= BD_WRAP;
+
+ dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+ i, period_len, dma_addr,
+ param & BD_WRAP ? "wrap" : "",
+ param & BD_INTR ? " intr" : "");
+
+ bd->mode.status = param;
+
+ dma_addr += period_len;
+ buf += period_len;
+
+ i++;
+ }
+
+ sdmac->num_bd = num_periods;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+ return &sdmac->desc;
+err_out:
+ sdmac->status = DMA_ERROR;
+ return NULL;
+}
+
+static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ sdma_disable_channel(sdmac);
+ return 0;
+ case DMA_SLAVE_CONFIG:
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+ sdmac->word_size = dmaengine_cfg->src_addr_width;
+ } else {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+ sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ }
+ return sdma_config_channel(sdmac);
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+static enum dma_status sdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ dma_cookie_t last_used;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+
+ return ret;
+}
+
+static void sdma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * Nothing to do. We only have a single descriptor
+ */
+}
+
+static int __init sdma_init(struct sdma_engine *sdma,
+ void *ram_code, int ram_code_size)
+{
+ int i, ret;
+ dma_addr_t ccb_phys;
+
+ switch (sdma->version) {
+ case 1:
+ sdma->num_events = 32;
+ break;
+ case 2:
+ sdma->num_events = 48;
+ break;
+ default:
+ dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+ return -ENODEV;
+ }
+
+ clk_enable(sdma->clk);
+
+ /* Be sure SDMA has not started yet */
+ __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+
+ sdma->channel_control = dma_alloc_coherent(NULL,
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+ sizeof(struct sdma_context_data),
+ &ccb_phys, GFP_KERNEL);
+
+ if (!sdma->channel_control) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ sdma->context = (void *)sdma->channel_control +
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+ sdma->context_phys = ccb_phys +
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+
+ /* Zero-out the CCB structures array just allocated */
+ memset(sdma->channel_control, 0,
+ MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
+
+ /* disable all channels */
+ for (i = 0; i < sdma->num_events; i++)
+ __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+
+ /* All channels have priority 0 */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++)
+ __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+
+ ret = sdma_request_channel(&sdma->channel[0]);
+ if (ret)
+ goto err_dma_alloc;
+
+ sdma_config_ownership(&sdma->channel[0], false, true, false);
+
+ /* Set Command Channel (Channel Zero) */
+ __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+
+ /* Set bits of CONFIG register but with static context switching */
+ /* FIXME: Check whether to set ACR bit depending on clock ratios */
+ __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+
+ __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+
+ /* download the RAM image for SDMA */
+ sdma_load_script(sdma, ram_code,
+ ram_code_size,
+ sdma->script_addrs->ram_code_start_addr);
+
+ /* Set bits of CONFIG register with given context switching mode */
+ __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
+ /* Give the command channel (channel 0) the highest priority */
+ sdma_set_channel_priority(&sdma->channel[0], 7);
+
+ clk_disable(sdma->clk);
+
+ return 0;
+
+err_dma_alloc:
+ clk_disable(sdma->clk);
+ dev_err(sdma->dev, "initialisation failed with %d\n", ret);
+ return ret;
+}
+
+static int __init sdma_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct firmware *fw;
+ const struct sdma_firmware_header *header;
+ const struct sdma_script_start_addrs *addr;
+ int irq;
+ unsigned short *ram_code;
+ struct resource *iores;
+ struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ char *fwname;
+ int i;
+ dma_cap_mask_t mask;
+ struct sdma_engine *sdma;
+
+ sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
+ if (!sdma)
+ return -ENOMEM;
+
+ sdma->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!iores || irq < 0 || !pdata) {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
+ ret = -EBUSY;
+ goto err_request_region;
+ }
+
+ sdma->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdma->clk)) {
+ ret = PTR_ERR(sdma->clk);
+ goto err_clk;
+ }
+
+ sdma->regs = ioremap(iores->start, resource_size(iores));
+ if (!sdma->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+ if (ret)
+ goto err_request_irq;
+
+ fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin",
+ pdata->cpu_name, pdata->to_version);
+ if (!fwname) {
+ ret = -ENOMEM;
+ goto err_cputype;
+ }
+
+ ret = request_firmware(&fw, fwname, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n",
+ fwname, ret);
+ kfree(fwname);
+ goto err_cputype;
+ }
+ kfree(fwname);
+
+ ret = -EINVAL;
+ if (fw->size < sizeof(*header))
+ goto err_firmware;
+
+ header = (struct sdma_firmware_header *)fw->data;
+
+ if (header->magic != SDMA_FIRMWARE_MAGIC)
+ goto err_firmware;
+ if (header->ram_code_start + header->ram_code_size > fw->size)
+ goto err_firmware;
+
+ addr = (void *)header + header->script_addrs_start;
+ ram_code = (void *)header + header->ram_code_start;
+ sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL);
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_firmware;
+ }
+ memcpy(sdma->script_addrs, addr, sizeof(*addr));
+
+ sdma->version = pdata->sdma_version;
+
+ INIT_LIST_HEAD(&sdma->dma_device.channels);
+ /* Initialize channel parameters */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct sdma_channel *sdmac = &sdma->channel[i];
+
+ sdmac->sdma = sdma;
+ spin_lock_init(&sdmac->lock);
+
+ dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
+ sdmac->chan.device = &sdma->dma_device;
+ sdmac->chan.chan_id = i;
+ sdmac->channel = i;
+
+ /* Add the channel to the DMAC list */
+ list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+ }
+
+ ret = sdma_init(sdma, ram_code, header->ram_code_size);
+ if (ret)
+ goto err_init;
+
+ sdma->dma_device.dev = &pdev->dev;
+
+ sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
+ sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
+ sdma->dma_device.device_tx_status = sdma_tx_status;
+ sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+ sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+ sdma->dma_device.device_control = sdma_control;
+ sdma->dma_device.device_issue_pending = sdma_issue_pending;
+
+ ret = dma_async_device_register(&sdma->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto err_init;
+ }
+
+ dev_info(&pdev->dev, "initialized (firmware %d.%d)\n",
+ header->version_major,
+ header->version_minor);
+
+ /* request channel 0. This is an internal control channel
+ * to the SDMA engine and not available to clients.
+ */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_request_channel(mask, NULL, NULL);
+
+ release_firmware(fw);
+
+ return 0;
+
+err_init:
+ kfree(sdma->script_addrs);
+err_firmware:
+ release_firmware(fw);
+err_cputype:
+ free_irq(irq, sdma);
+err_request_irq:
+ iounmap(sdma->regs);
+err_ioremap:
+ clk_put(sdma->clk);
+err_clk:
+ release_mem_region(iores->start, resource_size(iores));
+err_request_region:
+err_irq:
+ kfree(sdma);
+ return ret;
+}
+
+static int __exit sdma_remove(struct platform_device *pdev)
+{
+ return -EBUSY;
+}
+
+static struct platform_driver sdma_driver = {
+ .driver = {
+ .name = "imx-sdma",
+ },
+ .remove = __exit_p(sdma_remove),
+};
+
+static int __init sdma_module_init(void)
+{
+ return platform_driver_probe(&sdma_driver, sdma_probe);
+}
+subsys_initcall(sdma_module_init);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX SDMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c2591e8d9b6e..338bc4eed1f3 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -25,6 +25,7 @@
*/
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#define MAX_CHAN 4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
int byte_width = 0, block_ts = 0;
switch (tx_width) {
- case LNW_DMA_WIDTH_8BIT:
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
byte_width = 1;
break;
- case LNW_DMA_WIDTH_16BIT:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
byte_width = 2;
break;
- case LNW_DMA_WIDTH_32BIT:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
default:
byte_width = 4;
break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
struct middma_device *mid = to_middma_device(midc->chan.device);
/* channel is idle */
- if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+ if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
/*error*/
pr_err("ERR_MDMA: channel is busy in start\n");
/* The tasklet will hopefully advance the queue... */
return;
}
-
+ midc->busy = true;
/*write registers and en*/
iowrite32(first->sar, midc->ch_regs + SAR);
iowrite32(first->dar, midc->ch_regs + DAR);
+ iowrite32(first->lli_phys, midc->ch_regs + LLP);
iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
(int)first->sar, (int)first->dar, first->cfg_hi,
first->cfg_lo, first->ctl_hi, first->ctl_lo);
+ first->status = DMA_IN_PROGRESS;
iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
- first->status = DMA_IN_PROGRESS;
}
/**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
+ struct intel_mid_dma_lli *llitem;
void *param_txd = NULL;
midc->completed = txd->cookie;
callback_txd = txd->callback;
param_txd = txd->callback_param;
- list_move(&desc->desc_node, &midc->free_list);
-
+ if (desc->lli != NULL) {
+ /*clear the DONE bit of completed LLI in memory*/
+ llitem = desc->lli + desc->current_lli;
+ llitem->ctl_hi &= CLEAR_DONE;
+ if (desc->current_lli < desc->lli_length-1)
+ (desc->current_lli)++;
+ else
+ desc->current_lli = 0;
+ }
spin_unlock_bh(&midc->lock);
if (callback_txd) {
pr_debug("MDMA: TXD callback set ... calling\n");
callback_txd(param_txd);
- spin_lock_bh(&midc->lock);
- return;
+ }
+ if (midc->raw_tfr) {
+ desc->status = DMA_SUCCESS;
+ if (desc->lli != NULL) {
+ pci_pool_free(desc->lli_pool, desc->lli,
+ desc->lli_phys);
+ pci_pool_destroy(desc->lli_pool);
+ }
+ list_move(&desc->desc_node, &midc->free_list);
+ midc->busy = false;
}
spin_lock_bh(&midc->lock);
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
/*tx is complete*/
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
- if (desc->status == DMA_IN_PROGRESS) {
- desc->status = DMA_SUCCESS;
+ if (desc->status == DMA_IN_PROGRESS)
midc_descriptor_complete(midc, desc);
- }
}
return;
-}
+ }
+/**
+ * midc_lli_fill_sg - Helper function to convert
+ * SG list to Linked List Items.
+ * @midc: Channel
+ * @desc: DMA descriptor
+ * @sglist: Pointer to SG list
+ * @sglen: SG list length
+ * @flags: DMA transaction flags
+ *
+ * Walk through the SG list and convert the SG list into Linked
+ * List Items (LLI).
+ */
+static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+ struct intel_mid_dma_desc *desc,
+ struct scatterlist *sglist,
+ unsigned int sglen,
+ unsigned int flags)
+{
+ struct intel_mid_dma_slave *mids;
+ struct scatterlist *sg;
+ dma_addr_t lli_next, sg_phy_addr;
+ struct intel_mid_dma_lli *lli_bloc_desc;
+ union intel_mid_dma_ctl_lo ctl_lo;
+ union intel_mid_dma_ctl_hi ctl_hi;
+ int i;
+ pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+ mids = midc->mid_slave;
+
+ lli_bloc_desc = desc->lli;
+ lli_next = desc->lli_phys;
+
+ ctl_lo.ctl_lo = desc->ctl_lo;
+ ctl_hi.ctl_hi = desc->ctl_hi;
+ for_each_sg(sglist, sg, sglen, i) {
+ /*Populate CTL_LOW and LLI values*/
+ if (i != sglen - 1) {
+ lli_next = lli_next +
+ sizeof(struct intel_mid_dma_lli);
+ } else {
+ /*Check for circular list, otherwise terminate LLI to ZERO*/
+ if (flags & DMA_PREP_CIRCULAR_LIST) {
+ pr_debug("MDMA: LLI is configured in circular mode\n");
+ lli_next = desc->lli_phys;
+ } else {
+ lli_next = 0;
+ ctl_lo.ctlx.llp_dst_en = 0;
+ ctl_lo.ctlx.llp_src_en = 0;
+ }
+ }
+ /*Populate CTL_HI values*/
+ ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ desc->width,
+ midc->dma->block_size);
+ /*Populate SAR and DAR values*/
+ sg_phy_addr = sg_phys(sg);
+ if (desc->dirn == DMA_TO_DEVICE) {
+ lli_bloc_desc->sar = sg_phy_addr;
+ lli_bloc_desc->dar = mids->dma_slave.dst_addr;
+ } else if (desc->dirn == DMA_FROM_DEVICE) {
+ lli_bloc_desc->sar = mids->dma_slave.src_addr;
+ lli_bloc_desc->dar = sg_phy_addr;
+ }
+ /*Copy values into block descriptor in system memory*/
+ lli_bloc_desc->llp = lli_next;
+ lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+ lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+
+ lli_bloc_desc++;
+ }
+ /*Copy very first LLI values to descriptor*/
+ desc->ctl_lo = desc->lli->ctl_lo;
+ desc->ctl_hi = desc->lli->ctl_hi;
+ desc->sar = desc->lli->sar;
+ desc->dar = desc->lli->dar;
+
+ return 0;
+}
/*****************************************************************************
DMA engine callback Functions*/
/**
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
desc->txd.cookie = cookie;
- if (list_empty(&midc->active_list)) {
- midc_dostart(midc, desc);
+ if (list_empty(&midc->active_list))
list_add_tail(&desc->desc_node, &midc->active_list);
- } else {
+ else
list_add_tail(&desc->desc_node, &midc->queue);
- }
+
+ midc_dostart(midc, desc);
spin_unlock_bh(&midc->lock);
return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
return ret;
}
+static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+{
+ struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
+ struct dma_slave_config *slave = (struct dma_slave_config *)arg;
+ struct intel_mid_dma_slave *mid_slave;
+
+ BUG_ON(!midc);
+ BUG_ON(!slave);
+ pr_debug("MDMA: slave control called\n");
+
+ mid_slave = to_intel_mid_dma_slave(slave);
+
+ BUG_ON(!mid_slave);
+
+ midc->mid_slave = mid_slave;
+ return 0;
+}
/**
* intel_mid_dma_device_control - DMA device control
* @chan: chan for DMA control
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
- LIST_HEAD(list);
+ union intel_mid_dma_cfg_lo cfg_lo;
+
+ if (cmd == DMA_SLAVE_CONFIG)
+ return dma_slave_control(chan, arg);
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
spin_lock_bh(&midc->lock);
- if (midc->in_use == false) {
+ if (midc->busy == false) {
spin_unlock_bh(&midc->lock);
return 0;
}
- list_splice_init(&midc->free_list, &list);
- midc->descs_allocated = 0;
- midc->slave = NULL;
-
+ /*Suspend and disable the channel*/
+ cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+ cfg_lo.cfgx.ch_susp = 1;
+ iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+ iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+ midc->busy = false;
/* Disable interrupts */
disable_dma_interrupt(midc);
+ midc->descs_allocated = 0;
spin_unlock_bh(&midc->lock);
- list_for_each_entry_safe(desc, _desc, &list, desc_node) {
- pr_debug("MDMA: freeing descriptor %p\n", desc);
- pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+ list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+ if (desc->lli != NULL) {
+ pci_pool_free(desc->lli_pool, desc->lli,
+ desc->lli_phys);
+ pci_pool_destroy(desc->lli_pool);
+ }
+ list_move(&desc->desc_node, &midc->free_list);
}
return 0;
}
-/**
- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
- * @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
- * @flags: DMA flags
- *
- * Do DMA sg txn: NOT supported now
- */
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags)
-{
- /*not supported now*/
- return NULL;
-}
/**
* intel_mid_dma_prep_memcpy - Prep memcpy txn
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
union intel_mid_dma_ctl_hi ctl_hi;
union intel_mid_dma_cfg_lo cfg_lo;
union intel_mid_dma_cfg_hi cfg_hi;
- enum intel_mid_dma_width width = 0;
+ enum dma_slave_buswidth width;
pr_debug("MDMA: Prep for memcpy\n");
- WARN_ON(!chan);
+ BUG_ON(!chan);
if (!len)
return NULL;
- mids = chan->private;
- WARN_ON(!mids);
-
midc = to_intel_mid_dma_chan(chan);
- WARN_ON(!midc);
+ BUG_ON(!midc);
+
+ mids = midc->mid_slave;
+ BUG_ON(!mids);
pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
midc->dma->pci_id, midc->ch_id, len);
pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
- mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width);
+ mids->cfg_mode, mids->dma_slave.direction,
+ mids->hs_mode, mids->dma_slave.src_addr_width);
/*calculate CFG_LO*/
if (mids->hs_mode == LNW_DMA_SW_HS) {
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dirn == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dirn == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_HI*/
ctl_hi.ctlx.reser = 0;
- width = mids->src_width;
+ ctl_hi.ctlx.done = 0;
+ width = mids->dma_slave.src_addr_width;
ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
pr_debug("MDMA:calc len %d for block size %d\n",
@@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_LO*/
ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1;
- ctl_lo.ctlx.dst_tr_width = mids->dst_width;
- ctl_lo.ctlx.src_tr_width = mids->src_width;
- ctl_lo.ctlx.dst_msize = mids->src_msize;
- ctl_lo.ctlx.src_msize = mids->dst_msize;
+ ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
+ ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
+ ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
+ ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dirn == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dirn == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
desc->ctl_lo = ctl_lo.ctl_lo;
desc->ctl_hi = ctl_hi.ctl_hi;
desc->width = width;
- desc->dirn = mids->dirn;
+ desc->dirn = mids->dma_slave.direction;
+ desc->lli_phys = 0;
+ desc->lli = NULL;
+ desc->lli_pool = NULL;
return &desc->txd;
err_desc_get:
@@ -605,6 +712,85 @@ err_desc_get:
midc_desc_put(midc, desc);
return NULL;
}
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ *
+ * Prepares an LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct intel_mid_dma_chan *midc = NULL;
+ struct intel_mid_dma_slave *mids = NULL;
+ struct intel_mid_dma_desc *desc = NULL;
+ struct dma_async_tx_descriptor *txd = NULL;
+ union intel_mid_dma_ctl_lo ctl_lo;
+
+ pr_debug("MDMA: Prep for slave SG\n");
+
+ if (!sg_len) {
+ pr_err("MDMA: Invalid SG length\n");
+ return NULL;
+ }
+ midc = to_intel_mid_dma_chan(chan);
+ BUG_ON(!midc);
+
+ mids = midc->mid_slave;
+ BUG_ON(!mids);
+
+ if (!midc->dma->pimr_mask) {
+ pr_debug("MDMA: SG list is not supported by this controller\n");
+ return NULL;
+ }
+
+ pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+ sg_len, direction, flags);
+
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ if (NULL == txd) {
+ pr_err("MDMA: Prep memcpy failed\n");
+ return NULL;
+ }
+ desc = to_intel_mid_dma_desc(txd);
+ desc->dirn = direction;
+ ctl_lo.ctl_lo = desc->ctl_lo;
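+ /* enable linked-list pointer fetching on both source and destination */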
+ ctl_lo.ctlx.llp_dst_en = 1;
+ ctl_lo.ctlx.llp_src_en = 1;
+ desc->ctl_lo = ctl_lo.ctl_lo;
+ desc->lli_length = sg_len;
+ desc->current_lli = 0;
+ /* DMA coherent memory pool for LLI descriptors*/
+ desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+ midc->dma->pdev,
+ (sizeof(struct intel_mid_dma_lli)*sg_len),
+ 32, 0);
+ if (NULL == desc->lli_pool) {
+ pr_err("MID_DMA:LLI pool create failed\n");
+ return NULL;
+ }
+
+ desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+ if (!desc->lli) {
+ pr_err("MID_DMA: LLI alloc failed\n");
+ pci_pool_destroy(desc->lli_pool);
+ return NULL;
+ }
+
+ midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+ if (flags & DMA_PREP_INTERRUPT) {
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ midc->dma_base + MASK_BLOCK);
+ pr_debug("MDMA:Enabled Block interrupt\n");
+ }
+ return &desc->txd;
+}
/**
* intel_mid_dma_free_chan_resources - Frees dma resources
@@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc;
- if (true == midc->in_use) {
+ if (true == midc->busy) {
/*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n");
}
-
+ pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock);
midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
}
spin_unlock_bh(&midc->lock);
midc->in_use = false;
+ midc->busy = false;
/* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
dma_addr_t phys;
int i = 0;
+ pm_runtime_get_sync(&mid->pdev->dev);
+
+ if (mid->state == SUSPENDED) {
+ if (dma_resume(mid->pdev)) {
+ pr_err("ERR_MDMA: resume failed");
+ return -EFAULT;
+ }
+ }
/* ASSERT: channel is idle */
if (test_ch_en(mid->dma_base, midc->ch_id)) {
/*ch is not idle*/
pr_err("ERR_MDMA: ch not idle\n");
+ pm_runtime_put(&mid->pdev->dev);
return -EIO;
}
midc->completed = chan->cookie = 1;
@@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
if (!desc) {
pr_err("ERR_MDMA: desc failed\n");
+ pm_runtime_put(&mid->pdev->dev);
return -ENOMEM;
/*check*/
}
@@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &midc->free_list);
}
spin_unlock_bh(&midc->lock);
- midc->in_use = false;
+ midc->in_use = true;
+ midc->busy = false;
pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
return i;
}
@@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
{
struct middma_device *mid = NULL;
struct intel_mid_dma_chan *midc = NULL;
- u32 status;
+ u32 status, raw_tfr, raw_block;
int i;
mid = (struct middma_device *)data;
@@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
return;
}
pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
- status = ioread32(mid->dma_base + RAW_TFR);
- pr_debug("MDMA:RAW_TFR %x\n", status);
+ raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+ raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+ status = raw_tfr | raw_block;
status &= mid->intr_mask;
while (status) {
/*txn interrupt*/
@@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
}
pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
status, midc->ch_id, i);
+ midc->raw_tfr = raw_tfr;
+ midc->raw_block = raw_block;
+ spin_lock_bh(&midc->lock);
/*clearing this interrupts first*/
iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
- iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
-
- spin_lock_bh(&midc->lock);
+ if (raw_block) {
+ iowrite32((1 << midc->ch_id),
+ mid->dma_base + CLEAR_BLOCK);
+ }
midc_scan_descriptors(mid, midc);
pr_debug("MDMA:Scan of desc... complete, unmasking\n");
iowrite32(UNMASK_INTR_REG(midc->ch_id),
mid->dma_base + MASK_TFR);
+ if (raw_block) {
+ iowrite32(UNMASK_INTR_REG(midc->ch_id),
+ mid->dma_base + MASK_BLOCK);
+ }
spin_unlock_bh(&midc->lock);
}
@@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
struct middma_device *mid = data;
- u32 status;
+ u32 tfr_status, err_status;
int call_tasklet = 0;
+ tfr_status = ioread32(mid->dma_base + RAW_TFR);
+ err_status = ioread32(mid->dma_base + RAW_ERR);
+ if (!tfr_status && !err_status)
+ return IRQ_NONE;
+
/*DMA Interrupt*/
pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
if (!mid) {
@@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
return -EINVAL;
}
- status = ioread32(mid->dma_base + RAW_TFR);
- pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask);
- status &= mid->intr_mask;
- if (status) {
+ pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
+ tfr_status &= mid->intr_mask;
+ if (tfr_status) {
/*need to disable intr*/
- iowrite32((status << 8), mid->dma_base + MASK_TFR);
- pr_debug("MDMA: Calling tasklet %x\n", status);
+ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+ iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
+ pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
call_tasklet = 1;
}
- status = ioread32(mid->dma_base + RAW_ERR);
- status &= mid->intr_mask;
- if (status) {
- iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
+ err_status &= mid->intr_mask;
+ if (err_status) {
+ iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
call_tasklet = 1;
}
if (call_tasklet)
@@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
{
struct middma_device *dma = pci_get_drvdata(pdev);
int err, i;
- unsigned int irq_level;
/* DMA coherent memory pool for DMA descriptor allocations */
dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
/*init CH structures*/
dma->intr_mask = 0;
+ dma->state = RUNNING;
for (i = 0; i < dma->max_chan; i++) {
struct intel_mid_dma_chan *midch = &dma->ch[i];
@@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
/*register irq */
if (dma->pimr_mask) {
- irq_level = IRQF_SHARED;
pr_debug("MDMA:Requesting irq shared for DMAC1\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
goto err_irq;
} else {
dma->intr_mask = 0x03;
- irq_level = 0;
pr_debug("MDMA:Requesting irq for DMAC2\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
- 0, "INTEL_MID_DMAC2", dma);
+ IRQF_SHARED, "INTEL_MID_DMAC2", dma);
if (0 != err)
goto err_irq;
}
@@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
return 0;
err_dma:
@@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/* Power Management */
+/**
+ * dma_suspend - PCI suspend function
+ *
+ * @pci: PCI device structure
+ * @state: PM message
+ *
+ * This function is called by the OS when a power event occurs
+ */
+int dma_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ int i;
+ struct middma_device *device = pci_get_drvdata(pci);
+ pr_debug("MDMA: dma_suspend called\n");
+
+ for (i = 0; i < device->max_chan; i++) {
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
+ device->state = SUSPENDED;
+ pci_set_drvdata(pci, device);
+ pci_save_state(pci);
+ pci_disable_device(pci);
+ pci_set_power_state(pci, PCI_D3hot);
+ return 0;
+}
+
+/**
+ * dma_resume - PCI resume function
+ *
+ * @pci: PCI device structure
+ *
+ * This function is called by the OS when a power event occurs
+ */
+int dma_resume(struct pci_dev *pci)
+{
+ int ret;
+ struct middma_device *device = pci_get_drvdata(pci);
+
+ pr_debug("MDMA: dma_resume called\n");
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+ ret = pci_enable_device(pci);
+ if (ret) {
+ pr_err("MDMA: device cant be enabled for %x\n", pci->device);
+ return ret;
+ }
+ device->state = RUNNING;
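+ /* re-enable the DMA controller (global enable bit) after power-up */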
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ pci_set_drvdata(pci, device);
+ return 0;
+}
+
+static int dma_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ return dma_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int dma_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ return dma_resume(pci_dev);
+}
+
+static int dma_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct middma_device *device = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < device->max_chan; i++) {
+ if (device->ch[i].in_use)
+ return -EAGAIN;
+ }
+
+ return pm_schedule_suspend(dev, 0);
+}
+
/******************************************************************************
* PCI stuff
*/
@@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
+static const struct dev_pm_ops intel_mid_dma_pm = {
+ .runtime_suspend = dma_runtime_suspend,
+ .runtime_resume = dma_runtime_resume,
+ .runtime_idle = dma_runtime_idle,
+};
+
static struct pci_driver intel_mid_dma_pci = {
.name = "Intel MID DMA",
.id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
+#ifdef CONFIG_PM
+ .suspend = dma_suspend,
+ .resume = dma_resume,
+ .driver = {
+ .pm = &intel_mid_dma_pm,
+ },
+#endif
};
static int __init intel_mid_dma_init(void)
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index d81aa658ab09..709fecbdde79 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -29,11 +29,12 @@
#include <linux/dmapool.h>
#include <linux/pci_ids.h>
-#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
+#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
#define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100
-
+#define INT_MASK_WE 0x8 /* shift to reach the mask registers' write-enable byte */
+#define CLEAR_DONE 0xFFFFEFFF /* clears the DONE bit (bit 12) in CTL_HIGH */
#define UNMASK_INTR_REG(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
@@ -41,6 +42,9 @@
#define ENABLE_CHANNEL(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
+#define DISABLE_CHANNEL(chan_num) \
+ (REG_BIT8 << chan_num)
+
#define DESCS_PER_CHANNEL 16
/*DMA Registers*/
/*registers associated with channel programming*/
@@ -50,6 +54,7 @@
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR 0x00 /* Source Address Register*/
#define DAR 0x08 /* Destination Address Register*/
+#define LLP 0x10 /* Linked List Pointer Register*/
#define CTL_LOW 0x18 /* Control Register*/
#define CTL_HIGH 0x1C /* Control Register*/
#define CFG_LOW 0x40 /* Configuration Register Low*/
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
union intel_mid_dma_ctl_hi {
struct {
u32 block_ts:12; /*block transfer size*/
- /*configured by DMAC*/
- u32 reser:20;
+ u32 done:1; /*Done - updated by DMAC*/
+ u32 reser:19; /*configured by DMAC*/
} ctlx;
u32 ctl_hi;
@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
u32 cfg_hi;
};
+
/**
* struct intel_mid_dma_chan - internal mid representation of a DMA channel
* @chan: dma_chan strcture represetation for mid chan
@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
* @slave: dma slave struture
* @descs_allocated: total number of decsiptors allocated
* @dma: dma device struture pointer
+ * @busy: bool representing if ch is busy (active txn) or not
* @in_use: bool representing if ch is in use or not
+ * @raw_tfr: raw transfer interrupt received
+ * @raw_block: raw block interrupt received
*/
struct intel_mid_dma_chan {
struct dma_chan chan;
@@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
- struct intel_mid_dma_slave *slave;
unsigned int descs_allocated;
struct middma_device *dma;
+ bool busy;
bool in_use;
+ u32 raw_tfr;
+ u32 raw_block;
+ struct intel_mid_dma_slave *mid_slave;
};
static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
return container_of(chan, struct intel_mid_dma_chan, chan);
}
+enum intel_mid_dma_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
/**
* struct middma_device - internal representation of a DMA device
* @pdev: PCI device
@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
* @max_chan: max number of chs supported (from drv_data)
* @block_size: Block size of DMA transfer supported (from drv_data)
 * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
+ * @state: dma PM device state
*/
struct middma_device {
struct pci_dev *pdev;
@@ -220,6 +237,7 @@ struct middma_device {
int max_chan;
int block_size;
unsigned int pimr_mask;
+ enum intel_mid_dma_state state;
};
static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
u32 cfg_lo;
u32 ctl_lo;
u32 ctl_hi;
+ struct pci_pool *lli_pool;
+ struct intel_mid_dma_lli *lli;
+ dma_addr_t lli_phys;
+ unsigned int lli_length;
+ unsigned int current_lli;
dma_addr_t next;
enum dma_data_direction dirn;
enum dma_status status;
- enum intel_mid_dma_width width; /*width of DMA txn*/
+ enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
};
+struct intel_mid_dma_lli {
+ dma_addr_t sar;
+ dma_addr_t dar;
+ dma_addr_t llp;
+ u32 ctl_lo;
+ u32 ctl_hi;
+} __attribute__ ((packed));
+
static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
{
return container_of(txd, struct intel_mid_dma_desc, txd);
}
+
+static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
+ (struct dma_slave_config *slave)
+{
+ return container_of(slave, struct intel_mid_dma_slave, dma_slave);
+}
+
+
+int dma_resume(struct pci_dev *pci);
+
#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 3533948b88ba..92b679024fed 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -926,6 +926,7 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
static const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+ { 0, },
};
static struct pci_driver pch_dma_driver = {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 17e2600a00cf..fab68a553205 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,11 +1,8 @@
/*
- * driver/dma/ste_dma40.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
#include <linux/kernel.h>
@@ -14,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <plat/ste_dma40.h>
@@ -32,6 +30,11 @@
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
+
+/* Max number of links per event group */
+#define D40_LCLA_LINK_PER_EVENT_GRP 128
+#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+
/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
@@ -41,7 +44,7 @@
#define D40_ALLOC_LOG_FREE 0
/* Hardware designer of the block */
-#define D40_PERIPHID2_DESIGNER 0x8
+#define D40_HW_DESIGNER 0x8
/**
* enum 40_command - The different commands and/or statuses.
@@ -84,18 +87,17 @@ struct d40_lli_pool {
* @lli_log: Same as above but for logical channels.
* @lli_pool: The pool with two entries pre-allocated.
* @lli_len: Number of llis of current descriptor.
- * @lli_count: Number of transfered llis.
- * @lli_tx_len: Max number of LLIs per transfer, there can be
- * many transfer for one descriptor.
+ * @lli_current: Number of transferred llis.
+ * @lcla_alloc: Number of LCLA entries allocated.
* @txd: DMA engine struct. Used for among other things for communication
* during a transfer.
* @node: List entry.
- * @dir: The transfer direction of this job.
* @is_in_client_list: true if the client owns this descriptor.
+ * @is_hw_linked: true if this job is linked in hardware to the previous
+ * job and will be started automatically after it.
*
* This descriptor is used for both logical and physical transfers.
*/
-
struct d40_desc {
/* LLI physical */
struct d40_phy_lli_bidir lli_phy;
@@ -104,14 +106,14 @@ struct d40_desc {
struct d40_lli_pool lli_pool;
int lli_len;
- int lli_count;
- u32 lli_tx_len;
+ int lli_current;
+ int lcla_alloc;
struct dma_async_tx_descriptor txd;
struct list_head node;
- enum dma_data_direction dir;
bool is_in_client_list;
+ bool is_hw_linked;
};
/**
@@ -123,17 +125,14 @@ struct d40_desc {
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
* @lock: Lock to protect the content in this struct.
- * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
- * @num_blocks: The number of entries of alloc_map. Equals to the
- * number of physical channels.
+ * @alloc_map: map of which LCLA entry is owned by which job.
*/
struct d40_lcla_pool {
void *base;
void *base_unaligned;
int pages;
spinlock_t lock;
- u32 *alloc_map;
- int num_blocks;
+ struct d40_desc **alloc_map;
};
/**
@@ -146,9 +145,7 @@ struct d40_lcla_pool {
* this physical channel. Can also be free or physically allocated.
* @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
- * event line number. Both allocated_src and allocated_dst can not be
- * allocated to a physical channel, since the interrupt handler has then
- * no way of figure out which one the interrupt belongs to.
+ * event line number.
*/
struct d40_phy_res {
spinlock_t lock;
@@ -178,6 +175,7 @@ struct d40_base;
* @active: Active descriptor.
* @queue: Queued jobs.
* @dma_cfg: The client configuration of this dma channel.
+ * @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
@@ -201,12 +199,12 @@ struct d40_chan {
struct list_head active;
struct list_head queue;
struct stedma40_chan_cfg dma_cfg;
+ bool configured;
struct d40_base *base;
/* Default register configurations */
u32 src_def_cfg;
u32 dst_def_cfg;
struct d40_def_lcsp log_def;
- struct d40_lcla_elem lcla;
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
@@ -234,7 +232,6 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -340,9 +337,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
align);
d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
align);
-
- d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
- d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
}
return 0;
@@ -357,22 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d)
d40d->lli_log.dst = NULL;
d40d->lli_phy.src = NULL;
d40d->lli_phy.dst = NULL;
- d40d->lli_phy.src_addr = 0;
- d40d->lli_phy.dst_addr = 0;
}
-static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
- struct d40_desc *desc)
+static int d40_lcla_alloc_one(struct d40_chan *d40c,
+ struct d40_desc *d40d)
{
- dma_cookie_t cookie = d40c->chan.cookie;
+ unsigned long flags;
+ int i;
+ int ret = -EINVAL;
+ int p;
- if (++cookie < 0)
- cookie = 1;
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+ p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
- d40c->chan.cookie = cookie;
- desc->txd.cookie = cookie;
+ /*
+ * Allocate both src and dst at the same time, so only half of the
+ * entries are needed per link. Start at 1 since index 0 can't be
+ * used: zero is the end-of-chain marker.
+ */
+ for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+ if (!d40c->base->lcla_pool.alloc_map[p + i]) {
+ d40c->base->lcla_pool.alloc_map[p + i] = d40d;
+ d40d->lcla_alloc++;
+ ret = i;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+ return ret;
+}
+
+static int d40_lcla_free_all(struct d40_chan *d40c,
+ struct d40_desc *d40d)
+{
+ unsigned long flags;
+ int i;
+ int ret = -EINVAL;
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ return 0;
+
+ spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
+
+ for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
+ if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+ D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
+ d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
+ D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
+ d40d->lcla_alloc--;
+ if (d40d->lcla_alloc == 0) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
+
+ return ret;
- return cookie;
}
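
The flat alloc_map introduced above is indexed per physical channel. A minimal user-space sketch of the indexing assumed by d40_lcla_alloc_one()/d40_lcla_free_all() (the helper name and the example values are illustrative, not part of the patch):

#include <stdio.h>

#define D40_LCLA_LINK_PER_EVENT_GRP 128

/*
 * Each physical channel owns D40_LCLA_LINK_PER_EVENT_GRP slots in the
 * flat alloc_map. Src and dst are allocated as a pair, so only half of
 * the range is iterated, and index 0 is skipped because zero is used
 * as the end-of-chain marker.
 */
static int lcla_slot(int phy_chan_num, int link)
{
	return phy_chan_num * D40_LCLA_LINK_PER_EVENT_GRP + link;
}

int main(void)
{
	/* physical channel 2, link 5 -> slot 261 */
	printf("slot %d\n", lcla_slot(2, 5));
	return 0;
}
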
static void d40_desc_remove(struct d40_desc *d40d)
@@ -382,28 +421,35 @@ static void d40_desc_remove(struct d40_desc *d40d)
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
- struct d40_desc *d;
- struct d40_desc *_d;
+ struct d40_desc *desc = NULL;
if (!list_empty(&d40c->client)) {
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
d40_pool_lli_free(d);
d40_desc_remove(d);
+ desc = d;
+ memset(desc, 0, sizeof(*desc));
break;
}
- } else {
- d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
- if (d != NULL) {
- memset(d, 0, sizeof(struct d40_desc));
- INIT_LIST_HEAD(&d->node);
- }
}
- return d;
+
+ if (!desc)
+ desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
+
+ if (desc)
+ INIT_LIST_HEAD(&desc->node);
+
+ return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
+
+ d40_lcla_free_all(d40c, d40d);
kmem_cache_free(d40c->base->desc_slab, d40d);
}
@@ -412,6 +458,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
list_add_tail(&desc->node, &d40c->active);
}
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ int curr_lcla = -EINVAL, next_lcla;
+
+ if (d40c->log_num == D40_PHY_CHAN) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_current = d40d->lli_len;
+ } else {
+
+ if ((d40d->lli_len - d40d->lli_current) > 1)
+ curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+
+ d40_log_lli_lcpa_write(d40c->lcpa,
+ &d40d->lli_log.dst[d40d->lli_current],
+ &d40d->lli_log.src[d40d->lli_current],
+ curr_lcla);
+
+ d40d->lli_current++;
+ for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
+ struct d40_log_lli *lcla;
+
+ if (d40d->lli_current + 1 < d40d->lli_len)
+ next_lcla = d40_lcla_alloc_one(d40c, d40d);
+ else
+ next_lcla = -EINVAL;
+
+ lcla = d40c->base->lcla_pool.base +
+ d40c->phy_chan->num * 1024 +
+ 8 * curr_lcla * 2;
+
+ d40_log_lli_lcla_write(lcla,
+ &d40d->lli_log.dst[d40d->lli_current],
+ &d40d->lli_log.src[d40d->lli_current],
+ next_lcla);
+
+ (void) dma_map_single(d40c->base->dev, lcla,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+
+ curr_lcla = next_lcla;
+
+ if (curr_lcla == -EINVAL) {
+ d40d->lli_current++;
+ break;
+ }
+
+ }
+ }
+}
+
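
The lcla pointer arithmetic in d40_desc_load() above implies a fixed per-channel LCLA layout. A rough stand-alone sketch of that math, assuming struct d40_log_lli is two u32 fields (8 bytes) and each link holds a src/dst pair; the names and constants below are illustrative:

#include <stdio.h>
#include <stddef.h>

#define LCLA_BYTES_PER_PHY_CHAN 1024	/* assumed from the "* 1024" above */
#define LOG_LLI_SIZE 8			/* assumed sizeof(struct d40_log_lli) */

static size_t lcla_offset(int phy_chan_num, int curr_lcla)
{
	/* base + chan * 1024 + 8 * curr_lcla * 2, as in d40_desc_load() */
	return (size_t)phy_chan_num * LCLA_BYTES_PER_PHY_CHAN +
	       (size_t)LOG_LLI_SIZE * curr_lcla * 2;
}

int main(void)
{
	/* each link pair takes 16 bytes, so 64 pairs fit per channel */
	printf("link 1 of channel 0 starts at byte %zu\n", lcla_offset(0, 1));
	return 0;
}
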
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
struct d40_desc *d;
@@ -443,68 +542,26 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d;
}
-/* Support functions for logical channels */
-
-static int d40_lcla_id_get(struct d40_chan *d40c)
+static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
- int src_id = 0;
- int dst_id = 0;
- struct d40_log_lli *lcla_lidx_base =
- d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
- int i;
- int lli_per_log = d40c->base->plat_data->llis_per_log;
- unsigned long flags;
-
- if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
- return 0;
-
- if (d40c->base->lcla_pool.num_blocks > 32)
- return -EINVAL;
-
- spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
- for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
- if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
- (0x1 << i))) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
- (0x1 << i);
- break;
- }
- }
- src_id = i;
- if (src_id >= d40c->base->lcla_pool.num_blocks)
- goto err;
+ struct d40_desc *d;
- for (; i < d40c->base->lcla_pool.num_blocks; i++) {
- if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
- (0x1 << i))) {
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
- (0x1 << i);
+ if (list_empty(&d40c->queue))
+ return NULL;
+ list_for_each_entry(d, &d40c->queue, node)
+ if (list_is_last(&d->node, &d40c->queue))
break;
- }
- }
-
- dst_id = i;
- if (dst_id == src_id)
- goto err;
-
- d40c->lcla.src_id = src_id;
- d40c->lcla.dst_id = dst_id;
- d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
- d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
-
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
- return 0;
-err:
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
- return -EINVAL;
+ return d;
}
+/* Support functions for logical channels */
+
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
{
- int status, i;
+ u32 status;
+ int i;
void __iomem *active_reg;
int ret = 0;
unsigned long flags;
@@ -567,35 +624,19 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
- unsigned long flags;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d);
-
- /* Return desc to free-list */
d40_desc_free(d40c, d40d);
}
/* Release queued descriptors waiting for transfer */
while ((d40d = d40_first_queued(d40c))) {
d40_desc_remove(d40d);
-
- /* Return desc to free-list */
d40_desc_free(d40c, d40d);
}
- spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
-
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
- (~(0x1 << d40c->lcla.dst_id));
- d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
- (~(0x1 << d40c->lcla.src_id));
-
- d40c->lcla.src_id = -1;
- d40c->lcla.dst_id = -1;
-
- spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
d40c->pending_tx = 0;
d40c->busy = false;
@@ -640,45 +681,47 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
- u32 val = 0;
+ u32 val;
- /* If SSLNK or SDLNK is zero all events are disabled */
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SSLNK);
-
- if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
- val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK);
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
return val;
}
-static void d40_config_enable_lidx(struct d40_chan *d40c)
+static u32 d40_get_prmo(struct d40_chan *d40c)
{
- /* Set LIDX for lcla */
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
-
- writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
- D40_SREG_ELEM_LOG_LIDX_MASK,
- d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ static const unsigned int phy_map[] = {
+ [STEDMA40_PCHAN_BASIC_MODE]
+ = D40_DREG_PRMO_PCHAN_BASIC,
+ [STEDMA40_PCHAN_MODULO_MODE]
+ = D40_DREG_PRMO_PCHAN_MODULO,
+ [STEDMA40_PCHAN_DOUBLE_DST_MODE]
+ = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
+ };
+ static const unsigned int log_map[] = {
+ [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
+ = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
+ [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
+ = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
+ [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
+ = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
+ };
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ return phy_map[d40c->dma_cfg.mode_opt];
+ else
+ return log_map[d40c->dma_cfg.mode_opt];
}
-static int d40_config_write(struct d40_chan *d40c)
+static void d40_config_write(struct d40_chan *d40c)
{
u32 addr_base;
u32 var;
- int res;
-
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res)
- return res;
/* Odd addresses are even addresses + 4 */
addr_base = (d40c->phy_chan->num % 2) * 4;
@@ -688,8 +731,7 @@ static int d40_config_write(struct d40_chan *d40c)
writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
/* Setup operational mode option register */
- var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
- 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+ var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
@@ -704,41 +746,181 @@ static int d40_config_write(struct d40_chan *d40c)
d40c->phy_chan->num * D40_DREG_PCDELTA +
D40_CHAN_REG_SDCFG);
- d40_config_enable_lidx(d40c);
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSELT);
+
+ }
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >>
+ D40_SREG_ELEM_PHY_ECNT_POS;
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ if (!d40c->busy)
+ return 0;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
}
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_resume(struct dma_chan *chan)
{
- if (d40d->lli_phy.dst && d40d->lli_phy.src) {
- d40_phy_lli_write(d40c->base->virtbase,
- d40c->phy_chan->num,
- d40d->lli_phy.dst,
- d40d->lli_phy.src);
- } else if (d40d->lli_log.dst && d40d->lli_log.src) {
- struct d40_log_lli *src = d40d->lli_log.src;
- struct d40_log_lli *dst = d40d->lli_log.dst;
- int s;
-
- src += d40d->lli_count;
- dst += d40d->lli_count;
- s = d40_log_lli_write(d40c->lcpa,
- d40c->lcla.src, d40c->lcla.dst,
- dst, src,
- d40c->base->plat_data->llis_per_log);
-
- /* If s equals to zero, the job is not linked */
- if (s > 0) {
- (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
- s * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
- (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
- s * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ if (!d40c->busy)
+ return 0;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->base->rev == 0)
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_SUSPEND_REQ);
+ goto no_suspend;
}
+
+ /* If there are bytes left to transfer or a linked tx, resume the job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ d40_config_set_event(d40c, true);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+
+no_suspend:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ /* TODO: Write */
+}
+
+static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ struct d40_desc *d40d_prev = NULL;
+ int i;
+ u32 val;
+
+ if (!list_empty(&d40c->queue))
+ d40d_prev = d40_last_queued(d40c);
+ else if (!list_empty(&d40c->active))
+ d40d_prev = d40_first_active_get(d40c);
+
+ if (!d40d_prev)
+ return;
+
+ /* Here we try to join this job with previous jobs */
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ /* Figure out which link we're currently transmitting */
+ for (i = 0; i < d40d_prev->lli_len; i++)
+ if (val == d40d_prev->lli_phy.src[i].reg_lnk)
+ break;
+
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
+
+ if (i == (d40d_prev->lli_len - 1) && val > 0) {
+ /* Change the current one */
+ writel(virt_to_phys(d40d->lli_phy.src),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ writel(virt_to_phys(d40d->lli_phy.dst),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+
+ d40d->is_hw_linked = true;
+
+ } else if (i < d40d_prev->lli_len) {
+ (void) dma_unmap_single(d40c->base->dev,
+ virt_to_phys(d40d_prev->lli_phy.src),
+ d40d_prev->lli_pool.size,
+ DMA_TO_DEVICE);
+
+ /* Keep the settings */
+ val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
+ ~D40_SREG_LNK_PHYS_LNK_MASK;
+ d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
+ val | virt_to_phys(d40d->lli_phy.src);
+
+ val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
+ ~D40_SREG_LNK_PHYS_LNK_MASK;
+ d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
+ val | virt_to_phys(d40d->lli_phy.dst);
+
+ (void) dma_map_single(d40c->base->dev,
+ d40d_prev->lli_phy.src,
+ d40d_prev->lli_pool.size,
+ DMA_TO_DEVICE);
+ d40d->is_hw_linked = true;
}
- d40d->lli_count += d40d->lli_tx_len;
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -749,14 +931,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
unsigned long flags;
+ (void) d40_pause(&d40c->chan);
+
spin_lock_irqsave(&d40c->lock, flags);
- tx->cookie = d40_assign_cookie(d40c, d40d);
+ d40c->chan.cookie++;
+
+ if (d40c->chan.cookie < 0)
+ d40c->chan.cookie = 1;
+
+ d40d->txd.cookie = d40c->chan.cookie;
+
+ if (d40c->log_num == D40_PHY_CHAN)
+ d40_tx_submit_phy(d40c, d40d);
+ else
+ d40_tx_submit_log(d40c, d40d);
d40_desc_queue(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
+ (void) d40_resume(&d40c->chan);
+
return tx->cookie;
}
@@ -796,14 +992,21 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
/* Add to active queue */
d40_desc_submit(d40c, d40d);
- /* Initiate DMA job */
- d40_desc_load(d40c, d40d);
+ /*
+ * If this job is already linked in hw,
+ * do not submit it.
+ */
- /* Start dma job */
- err = d40_start(d40c);
+ if (!d40d->is_hw_linked) {
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
- if (err)
- return NULL;
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
}
return d40d;
@@ -814,17 +1017,15 @@ static void dma_tc_handle(struct d40_chan *d40c)
{
struct d40_desc *d40d;
- if (!d40c->phy_chan)
- return;
-
/* Get first active entry from list */
d40d = d40_first_active_get(d40c);
if (d40d == NULL)
return;
- if (d40d->lli_count < d40d->lli_len) {
+ d40_lcla_free_all(d40c, d40d);
+ if (d40d->lli_current < d40d->lli_len) {
d40_desc_load(d40c, d40d);
/* Start dma job */
(void) d40_start(d40c);
@@ -842,7 +1043,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
static void dma_tasklet(unsigned long data)
{
struct d40_chan *d40c = (struct d40_chan *) data;
- struct d40_desc *d40d_fin;
+ struct d40_desc *d40d;
unsigned long flags;
dma_async_tx_callback callback;
void *callback_param;
@@ -850,12 +1051,12 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags);
/* Get first active entry from list */
- d40d_fin = d40_first_active_get(d40c);
+ d40d = d40_first_active_get(d40c);
- if (d40d_fin == NULL)
+ if (d40d == NULL)
goto err;
- d40c->completed = d40d_fin->txd.cookie;
+ d40c->completed = d40d->txd.cookie;
/*
* If terminating a channel pending_tx is set to zero.
@@ -867,19 +1068,19 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
- callback = d40d_fin->txd.callback;
- callback_param = d40d_fin->txd.callback_param;
-
- if (async_tx_test_ack(&d40d_fin->txd)) {
- d40_pool_lli_free(d40d_fin);
- d40_desc_remove(d40d_fin);
- /* Return desc to free-list */
- d40_desc_free(d40c, d40d_fin);
+ callback = d40d->txd.callback;
+ callback_param = d40d->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d->txd)) {
+ d40_pool_lli_free(d40d);
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
} else {
- if (!d40d_fin->is_in_client_list) {
- d40_desc_remove(d40d_fin);
- list_add_tail(&d40d_fin->node, &d40c->client);
- d40d_fin->is_in_client_list = true;
+ if (!d40d->is_in_client_list) {
+ d40_desc_remove(d40d);
+ d40_lcla_free_all(d40c, d40d);
+ list_add_tail(&d40d->node, &d40c->client);
+ d40d->is_in_client_list = true;
}
}
@@ -890,7 +1091,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback)
+ if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
callback(callback_param);
return;
@@ -919,7 +1120,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
int i;
u32 regs[ARRAY_SIZE(il)];
- u32 tmp;
u32 idx;
u32 row;
long chan = -1;
@@ -946,9 +1146,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
idx = chan & (BITS_PER_LONG - 1);
/* ACK interrupt */
- tmp = readl(base->virtbase + il[row].clr);
- tmp |= 1 << idx;
- writel(tmp, base->virtbase + il[row].clr);
+ writel(1 << idx, base->virtbase + il[row].clr);
if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx];
@@ -971,24 +1169,47 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-
static int d40_validate_conf(struct d40_chan *d40c,
struct stedma40_chan_cfg *conf)
{
int res = 0;
u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
- bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
- == STEDMA40_CHANNEL_IN_LOG_MODE;
+ bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
+
+ if (!conf->dir) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
+ d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
+ d40c->runtime_addr == 0) {
+
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid TX channel address (%d)\n",
+ __func__, conf->dst_dev_type);
+ res = -EINVAL;
+ }
+
+ if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
+ d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
+ d40c->runtime_addr == 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid RX channel address (%d)\n",
+ __func__, conf->src_dev_type);
+ res = -EINVAL;
+ }
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+ if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
dst_event_group == STEDMA40_DEV_DST_MEMORY) {
dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
__func__);
res = -EINVAL;
}
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+ if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
src_event_group == STEDMA40_DEV_SRC_MEMORY) {
dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
__func__);
@@ -1082,7 +1303,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
spin_lock_irqsave(&phy->lock, flags);
if (!log_event_line) {
- /* Physical interrupts are masked per physical full channel */
phy->allocated_dst = D40_ALLOC_FREE;
phy->allocated_src = D40_ALLOC_FREE;
is_free = true;
@@ -1119,10 +1339,7 @@ static int d40_allocate_channel(struct d40_chan *d40c)
int j;
int log_num;
bool is_src;
- bool is_log = (d40c->dma_cfg.channel_type &
- STEDMA40_CHANNEL_IN_OPER_MODE)
- == STEDMA40_CHANNEL_IN_LOG_MODE;
-
+ bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res;
@@ -1251,7 +1468,6 @@ static int d40_free_dma(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node) {
d40_pool_lli_free(d);
d40_desc_remove(d);
- /* Return desc to free-list */
d40_desc_free(d40c, d);
}
@@ -1324,37 +1540,12 @@ static int d40_free_dma(struct d40_chan *d40c)
return res;
}
d40c->phy_chan = NULL;
- /* Invalidate channel type */
- d40c->dma_cfg.channel_type = 0;
+ d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
return 0;
}
-static int d40_pause(struct dma_chan *chan)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- int res;
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res == 0) {
- if (d40c->log_num != D40_PHY_CHAN) {
- d40_config_set_event(d40c, false);
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c))
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- }
- }
-
- spin_unlock_irqrestore(&d40c->lock, flags);
- return res;
-}
-
static bool d40_is_paused(struct d40_chan *d40c)
{
bool is_paused = false;
@@ -1381,16 +1572,22 @@ static bool d40_is_paused(struct d40_chan *d40c)
}
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- else {
+ status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ } else {
dev_err(&d40c->chan.dev->device,
"[%s] Unknown direction\n", __func__);
goto _exit;
}
- status = d40_chan_has_events(d40c);
+
status = (status & D40_EVENTLINE_MASK(event)) >>
D40_EVENTLINE_POS(event);
@@ -1403,64 +1600,6 @@ _exit:
}
-static bool d40_tx_is_linked(struct d40_chan *d40c)
-{
- bool is_link;
-
- if (d40c->log_num != D40_PHY_CHAN)
- is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
- else
- is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDLNK) &
- D40_SREG_LNK_PHYS_LNK_MASK;
- return is_link;
-}
-
-static u32 d40_residue(struct d40_chan *d40c)
-{
- u32 num_elt;
-
- if (d40c->log_num != D40_PHY_CHAN)
- num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
- >> D40_MEM_LCSP2_ECNT_POS;
- else
- num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
- d40c->phy_chan->num * D40_DREG_PCDELTA +
- D40_CHAN_REG_SDELT) &
- D40_SREG_ELEM_PHY_ECNT_MASK) >>
- D40_SREG_ELEM_PHY_ECNT_POS;
- return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
-}
-
-static int d40_resume(struct dma_chan *chan)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- int res = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- if (d40c->base->rev == 0)
- if (d40c->log_num != D40_PHY_CHAN) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- goto no_suspend;
- }
-
- /* If bytes left to transfer or linked tx resume job */
- if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
- if (d40c->log_num != D40_PHY_CHAN)
- d40_config_set_event(d40c, true);
- res = d40_channel_execute_command(d40c, D40_DMA_RUN);
- }
-
-no_suspend:
- spin_unlock_irqrestore(&d40c->lock, flags);
- return res;
-}
-
static u32 stedma40_residue(struct dma_chan *chan)
{
struct d40_chan *d40c =
@@ -1475,51 +1614,6 @@ static u32 stedma40_residue(struct dma_chan *chan)
return bytes_left;
}
-/* Public DMA functions in addition to the DMA engine framework */
-
-int stedma40_set_psize(struct dma_chan *chan,
- int src_psize,
- int dst_psize)
-{
- struct d40_chan *d40c =
- container_of(chan, struct d40_chan, chan);
- unsigned long flags;
-
- spin_lock_irqsave(&d40c->lock, flags);
-
- if (d40c->log_num != D40_PHY_CHAN) {
- d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
- d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
- d40c->log_def.lcsp1 |= src_psize <<
- D40_MEM_LCSP1_SCFG_PSIZE_POS;
- d40c->log_def.lcsp3 |= dst_psize <<
- D40_MEM_LCSP1_SCFG_PSIZE_POS;
- goto out;
- }
-
- if (src_psize == STEDMA40_PSIZE_PHY_1)
- d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
- else {
- d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
- D40_SREG_CFG_PSIZE_POS);
- d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
- }
-
- if (dst_psize == STEDMA40_PSIZE_PHY_1)
- d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
- else {
- d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
- D40_SREG_CFG_PSIZE_POS);
- d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
- }
-out:
- spin_unlock_irqrestore(&d40c->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(stedma40_set_psize);
-
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
struct scatterlist *sgl_dst,
struct scatterlist *sgl_src,
@@ -1545,21 +1639,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
goto err;
d40d->lli_len = sgl_len;
- d40d->lli_tx_len = d40d->lli_len;
+ d40d->lli_current = 0;
d40d->txd.flags = dma_flags;
if (d40c->log_num != D40_PHY_CHAN) {
- if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
- d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
- if (sgl_len > 1)
- /*
- * Check if there is space available in lcla. If not,
- * split list into 1-length and run only in lcpa
- * space.
- */
- if (d40_lcla_id_get(d40c) != 0)
- d40d->lli_tx_len = 1;
if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1567,27 +1650,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
goto err;
}
- (void) d40_log_sg_to_lli(d40c->lcla.src_id,
- sgl_src,
+ (void) d40_log_sg_to_lli(sgl_src,
sgl_len,
d40d->lli_log.src,
d40c->log_def.lcsp1,
- d40c->dma_cfg.src_info.data_width,
- dma_flags & DMA_PREP_INTERRUPT,
- d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
+ d40c->dma_cfg.src_info.data_width);
- (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
- sgl_dst,
+ (void) d40_log_sg_to_lli(sgl_dst,
sgl_len,
d40d->lli_log.dst,
d40c->log_def.lcsp3,
- d40c->dma_cfg.dst_info.data_width,
- dma_flags & DMA_PREP_INTERRUPT,
- d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
-
-
+ d40c->dma_cfg.dst_info.data_width);
} else {
if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
dev_err(&d40c->chan.dev->device,
@@ -1599,11 +1672,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
0,
d40d->lli_phy.src,
- d40d->lli_phy.src_addr,
+ virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.src_info.psize,
- true);
+ d40c->dma_cfg.src_info.psize);
if (res < 0)
goto err;
@@ -1612,11 +1684,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
sgl_len,
0,
d40d->lli_phy.dst,
- d40d->lli_phy.dst_addr,
+ virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.dst_info.psize,
- true);
+ d40c->dma_cfg.dst_info.psize);
if (res < 0)
goto err;
@@ -1633,6 +1704,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
return &d40d->txd;
err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
@@ -1652,6 +1725,9 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
} else
err = d40_config_memcpy(d40c);
+ if (!err)
+ d40c->configured = true;
+
return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
@@ -1668,11 +1744,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
d40c->completed = chan->cookie = 1;
- /*
- * If no dma configuration is set (channel_type == 0)
- * use default configuration (memcpy)
- */
- if (d40c->dma_cfg.channel_type == 0) {
+ /* If no dma configuration is set use default configuration (memcpy) */
+ if (!d40c->configured) {
err = d40_config_memcpy(d40c);
if (err) {
dev_err(&d40c->chan.dev->device,
@@ -1712,14 +1785,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
* resource is free. In case of multiple logical channels
* on the same physical resource, only the first write is necessary.
*/
- if (is_free_phy) {
- err = d40_config_write(d40c);
- if (err) {
- dev_err(&d40c->chan.dev->device,
- "[%s] Failed to configure channel\n",
- __func__);
- }
- }
+ if (is_free_phy)
+ d40_config_write(d40c);
fail:
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
@@ -1790,23 +1857,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
goto err;
}
d40d->lli_len = 1;
- d40d->lli_tx_len = 1;
+ d40d->lli_current = 0;
d40_log_fill_lli(d40d->lli_log.src,
src,
size,
- 0,
d40c->log_def.lcsp1,
d40c->dma_cfg.src_info.data_width,
- false, true);
+ true);
d40_log_fill_lli(d40d->lli_log.dst,
dst,
size,
- 0,
d40c->log_def.lcsp3,
d40c->dma_cfg.dst_info.data_width,
- true, true);
+ true);
} else {
@@ -1851,12 +1916,25 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
err_fill_lli:
dev_err(&d40c->chan.dev->device,
"[%s] Failed filling in PHY LLI\n", __func__);
- d40_pool_lli_free(d40d);
err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
spin_unlock_irqrestore(&d40c->lock, flags);
return NULL;
}
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long dma_flags)
+{
+ if (dst_nents != src_nents)
+ return NULL;
+
+ return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
+}
+
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
struct d40_chan *d40c,
struct scatterlist *sgl,
@@ -1874,19 +1952,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
}
d40d->lli_len = sg_len;
- if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
- d40d->lli_tx_len = d40d->lli_len;
- else
- d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
-
- if (sg_len > 1)
- /*
- * Check if there is space available in lcla.
- * If not, split list into 1-length and run only
- * in lcpa space.
- */
- if (d40_lcla_id_get(d40c) != 0)
- d40d->lli_tx_len = 1;
+ d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE)
if (d40c->runtime_addr)
@@ -1902,16 +1968,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
else
return -EINVAL;
- total_size = d40_log_sg_to_dev(&d40c->lcla,
- sgl, sg_len,
+ total_size = d40_log_sg_to_dev(sgl, sg_len,
&d40d->lli_log,
&d40c->log_def,
d40c->dma_cfg.src_info.data_width,
d40c->dma_cfg.dst_info.data_width,
direction,
- dma_flags & DMA_PREP_INTERRUPT,
- dev_addr, d40d->lli_tx_len,
- d40c->base->plat_data->llis_per_log);
+ dev_addr);
if (total_size < 0)
return -EINVAL;
@@ -1937,7 +2000,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
}
d40d->lli_len = sgl_len;
- d40d->lli_tx_len = sgl_len;
+ d40d->lli_current = 0;
if (direction == DMA_FROM_DEVICE) {
dst_dev_addr = 0;
@@ -1958,11 +2021,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
sgl_len,
src_dev_addr,
d40d->lli_phy.src,
- d40d->lli_phy.src_addr,
+ virt_to_phys(d40d->lli_phy.src),
d40c->src_def_cfg,
d40c->dma_cfg.src_info.data_width,
- d40c->dma_cfg.src_info.psize,
- true);
+ d40c->dma_cfg.src_info.psize);
if (res < 0)
return res;
@@ -1970,11 +2032,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
sgl_len,
dst_dev_addr,
d40d->lli_phy.dst,
- d40d->lli_phy.dst_addr,
+ virt_to_phys(d40d->lli_phy.dst),
d40c->dst_def_cfg,
d40c->dma_cfg.dst_info.data_width,
- d40c->dma_cfg.dst_info.psize,
- true);
+ d40c->dma_cfg.dst_info.psize);
if (res < 0)
return res;
@@ -2001,17 +2062,11 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
return ERR_PTR(-EINVAL);
}
- if (d40c->dma_cfg.pre_transfer)
- d40c->dma_cfg.pre_transfer(chan,
- d40c->dma_cfg.pre_transfer_data,
- sg_dma_len(sgl));
-
spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
- spin_unlock_irqrestore(&d40c->lock, flags);
if (d40d == NULL)
- return NULL;
+ goto err;
if (d40c->log_num != D40_PHY_CHAN)
err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
@@ -2024,7 +2079,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
"[%s] Failed to prepare %s slave sg job: %d\n",
__func__,
d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
- return NULL;
+ goto err;
}
d40d->txd.flags = dma_flags;
@@ -2033,7 +2088,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
d40d->txd.tx_submit = d40_tx_submit;
+ spin_unlock_irqrestore(&d40c->lock, flags);
return &d40d->txd;
+
+err:
+ if (d40d)
+ d40_desc_free(d40c, d40d);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return NULL;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2166,25 +2228,43 @@ static void d40_set_runtime_config(struct dma_chan *chan,
return;
}
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (config_maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (config_maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (config_maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (config_maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (config_maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (config_maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
/* Set up all the endpoint configs */
cfg->src_info.data_width = addr_width;
cfg->src_info.psize = psize;
- cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->src_info.big_endian = false;
cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
cfg->dst_info.data_width = addr_width;
cfg->dst_info.psize = psize;
- cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
+ cfg->dst_info.big_endian = false;
cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ /* Fill in register values */
+ if (d40c->log_num != D40_PHY_CHAN)
+ d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+ else
+ d40_phy_cfg(cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, false);
+
/* These settings will take precedence later */
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
@@ -2247,10 +2327,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->base = base;
d40c->chan.device = dma;
- /* Invalidate lcla element */
- d40c->lcla.src_id = -1;
- d40c->lcla.dst_id = -1;
-
spin_lock_init(&d40c->lock);
d40c->log_num = D40_PHY_CHAN;
@@ -2281,6 +2357,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_slave.device_tx_status = d40_tx_status;
base->dma_slave.device_issue_pending = d40_issue_pending;
@@ -2301,10 +2378,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_memcpy.device_tx_status = d40_tx_status;
base->dma_memcpy.device_issue_pending = d40_issue_pending;
@@ -2331,10 +2410,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+ dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
base->dma_both.device_free_chan_resources = d40_free_chan_resources;
base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_dma_sg = d40_prep_sg;
base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
base->dma_both.device_tx_status = d40_tx_status;
base->dma_both.device_issue_pending = d40_issue_pending;
@@ -2387,9 +2468,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark disabled channels as occupied */
for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
- base->phy_res[i].allocated_src = D40_ALLOC_PHY;
- base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
- num_phy_chans_avail--;
+ int chan = base->plat_data->disabled_channels[i];
+
+ base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ num_phy_chans_avail--;
}
dev_info(base->dev, "%d of %d physical DMA channels available\n",
@@ -2441,6 +2524,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_phy_chans;
int i;
u32 val;
+ u32 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2479,21 +2563,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
}
}
- /* Get silicon revision */
+ /* Get silicon revision and designer */
val = readl(virtbase + D40_DREG_PERIPHID2);
- if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
+ if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
+ D40_HW_DESIGNER) {
dev_err(&pdev->dev,
"[%s] Unknown designer! Got %x wanted %x\n",
- __func__, val & 0xf, D40_PERIPHID2_DESIGNER);
+ __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+ D40_HW_DESIGNER);
goto failure;
}
+ rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
+ D40_DREG_PERIPHID2_REV_POS;
+
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
- (val >> 4) & 0xf, res->start);
+ rev, res->start);
plat_data = pdev->dev.platform_data;
@@ -2515,7 +2604,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->rev = (val >> 4) & 0xf;
+ base->rev = rev;
base->clk = clk;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
@@ -2549,7 +2638,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lookup_log_chans)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+ sizeof(struct d40_desc *) *
+ D40_LCLA_LINK_PER_EVENT_GRP,
GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2563,7 +2655,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
return base;
failure:
- if (clk) {
+ if (!IS_ERR(clk)) {
clk_disable(clk);
clk_put(clk);
}
@@ -2700,8 +2792,10 @@ static int __init d40_lcla_allocate(struct d40_base *base)
if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
base->lcla_pool.base = (void *)page_list[i];
} else {
- /* After many attempts, no succees with finding the correct
- * alignment try with allocating a big buffer */
+ /*
+ * After many attempts with no success in finding the correct
+ * alignment, try allocating a big buffer instead.
+ */
dev_warn(base->dev,
"[%s] Failed to get %d pages @ 18 bit align.\n",
__func__, base->lcla_pool.pages);
@@ -2794,8 +2888,6 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
- base->lcla_pool.num_blocks = base->num_phy_chans;
-
base->irq = platform_get_irq(pdev, 0);
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
@@ -2823,8 +2915,9 @@ failure:
if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
free_pages((unsigned long)base->lcla_pool.base,
base->lcla_pool.pages);
- if (base->lcla_pool.base_unaligned)
- kfree(base->lcla_pool.base_unaligned);
+
+ kfree(base->lcla_pool.base_unaligned);
+
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index d937f76d6e2e..8557cb88b255 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,10 +1,8 @@
/*
- * driver/dma/ste_dma40_ll.c
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/kernel.h>
@@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
- l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
- l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
- l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
*lcsp1 = l1;
*lcsp3 = l3;
@@ -113,13 +108,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS;
}
- src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
- dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+ if (cfg->src_info.big_endian)
+ src |= 1 << D40_SREG_CFG_LBE_POS;
+ if (cfg->dst_info.big_endian)
+ dst |= 1 << D40_SREG_CFG_LBE_POS;
*src_cfg = src;
*dst_cfg = dst;
@@ -197,8 +194,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys,
u32 reg_cfg,
u32 data_width,
- int psize,
- bool term_int)
+ int psize)
{
int total_size = 0;
int i;
@@ -238,7 +234,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
}
return total_size;
- err:
+err:
return err;
}
@@ -271,11 +267,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* DMA logical lli operations */
+static void d40_log_lli_link(struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+
+ if (next != -EINVAL) {
+ slos = next * 2;
+ dlos = next * 2 + 1;
+ } else {
+ lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ }
+
+ lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+ (slos << D40_MEM_LCSP1_SLOS_POS);
+
+ lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
+ (dlos << D40_MEM_LCSP1_SLOS_POS);
+}
+
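
d40_log_lli_link() above packs the next LCLA pair index into SLOS/DLOS. A small illustrative sketch of just that encoding (the packing into lcsp13 and the register masks are omitted):

#include <stdio.h>

/*
 * next < 0 means "no next link": the real code then leaves SLOS/DLOS at
 * 0 (the end marker) and additionally sets the terminal-interrupt bits.
 */
static void encode_link(int next, unsigned int *slos, unsigned int *dlos)
{
	if (next < 0) {
		*slos = 0;
		*dlos = 0;
	} else {
		*slos = next * 2;	/* src lli of the next pair */
		*dlos = next * 2 + 1;	/* dst lli of the next pair */
	}
}

int main(void)
{
	unsigned int slos, dlos;

	encode_link(3, &slos, &dlos);
	printf("slos %u dlos %u\n", slos, dlos);	/* prints 6 and 7 */
	return 0;
}
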
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ d40_log_lli_link(lli_dst, lli_src, next);
+
+ writel(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+}
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next)
+{
+ d40_log_lli_link(lli_dst, lli_src, next);
+
+ writel(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+}
+
void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size,
- u32 lli_next_off, u32 reg_cfg,
+ u32 reg_cfg,
u32 data_width,
- bool term_int, bool addr_inc)
+ bool addr_inc)
{
lli->lcsp13 = reg_cfg;
@@ -290,165 +334,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
if (addr_inc)
lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
- lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
- /* If this scatter list entry is the last one, no next link */
- lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
- D40_MEM_LCSP1_SLOS_MASK;
-
- if (term_int)
- lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
- else
- lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
}
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
- struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp,
u32 src_data_width,
u32 dst_data_width,
enum dma_data_direction direction,
- bool term_int, dma_addr_t dev_addr, int max_len,
- int llis_per_log)
+ dma_addr_t dev_addr)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
- u32 next_lli_off_dst = 0;
- u32 next_lli_off_src = 0;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /*
- * If this scatter list entry is the last one or
- * max length, terminate link.
- */
- if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
- next_lli_off_src = 0;
- next_lli_off_dst = 0;
- } else {
- if (next_lli_off_dst == 0 &&
- next_lli_off_src == 0) {
- /* The first lli will be at next_lli_off */
- next_lli_off_dst = (lcla->dst_id *
- llis_per_log + 1);
- next_lli_off_src = (lcla->src_id *
- llis_per_log + 1);
- } else {
- next_lli_off_dst++;
- next_lli_off_src++;
- }
- }
-
if (direction == DMA_TO_DEVICE) {
d40_log_fill_lli(&lli->src[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off_src,
lcsp->lcsp1, src_data_width,
- false,
true);
d40_log_fill_lli(&lli->dst[i],
dev_addr,
sg_dma_len(current_sg),
- next_lli_off_dst,
lcsp->lcsp3, dst_data_width,
- /* No next == terminal interrupt */
- term_int && !next_lli_off_dst,
false);
} else {
d40_log_fill_lli(&lli->dst[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off_dst,
lcsp->lcsp3, dst_data_width,
- /* No next == terminal interrupt */
- term_int && !next_lli_off_dst,
true);
d40_log_fill_lli(&lli->src[i],
dev_addr,
sg_dma_len(current_sg),
- next_lli_off_src,
lcsp->lcsp1, src_data_width,
- false,
false);
}
}
return total_size;
}
-int d40_log_sg_to_lli(int lcla_id,
- struct scatterlist *sg,
+int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width,
- bool term_int, int max_len, int llis_per_log)
+ u32 data_width)
{
int total_size = 0;
struct scatterlist *current_sg = sg;
int i;
- u32 next_lli_off = 0;
for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg);
- /*
- * If this scatter list entry is the last one or
- * max length, terminate link.
- */
- if (sg_len - 1 == i || ((i+1) % max_len == 0))
- next_lli_off = 0;
- else {
- if (next_lli_off == 0)
- /* The first lli will be at next_lli_off */
- next_lli_off = lcla_id * llis_per_log + 1;
- else
- next_lli_off++;
- }
-
d40_log_fill_lli(&lli_sg[i],
sg_phys(current_sg),
sg_dma_len(current_sg),
- next_lli_off,
lcsp13, data_width,
- term_int && !next_lli_off,
true);
}
return total_size;
}
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
- struct d40_log_lli *lcla_src,
- struct d40_log_lli *lcla_dst,
- struct d40_log_lli *lli_dst,
- struct d40_log_lli *lli_src,
- int llis_per_log)
-{
- u32 slos;
- u32 dlos;
- int i;
-
- writel(lli_src->lcsp02, &lcpa->lcsp0);
- writel(lli_src->lcsp13, &lcpa->lcsp1);
- writel(lli_dst->lcsp02, &lcpa->lcsp2);
- writel(lli_dst->lcsp13, &lcpa->lcsp3);
-
- slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
- dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
-
- for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
- writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
- writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
- writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
- writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
-
- slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
- dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
- }
-
- return i;
-
-}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9c0fa2f5fe57..9e419b907544 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -1,10 +1,8 @@
/*
- * driver/dma/ste_dma40_ll.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#ifndef STE_DMA40_LL_H
#define STE_DMA40_LL_H
@@ -132,6 +130,13 @@
#define D40_DREG_PRMSO 0x014
#define D40_DREG_PRMOE 0x018
#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_PRMO_PCHAN_BASIC 0x1
+#define D40_DREG_PRMO_PCHAN_MODULO 0x2
+#define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3
+#define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2
+#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3
+
#define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024
#define D40_DREG_ACTIVE 0x050
@@ -163,6 +168,9 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID2_REV_POS 4
+#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
+#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
@@ -199,8 +207,6 @@ struct d40_phy_lli {
*
* @src: Register settings for src channel.
* @dst: Register settings for dst channel.
- * @dst_addr: Physical destination address.
- * @src_addr: Physical source address.
*
* All DMA transfers have a source and a destination.
*/
@@ -208,8 +214,6 @@ struct d40_phy_lli {
struct d40_phy_lli_bidir {
struct d40_phy_lli *src;
struct d40_phy_lli *dst;
- dma_addr_t dst_addr;
- dma_addr_t src_addr;
};
@@ -271,29 +275,16 @@ struct d40_def_lcsp {
u32 lcsp1;
};
-/**
- * struct d40_lcla_elem - Info for one LCA element.
- *
- * @src_id: logical channel src id
- * @dst_id: logical channel dst id
- * @src: LCPA formated src parameters
- * @dst: LCPA formated dst parameters
- *
- */
-struct d40_lcla_elem {
- int src_id;
- int dst_id;
- struct d40_log_lli *src;
- struct d40_log_lli *dst;
-};
-
/* Physical channels */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
- u32 *src_cfg, u32 *dst_cfg, bool is_log);
+ u32 *src_cfg,
+ u32 *dst_cfg,
+ bool is_log);
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
- u32 *lcsp1, u32 *lcsp2);
+ u32 *lcsp1,
+ u32 *lcsp2);
int d40_phy_sg_to_lli(struct scatterlist *sg,
int sg_len,
@@ -302,8 +293,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys,
u32 reg_cfg,
u32 data_width,
- int psize,
- bool term_int);
+ int psize);
int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data,
@@ -323,35 +313,35 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* Logical channels */
void d40_log_fill_lli(struct d40_log_lli *lli,
- dma_addr_t data, u32 data_size,
- u32 lli_next_off, u32 reg_cfg,
+ dma_addr_t data,
+ u32 data_size,
+ u32 reg_cfg,
u32 data_width,
- bool term_int, bool addr_inc);
+ bool addr_inc);
-int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
- struct scatterlist *sg,
+int d40_log_sg_to_dev(struct scatterlist *sg,
int sg_len,
struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp,
u32 src_data_width,
u32 dst_data_width,
enum dma_data_direction direction,
- bool term_int, dma_addr_t dev_addr, int max_len,
- int llis_per_log);
-
-int d40_log_lli_write(struct d40_log_lli_full *lcpa,
- struct d40_log_lli *lcla_src,
- struct d40_log_lli *lcla_dst,
- struct d40_log_lli *lli_dst,
- struct d40_log_lli *lli_src,
- int llis_per_log);
-
-int d40_log_sg_to_lli(int lcla_id,
- struct scatterlist *sg,
+ dma_addr_t dev_addr);
+
+int d40_log_sg_to_lli(struct scatterlist *sg,
int sg_len,
struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/
- u32 data_width,
- bool term_int, int max_len, int llis_per_log);
+ u32 data_width);
+
+void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next);
+
+void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int next);
#endif /* STE_DMA40_LLI_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 2ec1ed56f204..3b88a4e7c98a 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev)
pdata->channels + i;
/* even channels are RX, odd are TX */
- if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL;
goto err_tasklet_kill;
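
The timb_dma hunk above collapses a two-branch boolean test into a single equality check. The two forms are equivalent only because pchan->rx holds a strict 0 or 1; a small stand-alone sketch (not part of the patch) that checks all four input combinations:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Exhaustively compare the old and new channel-configuration tests. */
	for (int i = 0; i < 2; i++) {
		for (int rx = 0; rx < 2; rx++) {
			bool old_form = ((i % 2) && rx) || (!(i % 2) && !rx);
			bool new_form = (i % 2) == rx;
			assert(old_form == new_form);
		}
	}
	printf("the two tests agree whenever rx is 0 or 1\n");
	return 0;
}
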
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ce7146677e9b..d7ca43a828bd 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -42,8 +42,10 @@
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
#else /* PAGE_SHIFT > 20 */
#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
#endif
#define edac_printk(level, prefix, fmt, arg...) \
@@ -328,7 +330,7 @@ struct csrow_info {
struct mcidev_sysfs_group {
const char *name; /* group name */
- struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
+ const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
};
struct mcidev_sysfs_group_kobj {
@@ -336,7 +338,7 @@ struct mcidev_sysfs_group_kobj {
struct kobject kobj; /* kobj for the group */
- struct mcidev_sysfs_group *grp; /* group description table */
+ const struct mcidev_sysfs_group *grp; /* group description table */
struct mem_ctl_info *mci; /* the parent */
};
@@ -347,7 +349,7 @@ struct mcidev_sysfs_group_kobj {
struct mcidev_sysfs_attribute {
/* It should use either attr or grp */
struct attribute attr;
- struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
+ const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
/* Ops for show/store values at the attribute - not used on group */
ssize_t (*show)(struct mem_ctl_info *,char *);
@@ -440,7 +442,7 @@ struct mem_ctl_info {
* If attributes are desired, then set to array of attributes
* If no attributes are desired, leave NULL
*/
- struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
/* work struct for this MC */
struct delayed_work work;
@@ -810,6 +812,7 @@ extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
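
The new MiB_TO_PAGES() macro is the inverse of PAGES_TO_MiB(): for the usual PAGE_SHIFT < 20 case it shifts left by (20 - PAGE_SHIFT). A minimal user-space illustration, assuming a typical 4 KiB page size (PAGE_SHIFT = 12):

#include <assert.h>
#include <stdio.h>

/* Illustration only: mirrors the PAGE_SHIFT < 20 branch of the macros above. */
#define PAGE_SHIFT 12
#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MiB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))

int main(void)
{
	assert(MiB_TO_PAGES(1) == 256);			/* 1 MiB is 256 pages of 4 KiB */
	assert(PAGES_TO_MiB(MiB_TO_PAGES(8)) == 8);	/* the two macros invert each other */
	printf("1 MiB = %d pages\n", MiB_TO_PAGES(1));
	return 0;
}
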
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6b21e25f7a84..ba6586a69ccc 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -207,6 +207,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
}
mci->op_state = OP_ALLOC;
+ INIT_LIST_HEAD(&mci->grp_kobj_list);
/*
* Initialize the 'root' kobj for the edac_mc controller
@@ -234,18 +235,24 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc);
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
+ debugf1("%s()\n", __func__);
+
edac_mc_unregister_sysfs_main_kobj(mci);
+
+ /* free the mci instance memory here */
+ kfree(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
-/*
+/**
* find_mci_by_dev
*
* scan list of controllers looking for the one that manages
* the 'dev' device
+ * @dev: pointer to a struct device related with the MCI
*/
-static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
+struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -261,6 +268,7 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
* handler for EDAC to check if NMI type handler has asserted interrupt
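
With this change edac_mc_free() also frees the mci structure itself (the kfree() is removed from the kobject release in edac_mc_sysfs.c below), so drivers must not free it a second time. A hedged teardown sketch mirroring the ordering used later by i7core_unregister_mci(); the function is illustrative, only the EDAC helpers and fields come from the patch:

#include <linux/slab.h>
#include "edac_core.h"

/* 'dev' is the struct device the memory controller was registered with. */
static void example_mc_teardown(struct device *dev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(dev);	/* removes the sysfs nodes */

	if (!mci)
		return;

	kfree(mci->ctl_name);	/* only if the driver allocated it dynamically, as i7core does */
	edac_mc_free(mci);	/* now also frees the mci structure itself */
}
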
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index a4135860149b..dce61f7ba38b 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -631,9 +631,6 @@ static void edac_mci_control_release(struct kobject *kobj)
/* decrement the module ref count */
module_put(mci->owner);
-
- /* free the mci instance memory here */
- kfree(mci);
}
static struct kobj_type ktype_mci = {
@@ -713,6 +710,8 @@ fail_out:
*/
void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
{
+ debugf1("%s()\n", __func__);
+
/* delete the kobj from the mc_kset */
kobject_put(&mci->edac_mci_kobj);
}
@@ -760,8 +759,6 @@ static void edac_inst_grp_release(struct kobject *kobj)
grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
mci = grp->mci;
-
- kobject_put(&mci->edac_mci_kobj);
}
/* Intermediate show/store table */
@@ -784,7 +781,7 @@ static struct kobj_type ktype_inst_grp = {
* object tree.
*/
static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
- struct mcidev_sysfs_attribute *sysfs_attrib,
+ const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj)
{
int err;
@@ -792,6 +789,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
debugf1("%s()\n", __func__);
while (sysfs_attrib) {
+ debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -799,10 +797,9 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (!grp_kobj)
return -ENOMEM;
- list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
-
grp_kobj->grp = sysfs_attrib->grp;
grp_kobj->mci = mci;
+ list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
debugf0("%s() grp %s, mci %p\n", __func__,
sysfs_attrib->grp->name, mci);
@@ -811,26 +808,28 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
&ktype_inst_grp,
&mci->edac_mci_kobj,
sysfs_attrib->grp->name);
- if (err)
+ if (err < 0) {
+ printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
return err;
-
+ }
err = edac_create_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj);
- if (err)
+ if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
debugf0("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
+ return err;
+ }
} else
break;
- if (err) {
- return err;
- }
sysfs_attrib++;
}
@@ -843,7 +842,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
* directory of this mci instance.
*/
static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
- struct mcidev_sysfs_attribute *sysfs_attrib,
+ const struct mcidev_sysfs_attribute *sysfs_attrib,
struct kobject *kobj, int count)
{
struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
@@ -855,13 +854,24 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
* First remove all the attributes
*/
while (sysfs_attrib) {
+ debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- list_for_each_entry(grp_kobj, &mci->grp_kobj_list,
- list)
- if (grp_kobj->grp == sysfs_attrib->grp)
+ debugf1("%s() seeking for group %s\n",
+ __func__, sysfs_attrib->grp->name);
+ list_for_each_entry(grp_kobj,
+ &mci->grp_kobj_list, list) {
+ debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
+ debugf0("%s() group %s\n", __func__,
+ sysfs_attrib->grp->name);
+ kobject_put(&grp_kobj->kobj);
+ }
+ }
+ debugf1("%s() end of seeking for group %s\n",
+ __func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
debugf0("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
@@ -871,15 +881,14 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
sysfs_attrib++;
}
- /*
- * Now that all attributes got removed, it is save to remove all groups
- */
- if (!count)
- list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list,
- list) {
- debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name);
- kobject_put(&grp_kobj->kobj);
- }
+ /* Remove the group objects */
+ if (count)
+ return;
+ list_for_each_entry_safe(grp_kobj, tmp,
+ &mci->grp_kobj_list, list) {
+ list_del(&grp_kobj->list);
+ kfree(grp_kobj);
+ }
}
@@ -971,6 +980,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
+ debugf0("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -978,20 +988,20 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
}
}
- debugf0("%s() remove_link\n", __func__);
+ /* remove this mci instance's attributes */
+ if (mci->mc_driver_sysfs_attributes) {
+ debugf0("%s() unregister mci private attributes\n", __func__);
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj, 0);
+ }
/* remove the symlink */
+ debugf0("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
- debugf0("%s() remove_mci_instance\n", __func__);
-
- /* remove this mci instance's attribtes */
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj, 0);
- debugf0("%s() unregister this mci kobj\n", __func__);
-
/* unregister this instance's kobject */
+ debugf0("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
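
edac_create_mci_instance_attributes() walks a driver-supplied table until it reaches an entry with neither .grp nor .attr.name set, which is why the i7core tables below are now terminated with an empty { } initializer. A minimal sketch of such a table; the "example_value" attribute is made up:

#include <linux/sysfs.h>
#include "edac_core.h"

static ssize_t example_value_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data, "%d\n", 42);	/* placeholder value */
}

static const struct mcidev_sysfs_attribute example_attrs[] = {
	{
		.attr = { .name = "example_value", .mode = S_IRUGO },
		.show = example_value_show,
	},
	{ }	/* end of list: neither .attr.name nor .grp is set */
};
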
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 0fd5b85a0f75..362861c15779 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -39,6 +39,14 @@
#include "edac_core.h"
+/* Static vars */
+static LIST_HEAD(i7core_edac_list);
+static DEFINE_MUTEX(i7core_edac_lock);
+static int probed;
+
+static int use_pci_fixup;
+module_param(use_pci_fixup, int, 0444);
+MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
* This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
* registers start at bus 255, and are not reported by BIOS.
@@ -212,8 +220,8 @@ struct pci_id_descr {
};
struct pci_id_table {
- struct pci_id_descr *descr;
- int n_devs;
+ const struct pci_id_descr *descr;
+ int n_devs;
};
struct i7core_dev {
@@ -235,8 +243,6 @@ struct i7core_pvt {
struct i7core_inject inject;
struct i7core_channel channel[NUM_CHANS];
- int channels; /* Number of active channels */
-
int ce_count_available;
int csrow_map[NUM_CHANS][MAX_DIMMS];
@@ -261,22 +267,22 @@ struct i7core_pvt {
/* Count indicator to show errors not got */
unsigned mce_overrun;
-};
-/* Static vars */
-static LIST_HEAD(i7core_edac_list);
-static DEFINE_MUTEX(i7core_edac_lock);
+ /* Struct to control EDAC polling */
+ struct edac_pci_ctl_info *i7core_pci;
+};
#define PCI_DESCR(device, function, device_id) \
.dev = (device), \
.func = (function), \
.dev_id = (device_id)
-struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
+static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
- /* Exists only for RDIMM */
+
+ /* Exists only for RDIMM */
{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
@@ -297,19 +303,9 @@ struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
-
- /* Generic Non-core registers */
- /*
- * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
- * On Xeon 55xx, however, it has a different id (8086:2c40). So,
- * the probing code needs to test for the other address in case of
- * failure of this one
- */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
-
};
-struct pci_id_descr pci_dev_descr_lynnfield[] = {
+static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
@@ -323,15 +319,9 @@ struct pci_id_descr pci_dev_descr_lynnfield[] = {
{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
-
- /*
- * This is the PCI device has an alternate address on some
- * processors like Core i7 860
- */
- { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};
-struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
+static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
/* Memory controller */
{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
@@ -356,17 +346,14 @@ struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
-
- /* Generic Non-core registers */
- { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
-
};
-#define PCI_ID_TABLE_ENTRY(A) { A, ARRAY_SIZE(A) }
-struct pci_id_table pci_dev_table[] = {
+#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+static const struct pci_id_table pci_dev_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
+ {0,} /* 0 terminated list. */
};
/*
@@ -378,8 +365,6 @@ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
{0,} /* 0 terminated list. */
};
-static struct edac_pci_ctl_info *i7core_pci;
-
/****************************************************************************
Ancillary status routines
****************************************************************************/
@@ -442,6 +427,36 @@ static struct i7core_dev *get_i7core_dev(u8 socket)
return NULL;
}
+static struct i7core_dev *alloc_i7core_dev(u8 socket,
+ const struct pci_id_table *table)
+{
+ struct i7core_dev *i7core_dev;
+
+ i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
+ if (!i7core_dev)
+ return NULL;
+
+ i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
+ GFP_KERNEL);
+ if (!i7core_dev->pdev) {
+ kfree(i7core_dev);
+ return NULL;
+ }
+
+ i7core_dev->socket = socket;
+ i7core_dev->n_devs = table->n_devs;
+ list_add_tail(&i7core_dev->list, &i7core_edac_list);
+
+ return i7core_dev;
+}
+
+static void free_i7core_dev(struct i7core_dev *i7core_dev)
+{
+ list_del(&i7core_dev->list);
+ kfree(i7core_dev->pdev);
+ kfree(i7core_dev);
+}
+
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -484,7 +499,7 @@ static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
* to add a fake description for csrows.
* So, this driver maps one DIMM to one csrow.
*/
-static int i7core_get_active_channels(u8 socket, unsigned *channels,
+static int i7core_get_active_channels(const u8 socket, unsigned *channels,
unsigned *csrows)
{
struct pci_dev *pdev = NULL;
@@ -545,12 +560,13 @@ static int i7core_get_active_channels(u8 socket, unsigned *channels,
return 0;
}
-static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
+static int get_dimm_config(const struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
+ int csrow = 0;
unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
@@ -664,13 +680,9 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
-#if PAGE_SHIFT > 20
- npages = size >> (PAGE_SHIFT - 20);
-#else
- npages = size << (20 - PAGE_SHIFT);
-#endif
+ npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[*csrow];
+ csr = &mci->csrows[csrow];
csr->first_page = last_page + 1;
last_page += npages;
csr->last_page = last_page;
@@ -678,13 +690,13 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
csr->page_mask = 0;
csr->grain = 8;
- csr->csrow_idx = *csrow;
+ csr->csrow_idx = csrow;
csr->nr_channels = 1;
csr->channels[0].chan_idx = i;
csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = *csrow;
+ pvt->csrow_map[i][j] = csrow;
switch (banks) {
case 4:
@@ -703,7 +715,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
csr->edac_mode = mode;
csr->mtype = mtype;
- (*csrow)++;
+ csrow++;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -736,7 +748,7 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
we're disabling error injection on all write calls to the sysfs nodes that
control the error code injection.
*/
-static int disable_inject(struct mem_ctl_info *mci)
+static int disable_inject(const struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
@@ -921,7 +933,7 @@ DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
-static int write_and_test(struct pci_dev *dev, int where, u32 val)
+static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
u32 read;
int count;
@@ -1120,35 +1132,34 @@ DECLARE_COUNTER(2);
* Sysfs struct
*/
-
-static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
ATTR_ADDR_MATCH(channel),
ATTR_ADDR_MATCH(dimm),
ATTR_ADDR_MATCH(rank),
ATTR_ADDR_MATCH(bank),
ATTR_ADDR_MATCH(page),
ATTR_ADDR_MATCH(col),
- { .attr = { .name = NULL } }
+ { } /* End of list */
};
-static struct mcidev_sysfs_group i7core_inject_addrmatch = {
+static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
.name = "inject_addrmatch",
.mcidev_attr = i7core_addrmatch_attrs,
};
-static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
ATTR_COUNTER(0),
ATTR_COUNTER(1),
ATTR_COUNTER(2),
{ .attr = { .name = NULL } }
};
-static struct mcidev_sysfs_group i7core_udimm_counters = {
+static const struct mcidev_sysfs_group i7core_udimm_counters = {
.name = "all_channel_counts",
.mcidev_attr = i7core_udimm_counters_attrs,
};
-static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
+static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
{
.attr = {
.name = "inject_section",
@@ -1180,8 +1191,44 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
.show = i7core_inject_enable_show,
.store = i7core_inject_enable_store,
},
- { .attr = { .name = NULL } }, /* Reserved for udimm counters */
- { .attr = { .name = NULL } }
+ { } /* End of list */
+};
+
+static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_section_show,
+ .store = i7core_inject_section_store,
+ }, {
+ .attr = {
+ .name = "inject_type",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_type_show,
+ .store = i7core_inject_type_store,
+ }, {
+ .attr = {
+ .name = "inject_eccmask",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_eccmask_show,
+ .store = i7core_inject_eccmask_store,
+ }, {
+ .grp = &i7core_inject_addrmatch,
+ }, {
+ .attr = {
+ .name = "inject_enable",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_enable_show,
+ .store = i7core_inject_enable_store,
+ }, {
+ .grp = &i7core_udimm_counters,
+ },
+ { } /* End of list */
};
/****************************************************************************
@@ -1189,7 +1236,7 @@ static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
****************************************************************************/
/*
- * i7core_put_devices 'put' all the devices that we have
+ * i7core_put_all_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i7core_put_devices(struct i7core_dev *i7core_dev)
@@ -1206,23 +1253,23 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev)
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
- kfree(i7core_dev->pdev);
- list_del(&i7core_dev->list);
- kfree(i7core_dev);
}
static void i7core_put_all_devices(void)
{
struct i7core_dev *i7core_dev, *tmp;
- list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list)
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
i7core_put_devices(i7core_dev);
+ free_i7core_dev(i7core_dev);
+ }
}
-static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
+static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
struct pci_dev *pdev = NULL;
int i;
+
/*
* On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
* aren't announced by acpi. So, we need to use a legacy scan probing
@@ -1257,16 +1304,18 @@ static unsigned i7core_pci_lastbus(void)
}
/*
- * i7core_get_devices Find and perform 'get' operation on the MCH's
+ * i7core_get_all_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
-int i7core_get_onedevice(struct pci_dev **prev, int devno,
- struct pci_id_descr *dev_descr, unsigned n_devs,
- unsigned last_bus)
+static int i7core_get_onedevice(struct pci_dev **prev,
+ const struct pci_id_table *table,
+ const unsigned devno,
+ const unsigned last_bus)
{
struct i7core_dev *i7core_dev;
+ const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
u8 bus = 0;
@@ -1275,20 +1324,6 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
dev_descr->dev_id, *prev);
- /*
- * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
- * is at addr 8086:2c40, instead of 8086:2c41. So, we need
- * to probe for the alternate address in case of failure
- */
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
-
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
- *prev);
-
if (!pdev) {
if (*prev) {
*prev = pdev;
@@ -1315,18 +1350,11 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
i7core_dev = get_i7core_dev(socket);
if (!i7core_dev) {
- i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
- if (!i7core_dev)
- return -ENOMEM;
- i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs,
- GFP_KERNEL);
- if (!i7core_dev->pdev) {
- kfree(i7core_dev);
+ i7core_dev = alloc_i7core_dev(socket, table);
+ if (!i7core_dev) {
+ pci_dev_put(pdev);
return -ENOMEM;
}
- i7core_dev->socket = socket;
- i7core_dev->n_devs = n_devs;
- list_add_tail(&i7core_dev->list, &i7core_edac_list);
}
if (i7core_dev->pdev[devno]) {
@@ -1368,27 +1396,31 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ /*
+ * As stated in drivers/pci/search.c, the reference count for
+ * @from is always decremented if it is not %NULL. Since we iterate
+ * until pci_get_device() returns NULL, take an extra reference here.
+ */
+ pci_dev_get(pdev);
+
*prev = pdev;
return 0;
}
-static int i7core_get_devices(struct pci_id_table *table)
+static int i7core_get_all_devices(void)
{
int i, rc, last_bus;
struct pci_dev *pdev = NULL;
- struct pci_id_descr *dev_descr;
+ const struct pci_id_table *table = pci_dev_table;
last_bus = i7core_pci_lastbus();
while (table && table->descr) {
- dev_descr = table->descr;
for (i = 0; i < table->n_devs; i++) {
pdev = NULL;
do {
- rc = i7core_get_onedevice(&pdev, i,
- &dev_descr[i],
- table->n_devs,
+ rc = i7core_get_onedevice(&pdev, table, i,
last_bus);
if (rc < 0) {
if (i == 0) {
@@ -1404,7 +1436,6 @@ static int i7core_get_devices(struct pci_id_table *table)
}
return 0;
- return 0;
}
static int mci_bind_devs(struct mem_ctl_info *mci,
@@ -1414,10 +1445,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
struct pci_dev *pdev;
int i, func, slot;
- /* Associates i7core_dev and mci for future usage */
- pvt->i7core_dev = i7core_dev;
- i7core_dev->mci = mci;
-
pvt->is_registered = 0;
for (i = 0; i < i7core_dev->n_devs; i++) {
pdev = i7core_dev->pdev[i];
@@ -1448,15 +1475,6 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
pvt->is_registered = 1;
}
- /*
- * Add extra nodes to count errors on udimm
- * For registered memory, this is not needed, since the counters
- * are already displayed at the standard locations
- */
- if (!pvt->is_registered)
- i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp =
- &i7core_udimm_counters;
-
return 0;
error:
@@ -1470,7 +1488,9 @@ error:
Error check routines
****************************************************************************/
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
- int chan, int dimm, int add)
+ const int chan,
+ const int dimm,
+ const int add)
{
char *msg;
struct i7core_pvt *pvt = mci->pvt_info;
@@ -1487,7 +1507,10 @@ static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
}
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
- int chan, int new0, int new1, int new2)
+ const int chan,
+ const int new0,
+ const int new1,
+ const int new2)
{
struct i7core_pvt *pvt = mci->pvt_info;
int add0 = 0, add1 = 0, add2 = 0;
@@ -1641,7 +1664,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
* fields
*/
static void i7core_mce_output_error(struct mem_ctl_info *mci,
- struct mce *m)
+ const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
char *type, *optype, *err, *msg;
@@ -1845,28 +1868,85 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
return 1;
}
-static int i7core_register_mci(struct i7core_dev *i7core_dev,
- int num_channels, int num_csrows)
+static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
+{
+ pvt->i7core_pci = edac_pci_create_generic_ctl(
+ &pvt->i7core_dev->pdev[0]->dev,
+ EDAC_MOD_STR);
+ if (unlikely(!pvt->i7core_pci))
+ pr_warn("Unable to setup PCI error report via EDAC\n");
+}
+
+static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
+{
+ if (likely(pvt->i7core_pci))
+ edac_pci_release_generic_ctl(pvt->i7core_pci);
+ else
+ i7core_printk(KERN_ERR,
+ "Couldn't find mem_ctl_info for socket %d\n",
+ pvt->i7core_dev->socket);
+ pvt->i7core_pci = NULL;
+}
+
+static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
+{
+ struct mem_ctl_info *mci = i7core_dev->mci;
+ struct i7core_pvt *pvt;
+
+ if (unlikely(!mci || !mci->pvt_info)) {
+ debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
+ __func__, &i7core_dev->pdev[0]->dev);
+
+ i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
+ return;
+ }
+
+ pvt = mci->pvt_info;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &i7core_dev->pdev[0]->dev);
+
+ /* Disable MCE NMI handler */
+ edac_mce_unregister(&pvt->edac_mce);
+
+ /* Disable EDAC polling */
+ i7core_pci_ctl_release(pvt);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->dev);
+
+ debugf1("%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_dev->mci = NULL;
+}
+
+static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int csrow = 0;
- int rc;
+ int rc, channels, csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
+ if (unlikely(rc < 0))
+ return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
- i7core_dev->socket);
+ mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
-
- /* record ptr to the generic device */
- mci->dev = &i7core_dev->pdev[0]->dev;
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &i7core_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
+ /* Associates i7core_dev and mci for future usage */
+ pvt->i7core_dev = i7core_dev;
+ i7core_dev->mci = mci;
+
/*
* FIXME: how to handle RDDR3 at MCI level? It is possible to have
* Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
@@ -1881,17 +1961,23 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
i7core_dev->socket);
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
- mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7core_check_error;
/* Store pci devices at mci for faster access */
rc = mci_bind_devs(mci, i7core_dev);
if (unlikely(rc < 0))
- goto fail;
+ goto fail0;
+
+ if (pvt->is_registered)
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
+ else
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
/* Get dimm basic config */
- get_dimm_config(mci, &csrow);
+ get_dimm_config(mci);
+ /* record ptr to the generic device */
+ mci->dev = &i7core_dev->pdev[0]->dev;
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i7core_check_error;
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
@@ -1902,19 +1988,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
*/
rc = -EINVAL;
- goto fail;
- }
-
- /* allocating generic PCI control info */
- i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev,
- EDAC_MOD_STR);
- if (unlikely(!i7core_pci)) {
- printk(KERN_WARNING
- "%s(): Unable to create PCI control\n",
- __func__);
- printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n",
- __func__);
+ goto fail0;
}
/* Default error mask is any memory */
@@ -1925,19 +1999,28 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev,
pvt->inject.page = -1;
pvt->inject.col = -1;
+ /* allocating generic PCI control info */
+ i7core_pci_ctl_create(pvt);
+
/* Registers on edac_mce in order to receive memory errors */
pvt->edac_mce.priv = mci;
pvt->edac_mce.check_error = i7core_mce_check_error;
-
rc = edac_mce_register(&pvt->edac_mce);
if (unlikely(rc < 0)) {
debugf0("MC: " __FILE__
": %s(): failed edac_mce_register()\n", __func__);
+ goto fail1;
}
-fail:
- if (rc < 0)
- edac_mc_free(mci);
+ return 0;
+
+fail1:
+ i7core_pci_ctl_release(pvt);
+ edac_mc_del_mc(mci->dev);
+fail0:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_dev->mci = NULL;
return rc;
}
@@ -1949,8 +2032,6 @@ fail:
* < 0 for error code
*/
-static int probed = 0;
-
static int __devinit i7core_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1965,25 +2046,16 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
*/
if (unlikely(probed >= 1)) {
mutex_unlock(&i7core_edac_lock);
- return -EINVAL;
+ return -ENODEV;
}
probed++;
- rc = i7core_get_devices(pci_dev_table);
+ rc = i7core_get_all_devices();
if (unlikely(rc < 0))
goto fail0;
list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
- int channels;
- int csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket,
- &channels, &csrows);
- if (unlikely(rc < 0))
- goto fail1;
-
- rc = i7core_register_mci(i7core_dev, channels, csrows);
+ rc = i7core_register_mci(i7core_dev);
if (unlikely(rc < 0))
goto fail1;
}
@@ -1994,6 +2066,9 @@ static int __devinit i7core_probe(struct pci_dev *pdev,
return 0;
fail1:
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list)
+ i7core_unregister_mci(i7core_dev);
+
i7core_put_all_devices();
fail0:
mutex_unlock(&i7core_edac_lock);
@@ -2006,14 +2081,10 @@ fail0:
*/
static void __devexit i7core_remove(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct i7core_dev *i7core_dev, *tmp;
+ struct i7core_dev *i7core_dev;
debugf0(__FILE__ ": %s()\n", __func__);
- if (i7core_pci)
- edac_pci_release_generic_ctl(i7core_pci);
-
/*
* There is a problem here: the pdev value for removal will be wrong, since
* it will point to the X58 register used to detect that the machine
@@ -2023,22 +2094,18 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
*/
mutex_lock(&i7core_edac_lock);
- list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
- mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
- if (mci) {
- struct i7core_pvt *pvt = mci->pvt_info;
-
- i7core_dev = pvt->i7core_dev;
- edac_mce_unregister(&pvt->edac_mce);
- kfree(mci->ctl_name);
- edac_mc_free(mci);
- i7core_put_devices(i7core_dev);
- } else {
- i7core_printk(KERN_ERR,
- "Couldn't find mci for socket %d\n",
- i7core_dev->socket);
- }
+
+ if (unlikely(!probed)) {
+ mutex_unlock(&i7core_edac_lock);
+ return;
}
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list)
+ i7core_unregister_mci(i7core_dev);
+
+ /* Release PCI resources */
+ i7core_put_all_devices();
+
probed--;
mutex_unlock(&i7core_edac_lock);
@@ -2070,7 +2137,8 @@ static int __init i7core_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- i7core_xeon_pci_fixup(pci_dev_table);
+ if (use_pci_fixup)
+ i7core_xeon_pci_fixup(pci_dev_table);
pci_rc = pci_register_driver(&i7core_driver);
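
One subtle point in the i7core changes is the pci_dev_get() added in i7core_get_onedevice(): pci_get_device() always drops the reference on the device passed in as the starting point, so a loop that stores every device it finds must take one extra reference per device. A stand-alone sketch of that pattern; the vendor ID and array handling are illustrative only:

#include <linux/pci.h>

static unsigned int example_collect(struct pci_dev **slots, unsigned int max)
{
	struct pci_dev *pdev = NULL;
	unsigned int n = 0;

	while (n < max) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev);
		if (!pdev)
			break;
		slots[n++] = pci_dev_get(pdev);	/* keep it beyond the next lookup */
	}

	return n;	/* each stored device must later be released with pci_dev_put() */
}
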
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index b3d22d659990..e28e41668177 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -2,6 +2,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
@@ -361,6 +362,33 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
}
}
+static void __init print_filtered(const char *info)
+{
+ const char *p;
+
+ if (!info)
+ return;
+
+ for (p = info; *p; p++)
+ if (isprint(*p))
+ printk(KERN_CONT "%c", *p);
+ else
+ printk(KERN_CONT "\\x%02x", *p & 0xff);
+}
+
+static void __init dmi_dump_ids(void)
+{
+ printk(KERN_DEBUG "DMI: ");
+ print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
+ printk(KERN_CONT "/");
+ print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+ printk(KERN_CONT ", BIOS ");
+ print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
+ printk(KERN_CONT " ");
+ print_filtered(dmi_get_system_info(DMI_BIOS_DATE));
+ printk(KERN_CONT "\n");
+}
+
static int __init dmi_present(const char __iomem *p)
{
u8 buf[15];
@@ -381,8 +409,10 @@ static int __init dmi_present(const char __iomem *p)
buf[14] >> 4, buf[14] & 0xF);
else
printk(KERN_INFO "DMI present.\n");
- if (dmi_walk_early(dmi_decode) == 0)
+ if (dmi_walk_early(dmi_decode) == 0) {
+ dmi_dump_ids();
return 0;
+ }
}
return 1;
}
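
The new print_filtered() helper keeps a corrupt DMI string from garbling the boot log by escaping every non-printable byte as \xNN. A user-space analogue of the same logic; the sample board string is made up:

#include <ctype.h>
#include <stdio.h>

/* Printable characters pass through unchanged, everything else becomes \xNN. */
static void print_filtered(const char *info)
{
	const char *p;

	if (!info)
		return;
	for (p = info; *p; p++)
		if (isprint((unsigned char)*p))
			printf("%c", *p);
		else
			printf("\\x%02x", *p & 0xff);
}

int main(void)
{
	print_filtered("X58 DeLuXe\tv1.0\n");	/* prints: X58 DeLuXe\x09v1.0\x0a */
	printf("\n");
	return 0;
}
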
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/74x164.c
new file mode 100644
index 000000000000..d91ff4c282e9
--- /dev/null
+++ b/drivers/gpio/74x164.c
@@ -0,0 +1,182 @@
+/*
+ * 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
+ *
+ * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/74x164.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#define GEN_74X164_GPIO_COUNT 8
+
+
+struct gen_74x164_chip {
+ struct spi_device *spi;
+ struct gpio_chip gpio_chip;
+ struct mutex lock;
+ u8 port_config;
+};
+
+static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
+
+static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct gen_74x164_chip, gpio_chip);
+}
+
+static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
+{
+ return spi_write(chip->spi,
+ &chip->port_config, sizeof(chip->port_config));
+}
+
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ gen_74x164_set_value(gc, offset, val);
+ return 0;
+}
+
+static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
+{
+ struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = (chip->port_config >> offset) & 0x1;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static void gen_74x164_set_value(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ struct gen_74x164_chip *chip = gpio_to_chip(gc);
+
+ mutex_lock(&chip->lock);
+ if (val)
+ chip->port_config |= (1 << offset);
+ else
+ chip->port_config &= ~(1 << offset);
+
+ __gen_74x164_write_config(chip);
+ mutex_unlock(&chip->lock);
+}
+
+static int __devinit gen_74x164_probe(struct spi_device *spi)
+{
+ struct gen_74x164_chip *chip;
+ struct gen_74x164_chip_platform_data *pdata;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata || !pdata->base) {
+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ /*
+ * bits_per_word cannot be configured in platform data
+ */
+ spi->bits_per_word = 8;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ mutex_init(&chip->lock);
+
+ dev_set_drvdata(&spi->dev, chip);
+
+ chip->spi = spi;
+
+ chip->gpio_chip.label = GEN_74X164_DRIVER_NAME;
+ chip->gpio_chip.direction_output = gen_74x164_direction_output;
+ chip->gpio_chip.get = gen_74x164_get_value;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.base = pdata->base;
+ chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+ chip->gpio_chip.can_sleep = 1;
+ chip->gpio_chip.dev = &spi->dev;
+ chip->gpio_chip.owner = THIS_MODULE;
+
+ ret = __gen_74x164_write_config(chip);
+ if (ret) {
+ dev_err(&spi->dev, "Failed writing: %d\n", ret);
+ goto exit_destroy;
+ }
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (ret)
+ goto exit_destroy;
+
+ return ret;
+
+exit_destroy:
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_destroy(&chip->lock);
+ kfree(chip);
+ return ret;
+}
+
+static int gen_74x164_remove(struct spi_device *spi)
+{
+ struct gen_74x164_chip *chip;
+ int ret;
+
+ chip = dev_get_drvdata(&spi->dev);
+ if (chip == NULL)
+ return -ENODEV;
+
+ dev_set_drvdata(&spi->dev, NULL);
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (!ret) {
+ mutex_destroy(&chip->lock);
+ kfree(chip);
+ } else
+ dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
+ ret);
+
+ return ret;
+}
+
+static struct spi_driver gen_74x164_driver = {
+ .driver = {
+ .name = GEN_74X164_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = gen_74x164_probe,
+ .remove = __devexit_p(gen_74x164_remove),
+};
+
+static int __init gen_74x164_init(void)
+{
+ return spi_register_driver(&gen_74x164_driver);
+}
+subsys_initcall(gen_74x164_init);
+
+static void __exit gen_74x164_exit(void)
+{
+ spi_unregister_driver(&gen_74x164_driver);
+}
+module_exit(gen_74x164_exit);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
+MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bits shift register");
+MODULE_LICENSE("GPL v2");
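
For context, the new 74x164 driver expects its GPIO base through platform data. A hypothetical board-file registration might look like the sketch below; only struct gen_74x164_chip_platform_data and its .base field come from this patch, while the modalias, bus number, chip select and GPIO base are assumptions:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>

static struct gen_74x164_chip_platform_data board_74x164_pdata = {
	.base	= 200,		/* first GPIO number handed to gpiochip_add() */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "74x164",	/* assumed to match GEN_74X164_DRIVER_NAME */
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &board_74x164_pdata,
	},
};

/* Called from the machine init code, before the SPI master probes. */
static int __init board_register_74x164(void)
{
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}
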
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 510aa2054544..dd9b4ba8d32d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,6 +70,11 @@ config GPIO_MAX730X
comment "Memory mapped GPIO expanders:"
+config GPIO_BASIC_MMIO
+ tristate "Basic memory-mapped GPIO controllers support"
+ help
+ Say yes here to support basic memory-mapped GPIO controllers.
+
config GPIO_IT8761E
tristate "IT8761E GPIO support"
depends on GPIOLIB
@@ -267,6 +272,13 @@ config GPIO_ADP5588
To compile this driver as a module, choose M here: the module will be
called adp5588-gpio.
+config GPIO_ADP5588_IRQ
+ bool "Interrupt controller support for ADP5588"
+ depends on GPIO_ADP5588=y
+ help
+ Say yes here to enable the adp5588 to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
comment "PCI GPIO expanders:"
config GPIO_CS5535
@@ -301,6 +313,14 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Langwell/Penwell GPIO.
+config GPIO_PCH
+ tristate "PCH GPIO of Intel Topcliff"
+ depends on PCI
+ help
+ This driver is for the PCH (Platform Controller Hub) GPIO of Intel
+ Topcliff, which is an IOH (Input/Output Hub) for x86 embedded
+ processors. It provides access to the PCH GPIO device.
+
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -339,6 +359,14 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
+config GPIO_74X164
+ tristate "74x164 serial-in/parallel-out 8-bits shift register"
+ depends on SPI_MASTER
+ help
+ Platform driver for 74x164 compatible serial-in/parallel-out
+ 8-output shift registers. This driver can be used to provide access
+ to additional GPIO outputs.
+
comment "AC97 GPIO expanders:"
config GPIO_UCB1400
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fc6019d93720..da2ecde5abdd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
+obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -17,8 +18,10 @@ obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
+obj-$(CONFIG_GPIO_74X164) += 74x164.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
+obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_STMPE) += stmpe-gpio.o
obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 2e8e9e24f887..0871f78af593 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -1,8 +1,8 @@
/*
* GPIO Chip driver for Analog Devices
- * ADP5588 I/O Expander and QWERTY Keypad Controller
+ * ADP5588/ADP5587 I/O Expander and QWERTY Keypad Controller
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -13,21 +13,34 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c/adp5588.h>
-#define DRV_NAME "adp5588-gpio"
-#define MAXGPIO 18
-#define ADP_BANK(offs) ((offs) >> 3)
-#define ADP_BIT(offs) (1u << ((offs) & 0x7))
+#define DRV_NAME "adp5588-gpio"
+
+/*
+ * Early pre 4.0 Silicon required to delay readout by at least 25ms,
+ * since the Event Counter Register updated 25ms after the interrupt
+ * asserted.
+ */
+#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
struct adp5588_gpio {
struct i2c_client *client;
struct gpio_chip gpio_chip;
struct mutex lock; /* protect cached dir, dat_out */
+ /* protect serialized access to the interrupt controller bus */
+ struct mutex irq_lock;
unsigned gpio_start;
+ unsigned irq_base;
uint8_t dat_out[3];
uint8_t dir[3];
+ uint8_t int_lvl[3];
+ uint8_t int_en[3];
+ uint8_t irq_mask[3];
+ uint8_t irq_stat[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -55,8 +68,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
- & ADP_BIT(off));
+ return !!(adp5588_gpio_read(dev->client,
+ GPIO_DAT_STAT1 + ADP5588_BANK(off)) & ADP5588_BIT(off));
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
@@ -66,8 +79,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
- bit = ADP_BIT(off);
+ bank = ADP5588_BANK(off);
+ bit = ADP5588_BIT(off);
mutex_lock(&dev->lock);
if (val)
@@ -87,10 +100,10 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
+ bank = ADP5588_BANK(off);
mutex_lock(&dev->lock);
- dev->dir[bank] &= ~ADP_BIT(off);
+ dev->dir[bank] &= ~ADP5588_BIT(off);
ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
mutex_unlock(&dev->lock);
@@ -105,8 +118,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
struct adp5588_gpio *dev =
container_of(chip, struct adp5588_gpio, gpio_chip);
- bank = ADP_BANK(off);
- bit = ADP_BIT(off);
+ bank = ADP5588_BANK(off);
+ bit = ADP5588_BIT(off);
mutex_lock(&dev->lock);
dev->dir[bank] |= bit;
@@ -125,6 +138,213 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
+#ifdef CONFIG_GPIO_ADP5588_IRQ
+static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+ return dev->irq_base + off;
+}
+
+static void adp5588_irq_bus_lock(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ mutex_lock(&dev->irq_lock);
+}
+
+ /*
+ * genirq core code can issue chip->mask/unmask from atomic context.
+ * This doesn't work for slow busses where an access needs to sleep.
+ * bus_sync_unlock() is therefore called outside the atomic context,
+ * syncs the current irq mask state with the slow external controller
+ * and unlocks the bus.
+ */
+
+static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ if (dev->int_en[i] ^ dev->irq_mask[i]) {
+ dev->int_en[i] = dev->irq_mask[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
+ dev->int_en[i]);
+ }
+
+ mutex_unlock(&dev->irq_lock);
+}
+
+static void adp5588_irq_mask(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ unsigned gpio = irq - dev->irq_base;
+
+ dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
+}
+
+static void adp5588_irq_unmask(unsigned int irq)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ unsigned gpio = irq - dev->irq_base;
+
+ dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
+}
+
+static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct adp5588_gpio *dev = get_irq_chip_data(irq);
+ uint16_t gpio = irq - dev->irq_base;
+ unsigned bank, bit;
+
+ if ((type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ bank = ADP5588_BANK(gpio);
+ bit = ADP5588_BIT(gpio);
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ dev->int_lvl[bank] |= bit;
+ else if (type & IRQ_TYPE_LEVEL_LOW)
+ dev->int_lvl[bank] &= ~bit;
+ else
+ return -EINVAL;
+
+ adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
+ dev->int_lvl[bank]);
+
+ return 0;
+}
+
+static struct irq_chip adp5588_irq_chip = {
+ .name = "adp5588",
+ .mask = adp5588_irq_mask,
+ .unmask = adp5588_irq_unmask,
+ .bus_lock = adp5588_irq_bus_lock,
+ .bus_sync_unlock = adp5588_irq_bus_sync_unlock,
+ .set_type = adp5588_irq_set_type,
+};
+
+static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
+{
+ int ret = i2c_smbus_read_i2c_block_data(client, GPIO_INT_STAT1, 3, buf);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Read INT_STAT Error\n");
+
+ return ret;
+}
+
+static irqreturn_t adp5588_irq_handler(int irq, void *devid)
+{
+ struct adp5588_gpio *dev = devid;
+ unsigned status, bank, bit, pending;
+ int ret;
+ status = adp5588_gpio_read(dev->client, INT_STAT);
+
+ if (status & ADP5588_GPI_INT) {
+ ret = adp5588_gpio_read_intstat(dev->client, dev->irq_stat);
+ if (ret < 0)
+ memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
+
+ for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+ bank++, bit = 0) {
+ pending = dev->irq_stat[bank] & dev->irq_mask[bank];
+
+ while (pending) {
+ if (pending & (1 << bit)) {
+ handle_nested_irq(dev->irq_base +
+ (bank << 3) + bit);
+ pending &= ~(1 << bit);
+
+ }
+ bit++;
+ }
+ }
+ }
+
+ adp5588_gpio_write(dev->client, INT_STAT, status); /* Status is W1C */
+
+ return IRQ_HANDLED;
+}
+
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+ struct i2c_client *client = dev->client;
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ unsigned gpio;
+ int ret;
+
+ adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
+ adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */
+ adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */
+
+ dev->irq_base = pdata->irq_base;
+ mutex_init(&dev->irq_lock);
+
+ for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
+ int irq = gpio + dev->irq_base;
+ set_irq_chip_data(irq, dev);
+ set_irq_chip_and_handler(irq, &adp5588_irq_chip,
+ handle_level_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ /*
+ * ARM needs us to explicitly flag the IRQ as VALID,
+ * once we do so, it will also set the noprobe.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ adp5588_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out;
+ }
+
+ dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
+ adp5588_gpio_write(client, CFG,
+ ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);
+
+ return 0;
+
+out:
+ dev->irq_base = 0;
+ return ret;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+ if (dev->irq_base)
+ free_irq(dev->client->irq, dev);
+}
+
+#else
+static int adp5588_irq_setup(struct adp5588_gpio *dev)
+{
+ struct i2c_client *client = dev->client;
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void adp5588_irq_teardown(struct adp5588_gpio *dev)
+{
+}
+#endif /* CONFIG_GPIO_ADP5588_IRQ */
+
static int __devinit adp5588_gpio_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -160,37 +380,46 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
gc->can_sleep = 1;
gc->base = pdata->gpio_start;
- gc->ngpio = MAXGPIO;
+ gc->ngpio = ADP5588_MAXGPIO;
gc->label = client->name;
gc->owner = THIS_MODULE;
mutex_init(&dev->lock);
-
ret = adp5588_gpio_read(dev->client, DEV_ID);
if (ret < 0)
goto err;
revid = ret & ADP5588_DEVICE_ID_MASK;
- for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ for (i = 0, ret = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
(pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
-
+ ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0);
if (ret)
goto err;
}
+ if (pdata->irq_base) {
+ if (WA_DELAYED_READOUT_REVID(revid)) {
+ dev_warn(&client->dev, "GPIO int not supported\n");
+ } else {
+ ret = adp5588_irq_setup(dev);
+ if (ret)
+ goto err;
+ }
+ }
+
ret = gpiochip_add(&dev->gpio_chip);
if (ret)
- goto err;
+ goto err_irq;
- dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+ dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
gc->base, gc->base + gc->ngpio - 1,
- client->name, revid);
+ pdata->irq_base, client->name, revid);
if (pdata->setup) {
ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
@@ -199,8 +428,11 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, dev);
+
return 0;
+err_irq:
+ adp5588_irq_teardown(dev);
err:
kfree(dev);
return ret;
@@ -222,6 +454,9 @@ static int __devexit adp5588_gpio_remove(struct i2c_client *client)
}
}
+ if (dev->irq_base)
+ free_irq(dev->client->irq, dev);
+
ret = gpiochip_remove(&dev->gpio_chip);
if (ret) {
dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
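
The probe path above only wires up the nested GPIO interrupts when the board supplies an irq_base through platform data. Below is a minimal, hedged board-file sketch; the field names match what the driver dereferences (gpio_start, pullup_dis_mask, irq_base), while the I2C bus number, slave address, GPIO/IRQ bases and the header path are illustrative assumptions, not values taken from this patch.

/*
 * Hypothetical board wiring for the adp5588 GPIO expander (sketch only).
 * Bus number, slave address and the GPIO/IRQ base numbers are placeholders.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>	/* assumed location of the platform data */

static struct adp5588_gpio_platform_data board_adp5588_gpio = {
	.gpio_start	 = 100,	/* first GPIO number handed to gpiolib */
	.pullup_dis_mask = 0,	/* keep all internal pull-ups enabled */
	.irq_base	 = 200,	/* base for the nested per-pin interrupts */
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("adp5588-gpio", 0x34),
		.irq		= 17,	/* line wired to the expander INT pin */
		.platform_data	= &board_adp5588_gpio,
	},
};

static int __init board_register_expander(void)
{
	/* Typically called from the board's init_machine hook. */
	return i2c_register_board_info(1, board_i2c_devs,
				       ARRAY_SIZE(board_i2c_devs));
}

With irq_base set, gpio_to_irq() on any expander pin resolves to irq_base plus the pin offset, which is what the gpio_chip.to_irq hook installed above provides.
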
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c
new file mode 100644
index 000000000000..3addea65894e
--- /dev/null
+++ b/drivers/gpio/basic_mmio_gpio.c
@@ -0,0 +1,297 @@
+/*
+ * Driver for basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
+ * ...`` ```````..
+ * ..The simplest form of a GPIO controller that the driver supports is``
+ * `.just a single "data" register, where GPIO state can be read and/or `
+ * `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
+ * `````````
+ ___
+_/~~|___/~| . ```~~~~~~ ___/___\___ ,~.`.`.`.`````.~~...,,,,...
+__________|~$@~~~ %~ /o*o*o*o*o*o\ .. Implementing such a GPIO .
+o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
+ `....trivial..'~`.```.```
+ * ```````
+ * .```````~~~~`..`.``.``.
+ * . The driver supports `... ,..```.`~~~```````````````....````.``,,
+ * . big-endian notation, just`. .. A bit more sophisticated controllers ,
+ * . register the device with -be`. .with a pair of set/clear-bit registers ,
+ * `.. suffix. ```~~`````....`.` . affecting the data register and the .`
+ * ``.`.``...``` ```.. output pins are also supported.`
+ * ^^ `````.`````````.,``~``~``~~``````
+ * . ^^
+ * ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
+ * .. The expectation is that in at least some cases . ,-~~~-,
+ * .this will be used with roll-your-own ASIC/FPGA .` \ /
+ * .logic in Verilog or VHDL. ~~~`````````..`````~~` \ /
+ * ..````````......``````````` \o_
+ * |
+ * ^^ / \
+ *
+ * ...`````~~`.....``.`..........``````.`.``.```........``.
+ * ` 8, 16, 32 and 64 bits registers are supported, and``.
+ * . the number of GPIOs is determined by the width of ~
+ * .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
+ * `.......````.```
+ */
+
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/basic_mmio_gpio.h>
+
+struct bgpio_chip {
+ struct gpio_chip gc;
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+
+ /* Number of bits (GPIOs): <register width> * 8. */
+ int bits;
+
+ /*
+ * Some GPIO controllers work with the big-endian bits notation,
+ * e.g. in an 8-bit register, GPIO7 is the least significant bit.
+ */
+ int big_endian_bits;
+
+ /*
+ * Used to lock bgpio_chip->data. Also needed to keep writes to the
+ * shadowed and the real data register in sync.
+ */
+ spinlock_t lock;
+
+ /* Shadowed data register to clear/set bits safely. */
+ unsigned long data;
+};
+
+static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct bgpio_chip, gc);
+}
+
+static unsigned long bgpio_in(struct bgpio_chip *bgc)
+{
+ switch (bgc->bits) {
+ case 8:
+ return __raw_readb(bgc->reg_dat);
+ case 16:
+ return __raw_readw(bgc->reg_dat);
+ case 32:
+ return __raw_readl(bgc->reg_dat);
+#if BITS_PER_LONG >= 64
+ case 64:
+ return __raw_readq(bgc->reg_dat);
+#endif
+ }
+ return -EINVAL;
+}
+
+static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg,
+ unsigned long data)
+{
+ switch (bgc->bits) {
+ case 8:
+ __raw_writeb(data, reg);
+ return;
+ case 16:
+ __raw_writew(data, reg);
+ return;
+ case 32:
+ __raw_writel(data, reg);
+ return;
+#if BITS_PER_LONG >= 64
+ case 64:
+ __raw_writeq(data, reg);
+ return;
+#endif
+ }
+}
+
+static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
+{
+ if (bgc->big_endian_bits)
+ return 1 << (bgc->bits - 1 - pin);
+ else
+ return 1 << pin;
+}
+
+static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio);
+}
+
+static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ unsigned long mask = bgpio_pin2mask(bgc, gpio);
+ unsigned long flags;
+
+ if (bgc->reg_set) {
+ if (val)
+ bgpio_out(bgc, bgc->reg_set, mask);
+ else
+ bgpio_out(bgc, bgc->reg_clr, mask);
+ return;
+ }
+
+ spin_lock_irqsave(&bgc->lock, flags);
+
+ if (val)
+ bgc->data |= mask;
+ else
+ bgc->data &= ~mask;
+
+ bgpio_out(bgc, bgc->reg_dat, bgc->data);
+
+ spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ return 0;
+}
+
+static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ bgpio_set(gc, gpio, val);
+ return 0;
+}
+
+static int __devinit bgpio_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct device *dev = &pdev->dev;
+ struct bgpio_pdata *pdata = dev_get_platdata(dev);
+ struct bgpio_chip *bgc;
+ struct resource *res_dat;
+ struct resource *res_set;
+ struct resource *res_clr;
+ resource_size_t dat_sz;
+ int bits;
+ int ret;
+
+ res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+ if (!res_dat)
+ return -EINVAL;
+
+ dat_sz = resource_size(res_dat);
+ if (!is_power_of_2(dat_sz))
+ return -EINVAL;
+
+ bits = dat_sz * 8;
+ if (bits > BITS_PER_LONG)
+ return -EINVAL;
+
+ bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
+ if (!bgc)
+ return -ENOMEM;
+
+ bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz);
+ if (!bgc->reg_dat)
+ return -ENOMEM;
+
+ res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set");
+ res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr");
+ if (res_set && res_clr) {
+ if (resource_size(res_set) != resource_size(res_clr) ||
+ resource_size(res_set) != dat_sz)
+ return -EINVAL;
+
+ bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
+ bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
+ if (!bgc->reg_set || !bgc->reg_clr)
+ return -ENOMEM;
+ } else if (res_set || res_clr) {
+ return -EINVAL;
+ }
+
+ spin_lock_init(&bgc->lock);
+
+ bgc->bits = bits;
+ bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be");
+ bgc->data = bgpio_in(bgc);
+
+ bgc->gc.ngpio = bits;
+ bgc->gc.direction_input = bgpio_dir_in;
+ bgc->gc.direction_output = bgpio_dir_out;
+ bgc->gc.get = bgpio_get;
+ bgc->gc.set = bgpio_set;
+ bgc->gc.dev = dev;
+ bgc->gc.label = dev_name(dev);
+
+ if (pdata)
+ bgc->gc.base = pdata->base;
+ else
+ bgc->gc.base = -1;
+
+ dev_set_drvdata(dev, bgc);
+
+ ret = gpiochip_add(&bgc->gc);
+ if (ret)
+ dev_err(dev, "gpiochip_add() failed: %d\n", ret);
+
+ return ret;
+}
+
+static int __devexit bgpio_remove(struct platform_device *pdev)
+{
+ struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev);
+
+ return gpiochip_remove(&bgc->gc);
+}
+
+static const struct platform_device_id bgpio_id_table[] = {
+ { "basic-mmio-gpio", },
+ { "basic-mmio-gpio-be", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+
+static struct platform_driver bgpio_driver = {
+ .driver = {
+ .name = "basic-mmio-gpio",
+ },
+ .id_table = bgpio_id_table,
+ .probe = bgpio_probe,
+ .remove = __devexit_p(bgpio_remove),
+};
+
+static int __init bgpio_init(void)
+{
+ return platform_driver_register(&bgpio_driver);
+}
+module_init(bgpio_init);
+
+static void __exit bgpio_exit(void)
+{
+ platform_driver_unregister(&bgpio_driver);
+}
+module_exit(bgpio_exit);
+
+MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
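
bgpio_probe() above identifies the controller entirely from named memory resources: a mandatory "dat" window whose size sets the number of GPIOs, plus an optional matching "set"/"clr" pair. A hedged sketch of a matching platform device follows; the addresses and the gpiolib base are placeholders, and bgpio_pdata is assumed to carry only the base field that the probe reads.

/*
 * Sketch: a little-endian basic-mmio-gpio device with one 4-byte data
 * register (32 GPIOs). Register it with platform_device_register(); use
 * the "basic-mmio-gpio-be" name instead for big-endian bit numbering.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/basic_mmio_gpio.h>

static struct resource board_bgpio_resources[] = {
	{
		.name	= "dat",		/* looked up by bgpio_probe() */
		.start	= 0xe0001000,		/* placeholder MMIO address */
		.end	= 0xe0001003,		/* 4 bytes -> 32 GPIOs */
		.flags	= IORESOURCE_MEM,
	},
};

static struct bgpio_pdata board_bgpio_pdata = {
	.base	= 64,				/* first gpiolib number */
};

static struct platform_device board_bgpio_device = {
	.name		= "basic-mmio-gpio",
	.id		= 0,
	.resource	= board_bgpio_resources,
	.num_resources	= ARRAY_SIZE(board_bgpio_resources),
	.dev		= {
		.platform_data = &board_bgpio_pdata,
	},
};
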
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 8383a8d7f994..64db9dc3a275 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -18,10 +18,12 @@
/* Supports:
* Moorestown platform Langwell chip.
* Medfield platform Penwell chip.
+ * Whitney point.
*/
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
@@ -158,15 +160,15 @@ static int lnw_irq_type(unsigned irq, unsigned type)
spin_unlock_irqrestore(&lnw->lock, flags);
return 0;
-};
+}
static void lnw_irq_unmask(unsigned irq)
{
-};
+}
static void lnw_irq_mask(unsigned irq)
{
-};
+}
static struct irq_chip lnw_irqchip = {
.name = "LNW-GPIO",
@@ -300,9 +302,88 @@ static struct pci_driver lnw_gpio_driver = {
.probe = lnw_gpio_probe,
};
+
+static int __devinit wp_gpio_probe(struct platform_device *pdev)
+{
+ struct lnw_gpio *lnw;
+ struct gpio_chip *gc;
+ struct resource *rc;
+ int retval = 0;
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc)
+ return -EINVAL;
+
+ lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+ if (!lnw) {
+ dev_err(&pdev->dev,
+ "can't allocate whitneypoint_gpio chip data\n");
+ return -ENOMEM;
+ }
+ lnw->reg_base = ioremap_nocache(rc->start, resource_size(rc));
+ if (lnw->reg_base == NULL) {
+ retval = -EINVAL;
+ goto err_kmalloc;
+ }
+ spin_lock_init(&lnw->lock);
+ gc = &lnw->chip;
+ gc->label = dev_name(&pdev->dev);
+ gc->owner = THIS_MODULE;
+ gc->direction_input = lnw_gpio_direction_input;
+ gc->direction_output = lnw_gpio_direction_output;
+ gc->get = lnw_gpio_get;
+ gc->set = lnw_gpio_set;
+ gc->to_irq = NULL;
+ gc->base = 0;
+ gc->ngpio = 64;
+ gc->can_sleep = 0;
+ retval = gpiochip_add(gc);
+ if (retval) {
+ dev_err(&pdev->dev, "whitneypoint gpiochip_add error %d\n",
+ retval);
+ goto err_ioremap;
+ }
+ platform_set_drvdata(pdev, lnw);
+ return 0;
+err_ioremap:
+ iounmap(lnw->reg_base);
+err_kmalloc:
+ kfree(lnw);
+ return retval;
+}
+
+static int __devexit wp_gpio_remove(struct platform_device *pdev)
+{
+ struct lnw_gpio *lnw = platform_get_drvdata(pdev);
+ int err;
+ err = gpiochip_remove(&lnw->chip);
+ if (err)
+ dev_err(&pdev->dev, "failed to remove gpio_chip.\n");
+ iounmap(lnw->reg_base);
+ kfree(lnw);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver wp_gpio_driver = {
+ .probe = wp_gpio_probe,
+ .remove = __devexit_p(wp_gpio_remove),
+ .driver = {
+ .name = "wp_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init lnw_gpio_init(void)
{
- return pci_register_driver(&lnw_gpio_driver);
+ int ret;
+ ret = pci_register_driver(&lnw_gpio_driver);
+ if (ret < 0)
+ return ret;
+ ret = platform_driver_register(&wp_gpio_driver);
+ if (ret < 0)
+ pci_unregister_driver(&lnw_gpio_driver);
+ return ret;
}
device_initcall(lnw_gpio_init);
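
The new Whitney Point path above takes a plain platform device with a single memory resource and exposes 64 GPIOs starting at 0. A hedged sketch of the matching device registration, with a placeholder MMIO window:

/*
 * Sketch: platform device consumed by wp_gpio_probe() above. The MMIO
 * window is a placeholder; the probe maps it and registers 64 GPIOs.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource wp_gpio_resources[] = {
	{
		.start	= 0xfed0c000,			/* placeholder */
		.end	= 0xfed0c000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device wp_gpio_device = {
	.name		= "wp_gpio",	/* matches wp_gpio_driver.driver.name */
	.id		= -1,
	.resource	= wp_gpio_resources,
	.num_resources	= ARRAY_SIZE(wp_gpio_resources),
};
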
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a2b12aa1f2b9..501866662e05 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -345,7 +345,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(level + chip->irq_base);
+ generic_handle_irq(level + chip->irq_base);
pending &= ~(1 << level);
} while (pending);
@@ -360,7 +360,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct pca953x_platform_data *pdata = client->dev.platform_data;
int ret;
- if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
+ if (pdata->irq_base != -1
+ && (id->driver_data & PCA953X_INT)) {
int lvl;
ret = pca953x_read_reg(chip, PCA953X_INPUT,
@@ -383,7 +384,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
set_irq_chip_data(irq, chip);
set_irq_chip_and_handler(irq, &pca953x_irq_chip,
handle_edge_irq);
- set_irq_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
@@ -394,6 +394,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
ret = request_threaded_irq(client->irq,
NULL,
pca953x_irq_handler,
+ IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (ret) {
@@ -408,13 +409,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return 0;
out_failed:
- chip->irq_base = 0;
+ chip->irq_base = -1;
return ret;
}
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
- if (chip->irq_base)
+ if (chip->irq_base != -1)
free_irq(chip->client->irq, chip);
}
#else /* CONFIG_GPIO_PCA953X_IRQ */
@@ -424,7 +425,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
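
The pca953x hunks above change the "interrupts disabled" sentinel from 0 to -1, so irq_base 0 becomes usable and boards without an INT line must now pass -1 explicitly. A hedged sketch of the corresponding platform data; the field names follow what pca953x_irq_setup() dereferences, while the part name, bus and address are placeholders.

/*
 * Sketch: pca953x platform data after this change. Boards that do not
 * route the expander's interrupt line pass irq_base = -1.
 */
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>	/* assumed platform data header */

static struct pca953x_platform_data board_pca953x = {
	.gpio_base	= 160,		/* placeholder gpiolib base */
	.irq_base	= -1,		/* no interrupt support on this board */
};

static struct i2c_board_info board_expander __initdata = {
	I2C_BOARD_INFO("pca9534", 0x20),	/* placeholder part/address */
	.platform_data	= &board_pca953x,
};
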
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
new file mode 100644
index 000000000000..0eba0a75c804
--- /dev/null
+++ b/drivers/gpio/pch_gpio.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCH_GPIO_ALL_PINS 0xfff /* Mask for GPIO pins 0 to 11 */
+#define GPIO_NUM_PINS 12 /* Number of GPIO pins (GPIO0-GPIO11) */
+
+struct pch_regs {
+ u32 ien;
+ u32 istatus;
+ u32 idisp;
+ u32 iclr;
+ u32 imask;
+ u32 imaskclr;
+ u32 po;
+ u32 pi;
+ u32 pm;
+ u32 im0;
+ u32 im1;
+ u32 reserved[4];
+ u32 reset;
+};
+
+/**
+ * struct pch_gpio_reg_data - Saved register data for suspend/resume.
+ * @po_reg: Saved contents of the PO register.
+ * @pm_reg: Saved contents of the PM register.
+ */
+struct pch_gpio_reg_data {
+ u32 po_reg;
+ u32 pm_reg;
+};
+
+/**
+ * struct pch_gpio - GPIO private data structure.
+ * @base: PCI base address of Memory mapped I/O register.
+ * @reg: Memory mapped PCH GPIO register list.
+ * @dev: Pointer to device structure.
+ * @gpio: Data for GPIO infrastructure.
+ * @pch_gpio_reg: Memory mapped register data saved here on suspend.
+ * @lock: Protects read-modify-write accesses to the PO and PM registers.
+ */
+struct pch_gpio {
+ void __iomem *base;
+ struct pch_regs __iomem *reg;
+ struct device *dev;
+ struct gpio_chip gpio;
+ struct pch_gpio_reg_data pch_gpio_reg;
+ struct mutex lock;
+};
+
+static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ u32 reg_val;
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+ mutex_lock(&chip->lock);
+ reg_val = ioread32(&chip->reg->po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ iowrite32(reg_val, &chip->reg->po);
+ mutex_unlock(&chip->lock);
+}
+
+static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+
+ return ioread32(&chip->reg->pi) & (1 << nr);
+}
+
+static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+ u32 pm;
+ u32 reg_val;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS;
+ pm |= (1 << nr);
+ iowrite32(pm, &chip->reg->pm);
+
+ reg_val = ioread32(&chip->reg->po);
+ if (val)
+ reg_val |= (1 << nr);
+ else
+ reg_val &= ~(1 << nr);
+
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+ u32 pm;
+
+ mutex_lock(&chip->lock);
+ pm = ioread32(&chip->reg->pm) & PCH_GPIO_ALL_PINS; /* bits 0-11 */
+ pm &= ~(1 << nr);
+ iowrite32(pm, &chip->reg->pm);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+/*
+ * Save the PO and PM register configuration.
+ */
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
+{
+ chip->pch_gpio_reg.po_reg = ioread32(&chip->reg->po);
+ chip->pch_gpio_reg.pm_reg = ioread32(&chip->reg->pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+{
+ /* restore contents of PO register */
+ iowrite32(chip->pch_gpio_reg.po_reg, &chip->reg->po);
+ /* restore contents of PM register */
+ iowrite32(chip->pch_gpio_reg.pm_reg, &chip->reg->pm);
+}
+
+static void pch_gpio_setup(struct pch_gpio *chip)
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = pch_gpio_direction_input;
+ gpio->get = pch_gpio_get;
+ gpio->direction_output = pch_gpio_direction_output;
+ gpio->set = pch_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = -1;
+ gpio->ngpio = GPIO_NUM_PINS;
+ gpio->can_sleep = 0;
+}
+
+static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ s32 ret;
+ struct pch_gpio *chip;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s : pci_enable_device FAILED", __func__);
+ goto err_pci_enable;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
+ goto err_request_regions;
+ }
+
+ chip->base = pci_iomap(pdev, 1, 0);
+ if (chip->base == 0) {
+ dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ chip->reg = chip->base;
+ pci_set_drvdata(pdev, chip);
+ mutex_init(&chip->lock);
+ pch_gpio_setup(chip);
+ ret = gpiochip_add(&chip->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "PCH gpio: Failed to register GPIO\n");
+ goto err_gpiochip_add;
+ }
+
+ return 0;
+
+err_gpiochip_add:
+ pci_iounmap(pdev, chip->base);
+
+err_iomap:
+ pci_release_regions(pdev);
+
+err_request_regions:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+ kfree(chip);
+ dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+ return ret;
+}
+
+static void __devexit pch_gpio_remove(struct pci_dev *pdev)
+{
+ int err;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ err = gpiochip_remove(&chip->gpio);
+ if (err)
+ dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+
+ pci_iounmap(pdev, chip->base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int pch_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ s32 ret;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ pch_gpio_save_reg_conf(chip);
+ pch_gpio_restore_reg_conf(chip);
+
+ ret = pci_save_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+ return ret;
+ }
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_wake(pdev, PCI_D0, 1);
+ if (ret)
+ dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+ return 0;
+}
+
+static int pch_gpio_resume(struct pci_dev *pdev)
+{
+ s32 ret;
+ struct pch_gpio *chip = pci_get_drvdata(pdev);
+
+ ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+ return ret;
+ }
+ pci_restore_state(pdev);
+
+ iowrite32(0x01, &chip->reg->reset);
+ iowrite32(0x00, &chip->reg->reset);
+ pch_gpio_restore_reg_conf(chip);
+
+ return 0;
+}
+#else
+#define pch_gpio_suspend NULL
+#define pch_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { 0, }
+};
+
+static struct pci_driver pch_gpio_driver = {
+ .name = "pch_gpio",
+ .id_table = pch_gpio_pcidev_id,
+ .probe = pch_gpio_probe,
+ .remove = __devexit_p(pch_gpio_remove),
+ .suspend = pch_gpio_suspend,
+ .resume = pch_gpio_resume
+};
+
+static int __init pch_gpio_pci_init(void)
+{
+ return pci_register_driver(&pch_gpio_driver);
+}
+module_init(pch_gpio_pci_init);
+
+static void __exit pch_gpio_pci_exit(void)
+{
+ pci_unregister_driver(&pch_gpio_driver);
+}
+module_exit(pch_gpio_pci_exit);
+
+MODULE_DESCRIPTION("PCH GPIO PCI Driver");
+MODULE_LICENSE("GPL");
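
Since pch_gpio_setup() registers its twelve pins with base = -1, gpiolib assigns the numbers dynamically at gpiochip_add() time; consumers then use the ordinary gpiolib calls. A minimal hedged sketch follows; the GPIO number passed in is a placeholder for whatever base gpiolib assigned.

/*
 * Sketch: driving one PCH pin through the generic gpiolib API.
 */
#include <linux/gpio.h>

static int example_toggle_pch_pin(unsigned gpio)
{
	int ret;

	ret = gpio_request(gpio, "pch-example");
	if (ret)
		return ret;

	/* pch_gpio_direction_output() marks the pin as an output in PM. */
	ret = gpio_direction_output(gpio, 1);
	if (ret)
		goto out;

	/* pch_gpio_set() does the read-modify-write of the PO register. */
	gpio_set_value(gpio, 0);

	/* pch_gpio_get() reads the pin back from the PI register. */
	ret = !!gpio_get_value(gpio);
out:
	gpio_free(gpio);
	return ret;
}
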
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index ddd053108a13..45293662e950 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -47,6 +47,7 @@ struct timbgpio {
spinlock_t lock; /* mutual exclusion */
struct gpio_chip gpio;
int irq_base;
+ unsigned long last_ier;
};
static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
@@ -112,16 +113,24 @@ static void timbgpio_irq_disable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
+ unsigned long flags;
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+ spin_lock_irqsave(&tgpio->lock, flags);
+ tgpio->last_ier &= ~(1 << offset);
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
}
static void timbgpio_irq_enable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
+ unsigned long flags;
- timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+ spin_lock_irqsave(&tgpio->lock, flags);
+ tgpio->last_ier |= 1 << offset;
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
}
static int timbgpio_irq_type(unsigned irq, unsigned trigger)
@@ -194,8 +203,16 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
+ /*
+ * Some versions of the hardware trash the IER register if more than
+ * one interrupt is received simultaneously.
+ */
+ iowrite32(0, tgpio->membase + TGPIO_IER);
+
for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
static struct irq_chip timbgpio_irqchip = {
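
The timbgpio hunks above keep a last_ier shadow under the spinlock and mask everything while the demux handler runs, because some hardware revisions trash IER when interrupts arrive back to back. Below is a generic, self-contained sketch of that shadow-register pattern; the register offsets, struct and function names are placeholders, not the timberdale layout.

/*
 * Generic sketch of the "shadow the interrupt-enable register" pattern:
 * enable/disable only touch the cached value under a spinlock, and the
 * demux handler masks everything while dispatching, then restores the
 * shadow.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

struct shadowed_irq_ctrl {
	void __iomem	*regs;
	spinlock_t	lock;
	unsigned long	last_ier;	/* software copy of the IER register */
};

#define EXAMPLE_IER	0x08		/* placeholder register offsets */
#define EXAMPLE_IPR	0x0c

static void example_irq_enable_bit(struct shadowed_irq_ctrl *c, int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->last_ier |= 1UL << bit;
	iowrite32(c->last_ier, c->regs + EXAMPLE_IER);
	spin_unlock_irqrestore(&c->lock, flags);
}

static unsigned long example_demux(struct shadowed_irq_ctrl *c)
{
	unsigned long pending = ioread32(c->regs + EXAMPLE_IPR);

	/* Mask everything while dispatching; broken hardware may otherwise
	 * corrupt IER when a second interrupt arrives. */
	iowrite32(0, c->regs + EXAMPLE_IER);
	/* ... dispatch per-bit handlers for 'pending' here ... */
	iowrite32(c->last_ier, c->regs + EXAMPLE_IER);
	return pending;
}
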
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 30879df3daea..cc9277885dd0 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/ vga/
+obj-y += drm/ vga/ stub/
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f3a23a329f4e..997c43d04909 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o drm_drawable.o \
+ drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index ba38e0147220..252fdb98b73a 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = drm_alloc_agp(dev, pages, type))) {
+ if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
kfree(entry);
return -ENOMEM;
}
@@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return head;
}
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
- size_t pages, u32 type)
-{
- return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return 0;
- agp_free_memory(handle);
- return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
- if (!handle)
- return -EINVAL;
- return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
- if (!handle)
- return -EINVAL;
- return agp_unbind_memory(handle);
-}
-
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev,
DRM_DEBUG("\n");
- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev,
mem->page_count = num_pages;
mem->is_flushed = true;
- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 2607753a320b..6d440fb894cf 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return -ENOMEM;
}
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx->handle)) {
- DRM_DEBUG("Running out of ctxs or memory.\n");
- return -ENOMEM;
- }
- }
-
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 37e0b4fa482a..6985cb1da72c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 677b275fa721..9d8c892d07c9 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
deleted file mode 100644
index c53c9768cc11..000000000000
--- a/drivers/gpu/drm/drm_drawable.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- unsigned long irqflags;
- struct drm_draw *draw = data;
- int new_id = 0;
- int ret;
-
-again:
- if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
- DRM_ERROR("Out of memory expanding drawable idr\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
- if (ret == -EAGAIN) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- goto again;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- draw->handle = new_id;
-
- DRM_DEBUG("%d\n", draw->handle);
-
- return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_draw *draw = data;
- unsigned long irqflags;
- struct drm_drawable_info *info;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- info = drm_get_drawable_info(dev, draw->handle);
- if (info == NULL) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- return -EINVAL;
- }
- kfree(info->rects);
- kfree(info);
-
- idr_remove(&dev->drw_idr, draw->handle);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw->handle);
- return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_update_draw *update = data;
- unsigned long irqflags;
- struct drm_clip_rect *rects;
- struct drm_drawable_info *info;
- int err;
-
- info = idr_find(&dev->drw_idr, update->handle);
- if (!info) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
- DRM_ERROR("No such drawable %d\n", update->handle);
- kfree(info);
- return -EINVAL;
- }
- }
-
- switch (update->type) {
- case DRM_DRAWABLE_CLIPRECTS:
- if (update->num == 0)
- rects = NULL;
- else if (update->num != info->num_rects) {
- rects = kmalloc(update->num *
- sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- } else
- rects = info->rects;
-
- if (update->num && !rects) {
- DRM_ERROR("Failed to allocate cliprect memory\n");
- err = -ENOMEM;
- goto error;
- }
-
- if (update->num && DRM_COPY_FROM_USER(rects,
- (struct drm_clip_rect __user *)
- (unsigned long)update->data,
- update->num *
- sizeof(*rects))) {
- DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = -EFAULT;
- goto error;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (rects != info->rects) {
- kfree(info->rects);
- }
-
- info->rects = rects;
- info->num_rects = update->num;
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update->handle);
- break;
- default:
- DRM_ERROR("Invalid update type %d\n", update->type);
- return -EINVAL;
- }
-
- return 0;
-
-error:
- if (rects != info->rects)
- kfree(rects);
-
- return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
- return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
- struct drm_drawable_info *info = p;
-
- if (info) {
- kfree(info->rects);
- kfree(info);
- }
-
- return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
- idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
- idr_remove_all(&dev->drw_idr);
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff6690f4fc87..271835a71570 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Free drawable information memory */
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 96e963108225..c1a26217a530 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -1268,34 +1267,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
}
#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
#define VENDOR_BLOCK 0x03
+#define EDID_BASIC_AUDIO (1 << 6)
+
/**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ * Search EDID for CEA extension block.
*/
-bool drm_detect_hdmi_monitor(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
{
- char *edid_ext = NULL;
- int i, hdmi_id;
- int start_offset, end_offset;
- bool is_hdmi = false;
+ u8 *edid_ext = NULL;
+ int i;
/* No EDID or EDID extensions */
if (edid == NULL || edid->extensions == 0)
- goto end;
+ return NULL;
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
- if (edid_ext[0] == 0x02)
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
break;
}
if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, hdmi_id;
+ int start_offset, end_offset;
+ bool is_hdmi = false;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
goto end;
/* Data block offset in CEA extension block */
@@ -1326,6 +1342,53 @@ end:
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: monitor EDID information
+ *
+ * The monitor must have a CEA extension block. If it advertises 'basic
+ * audio' but has no CEA audio blocks, assume 'basic audio' support only.
+ * If any audio extension block with a supported audio format is present,
+ * assume at least 'basic audio' support, even if the 'basic audio' flag
+ * is not set in the EDID.
+ *
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ /* Data block offset in CEA extension block */
+ start_offset = 4;
+ end_offset = edid_ext[2];
+
+ for (i = start_offset; i < end_offset;
+ i += ((edid_ext[i] & 0x1f) + 1)) {
+ if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
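
drm_detect_monitor_audio() is intended to be called by KMS drivers after fetching a connector's EDID, typically to decide whether to light up HDMI/DisplayPort audio. A hedged sketch of such a call site follows; the surrounding helper and its DDC adapter argument are illustrative, not code from this patch.

/*
 * Sketch: probing a connector's EDID and checking audio capability with
 * the new helper.
 */
#include <linux/slab.h>
#include "drmP.h"
#include "drm_crtc.h"

static bool example_connector_has_audio(struct drm_connector *connector,
					struct i2c_adapter *ddc)
{
	struct edid *edid;
	bool has_audio = false;

	edid = drm_get_edid(connector, ddc);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}
	return has_audio;
}
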
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6a5e403f9aa1..d2849e4ea4d0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
return 0;
}
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -260,11 +284,12 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
continue;
funcs = mode_set->crtc->helper_private;
+ drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
- mode_set->y);
-
+ mode_set->y,
+ ENTER_ATOMIC_MODE_SET);
}
}
@@ -308,8 +333,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
continue;
}
+ drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
- crtc->y);
+ crtc->y, LEAVE_ATOMIC_MODE_SET);
}
return 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 5663d2719063..ea1c4b019ebf 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev)
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
@@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev,
atomic_set(&obj->handle_count, 0);
obj->size = size;
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
-
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
@@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
}
EXPORT_SYMBOL(drm_gem_object_release);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 974e970ce3f8..3cdbaf379bb5 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
- seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
- seq_printf(m, "%d gtt total\n", dev->gtt_total);
- return 0;
-}
-
#if DRM_DEBUG_CODE
int drm_vma_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 9bf93bc9a32c..632ae243ede0 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -37,6 +37,8 @@
static int drm_notifier(void *priv);
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
/**
* Lock ioctl.
*
@@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
- if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
- dev->driver->dma_ready(dev);
-
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
@@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
}
- if (dev->driver->kernel_context_switch &&
- dev->last_context != lock->context) {
- dev->driver->kernel_context_switch(dev, dev->last_context,
- lock->context);
- }
-
return 0;
}
@@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
- /* kernel_context_switch isn't used by any of the x86 drm
- * modules but is required by the Sparc driver.
- */
- if (dev->driver->kernel_context_switch_unlock)
- dev->driver->kernel_context_switch_unlock(dev);
- else {
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
}
unblock_all_signals();
@@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
+static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
@@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
-EXPORT_SYMBOL(drm_lock_take);
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
-EXPORT_SYMBOL(drm_lock_free);
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
-EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
@@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 7732268eced2..c9b805000a11 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
- return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
/** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+ agp_free_memory(handle);
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
- return drm_agp_bind_memory(handle, start);
+ return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
- return drm_agp_unbind_memory(handle);
+ return agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index a9ba6b69ad35..9e5b07efebb7 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = {
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
@@ -151,7 +150,6 @@ fail:
int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
int ret;
@@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (dev->driver->proc_init) {
- ret = dev->driver->proc_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/proc/dri.\n");
- return ret;
- }
- }
return 0;
}
@@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
*/
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
- struct drm_device *dev = minor->dev;
char name[64];
if (!root || !minor->proc_root)
return 0;
- if (dev->driver->proc_cleanup)
- dev->driver->proc_cleanup(minor);
-
drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
sprintf(name, "%d", minor->index);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 9034c4c6100d..d15e09b0ae0b 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
drm_sg_cleanup(entry);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_sg_alloc);
-
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d1ad57450df1..cdc89ee042cc 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev,
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
- spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
- idr_init(&dev->drw_idr);
-
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 5df450683aab..2c3fcbdfd8ff 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
- return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
return dev->hose->dense_mem_base - dev->hose->mem_space->start;
@@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
/**
* mmap DMA memory.
*
@@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- offset = dev->driver->get_reg_ofs(dev);
+ offset = drm_core_get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fe69914ce507..88bcd331e7c5 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -52,8 +52,6 @@ static struct drm_driver driver = {
.device_is_agp = i810_driver_device_is_agp,
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = i810_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 5b6298b24e24..f655ab7977da 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -57,8 +57,6 @@ static struct drm_driver driver = {
.device_is_agp = i830_driver_device_is_agp,
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
.irq_preinstall = i830_driver_irq_preinstall,
.irq_postinstall = i830_driver_irq_postinstall,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e53458edb..fdc833d5cc7b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o
-i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_ACPI) += intel_acpi.o
+
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 14d59804acd7..af70337567ce 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -165,67 +165,44 @@ struct ch7017_priv {
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- u8 out_buf[2];
- u8 in_buf[2];
-
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = in_buf,
+ .buf = val,
}
};
-
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
- *val= in_buf[0];
- return true;
- };
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
- struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
- uint8_t out_buf[2];
+ uint8_t buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
- .buf = out_buf,
+ .buf = buf,
};
-
- out_buf[0] = addr;
- out_buf[1] = val;
-
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
- return true;
-
- return false;
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
- uint8_t val;
+ const char *str;
+ u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
- if (val != CH7017_DEVICE_ID_VALUE &&
- val != CH7018_DEVICE_ID_VALUE &&
- val != CH7019_DEVICE_ID_VALUE) {
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "Slave %d.\n",
- val, i2cbus->adapter.name,dvo->slave_addr);
+ "slave %d.\n",
+ val, adapter->name,dvo->slave_addr);
goto fail;
}
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
return true;
+
fail:
kfree(priv);
return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
}
/* XXX: Should actually wait for update power status somehow */
- udelay(20000);
+ msleep(20);
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 6f1944b24441..7eaa94e4ff06 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index a2ec3f487202..a12ed9414cc7 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
out_buf[0] = addr;
- if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 9b8e6765cf26..e4b4091df942 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 56f66426207f..8ab2855bb544 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
}
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, dvo->slave_addr);
+ addr, adapter->name, dvo->slave_addr);
}
return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 048149748fdc..1f4f3ceb63c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
#if defined(CONFIG_DEBUG_FS)
-#define ACTIVE_LIST 1
-#define FLUSHING_LIST 2
-#define INACTIVE_LIST 3
+enum {
+ ACTIVE_LIST,
+ FLUSHING_LIST,
+ INACTIVE_LIST,
+ PINNED_LIST,
+ DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ const struct intel_device_info *info = INTEL_INFO(dev);
+
+ seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+ B(is_mobile);
+ B(is_i85x);
+ B(is_i915g);
+ B(is_i945gm);
+ B(is_g33);
+ B(need_gfx_hws);
+ B(is_g4x);
+ B(is_pineview);
+ B(is_broadwater);
+ B(is_crestline);
+ B(has_fbc);
+ B(has_rc6);
+ B(has_pipe_cxsr);
+ B(has_hotplug);
+ B(cursor_needs_physical);
+ B(has_overlay);
+ B(overlay_needs_physical);
+ B(supports_tv);
+ B(has_bsd_ring);
+ B(has_blt_ring);
+#undef B
+
+ return 0;
+}
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
}
}
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+ seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_rendering_seqno,
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ seq_printf(m, " (name: %d)", obj->base.name);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", obj->fence_reg);
+ if (obj->gtt_space != NULL)
+ seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ if (obj->ring != NULL)
+ seq_printf(m, " (%s)", obj->ring->name);
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
- spinlock_t *lock = NULL;
+ size_t total_obj_size, total_gtt_size;
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
- lock = &dev_priv->mm.active_list_lock;
- head = &dev_priv->render_ring.active_list;
+ head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
+ case PINNED_LIST:
+ seq_printf(m, "Pinned:\n");
+ head = &dev_priv->mm.pinned_list;
+ break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
+ case DEFERRED_FREE_LIST:
+ seq_printf(m, "Deferred free:\n");
+ head = &dev_priv->mm.deferred_free_list;
+ break;
default:
- DRM_INFO("Ooops, unexpected list\n");
- return 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- if (lock)
- spin_lock(lock);
- list_for_each_entry(obj_priv, head, list)
- {
- seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- &obj_priv->base,
- get_pin_flag(obj_priv),
- obj_priv->base.size,
- obj_priv->base.read_domains,
- obj_priv->base.write_domain,
- obj_priv->last_rendering_seqno,
- obj_priv->dirty ? " dirty" : "",
- obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
- if (obj_priv->base.name)
- seq_printf(m, " (name: %d)", obj_priv->base.name);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
- if (obj_priv->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj_priv, head, mm_list) {
+ seq_printf(m, " ");
+ describe_obj(m, obj_priv);
seq_printf(m, "\n");
+ total_obj_size += obj_priv->base.size;
+ total_gtt_size += obj_priv->gtt_space->size;
+ count++;
}
+ mutex_unlock(&dev->struct_mutex);
- if (lock)
- spin_unlock(lock);
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
return 0;
}
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+ seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+ seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+ seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+ seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+ seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+ seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Request:\n");
list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
atomic_read(&dev_priv->irq_received));
if (dev_priv->render_ring.status_page.page_addr != NULL) {
seq_printf(m, "Current sequence: %d\n",
- i915_get_gem_seqno(dev, &dev_priv->render_ring));
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "\n");
}
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+ struct io_mapping *mapping,
+ struct drm_i915_gem_object *obj_priv)
{
- int page, i;
- uint32_t *mem;
+ int page, page_count, i;
+ page_count = obj_priv->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
- mem = kmap_atomic(pages[page], KM_USER0);
+ u32 *mem = io_mapping_map_wc(mapping,
+ obj_priv->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(mem, KM_USER0);
+ io_mapping_unmap(mem);
}
}
@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_i915_gem_object *obj_priv;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
- list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("Failed to get pages: %d\n", ret);
- spin_unlock(&dev_priv->mm.active_list_lock);
- return ret;
- }
-
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
- i915_gem_object_put_pages(obj);
+ seq_printf(m, "--- gtt_offset = 0x%08x\n",
+ obj_priv->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
}
}
- spin_unlock(&dev_priv->mm.active_list_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u8 *virt;
- uint32_t *ptr, off;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
if (!dev_priv->render_ring.gem_object) {
seq_printf(m, "No ringbuffer setup\n");
- return 0;
- }
-
- virt = dev_priv->render_ring.virtual_start;
+ } else {
+ u8 *virt = dev_priv->render_ring.virtual_start;
+ uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
- ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
+ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ uint32_t *ptr = (uint32_t *)(virt + off);
+ seq_printf(m, "%08x : %08x\n", off, *ptr);
+ }
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
return 0;
}
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
} else {
seq_printf(m, "FBC disabled: ");
switch (dev_priv->no_fbc_reason) {
+ case FBC_NO_OUTPUT:
+ seq_printf(m, "no outputs");
+ break;
case FBC_STOLEN_TOO_SMALL:
seq_printf(m, "not enough stolen memory");
break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_GEN5(dev))
+ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
- "disabled");
+ seq_printf(m, "self-refresh: %s\n",
+ sr_enabled ? "enabled" : "disabled");
return 0;
}
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "GMCH temp: %ld\n", temp);
seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
return 0;
}
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_fbdev *ifbdev;
+ struct intel_framebuffer *fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ ifbdev = dev_priv->fbdev;
+ fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+
+ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ if (&fb->base == ifbdev->helper.fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.depth,
+ fb->base.bits_per_pixel);
+ describe_obj(m, to_intel_bo(fb->obj));
+ seq_printf(m, "\n");
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
atomic_set(&dev_priv->mm.wedged, val);
if (val) {
- DRM_WAKEUP(&dev_priv->irq_queue);
+ wake_up_all(&dev_priv->irq_queue);
queue_work(dev_priv->wq, &dev_priv->error_work);
}
@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
+ {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+ {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dd2c93ebfa3..7a26f4dd21ae 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,8 +40,7 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
/**
* Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
0xf0;
@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
if (!IS_I830(dev) && !IS_845G(dev)) {
BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
BEGIN_LP_RING(2);
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD:
value = HAS_BSD(dev);
break;
+ case I915_PARAM_HAS_BLT:
+ value = HAS_BLT(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -888,12 +890,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
return ret;
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
@@ -934,7 +936,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -971,7 +973,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot. This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size,
- uint32_t *start)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 tmp = 0;
- unsigned long overhead;
- unsigned long stolen;
-
- /* Get the fb aperture size and "stolen" memory amount. */
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
- *aperture_size = 1024 * 1024;
- *preallocated_size = 1024 * 1024;
-
- switch (dev->pdev->device) {
- case PCI_DEVICE_ID_INTEL_82830_CGC:
- case PCI_DEVICE_ID_INTEL_82845G_IG:
- case PCI_DEVICE_ID_INTEL_82855GM_IG:
- case PCI_DEVICE_ID_INTEL_82865_IG:
- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
- *aperture_size *= 64;
- else
- *aperture_size *= 128;
- break;
- default:
- /* 9xx supports large sizes, just look at the length */
- *aperture_size = pci_resource_len(dev->pdev, 2);
- break;
- }
-
- /*
- * Some of the preallocated space is taken by the GTT
- * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
- */
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
- overhead = 4096;
- else
- overhead = (*aperture_size / 1024) + 4096;
-
- if (IS_GEN6(dev)) {
- /* SNB has memory control reg at 0x50.w */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
- switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen = 192 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen = 288 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen = 320 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen = 384 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen = 416 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen = 448 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen = 480 * 1024 * 1024;
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen = 512 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & SNB_GMCH_GMS_STOLEN_MASK);
- return -1;
- }
- } else {
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
- }
- }
-
- *preallocated_size = stolen - overhead;
- *start = overhead;
-
- return 0;
-}
-
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
{
unsigned long *gtt;
unsigned long entry, phys;
- int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_bar = IS_GEN2(dev) ? 1 : 0;
int gtt_offset, gtt_size;
- if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
- if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
- IS_I945G(dev) || IS_I945GM(dev)) {
+ if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
entry &= ~PTE_ADDRESS_MASK_HIGH;
- }
/* If it's not a mapping type we know, then bail. */
if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
if (!compressed_fb) {
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
}
if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
4096, 0);
if (!compressed_llb) {
i915_warn_stolen(dev);
@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
- drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
- drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}
@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
}
static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
- 0xff000000;
-
- /* Basic memrange allocator for stolen space (aka vram) */
- drm_mm_init(&dev_priv->vram, 0, prealloc_size);
- DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
-
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
+ /* Basic memrange allocator for stolen space (aka mm.vram) */
+ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
/* Let GEM Manage from end of prealloc space to end of aperture.
*
@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
*/
dev_priv->allow_batchbuffer = 1;
- ret = intel_init_bios(dev);
+ ret = intel_parse_bios(dev);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto cleanup_ringbuffer;
+ intel_register_dsm_handler();
+
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
i915_switcheroo_can_switch);
@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- /*
- * Initialize the hardware status page IRQ location.
- */
-
- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
drm_kms_helper_poll_init(dev);
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
+
return 0;
cleanup_irq:
@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
* - dev_priv->fmax
* - dev_priv->gpu_busy
*/
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);
/**
* i915_read_mch_val - return value for IPS use
@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size, prealloc_start;
+ uint32_t agp_size, prealloc_size;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
- mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
base = pci_resource_start(dev->pdev, mmio_bar);
size = pci_resource_len(dev->pdev, mmio_bar);
@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
- if (ret)
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
goto out_iomapfree;
-
- if (prealloc_size > intel_max_stolen) {
- DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
- prealloc_size >> 20, intel_max_stolen >> 20);
- prealloc_size = intel_max_stolen;
}
- dev_priv->wq = create_singlethread_workqueue("i915");
+ prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and hangcheck.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ */
+ dev_priv->wq = alloc_workqueue("i915",
+ WQ_UNBOUND | WQ_NON_REENTRANT,
+ 1);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
+ intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
+
+ /* Make sure the bios did its job and set up vital registers */
+ intel_setup_bios(dev);
i915_gem_load(dev);
@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
- else if (IS_IRONLAKE(dev))
+ else if (IS_GEN5(dev))
i915_ironlake_get_mem_freq(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_start,
- prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev);
+ acpi_video_register();
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
- /* XXX Prevent module unload due to memory corruption bugs. */
- __module_get(THIS_MODULE);
-
return 0;
out_workqueue_free:
@@ -2252,15 +2090,20 @@ free_priv:
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_destroy_error_state(dev);
+ int ret;
spin_lock(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
- destroy_workqueue(dev_priv->wq);
- del_timer_sync(&dev_priv->hangcheck_timer);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->mm.gtt_mtrr = -1;
}
+ acpi_video_unregister();
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
/*
@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
}
- drm_irq_uninstall(dev);
+
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+ /* Free error state after interrupts are fully disabled. */
+ del_timer_sync(&dev_priv->hangcheck_timer);
+ cancel_work_sync(&dev_priv->error_work);
+ i915_destroy_error_state(dev);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
-
- intel_opregion_free(dev, 0);
+ intel_opregion_fini(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
+
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->vram);
- i915_gem_lastclose(dev);
+ drm_mm_takedown(&dev_priv->mm.vram);
intel_cleanup_overlay(dev);
+
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
}
+ if (dev_priv->regs != NULL)
+ iounmap(dev_priv->regs);
+
+ intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->wq);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
return 0;
}
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv;
+ struct drm_i915_file_private *file_priv;
DRM_DEBUG_DRIVER("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
- if (!i915_file_priv)
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
return -ENOMEM;
- file_priv->driver_priv = i915_file_priv;
+ file->driver_priv = file_priv;
- INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+ spin_lock_init(&file_priv->mm.lock);
+ INIT_LIST_HEAD(&file_priv->mm.request_list);
return 0;
}
@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
- kfree(i915_file_priv);
+ kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 895ab896e336..3467dd420760 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .is_i8xx = 1,
+ .gen = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_mobile = 1,
.cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 4, .is_broadwater = 1,
.has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .gen = 4, .is_crestline = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_g4x = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 5,
.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 5, .is_mobile = 1,
.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .gen = 6,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
- intel_opregion_free(dev, 1);
+ intel_opregion_fini(dev);
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
+ drm_kms_helper_poll_disable(dev);
+
error = i915_drm_freeze(dev);
if (error)
return error;
@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
int error = 0;
i915_restore_state(dev);
-
- intel_opregion_init(dev, 1);
+ intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
+ intel_opregion_init(dev);
+
dev_priv->modeset_on_lid = 0;
return error;
@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
int i915_resume(struct drm_device *dev)
{
+ int ret;
+
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
- return i915_drm_thaw(dev);
+ ret = i915_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+ u8 gdrst;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+ return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
/**
@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned long timeout;
- u8 gdrst;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
+ int ret;
mutex_lock(&dev->struct_mutex);
- /*
- * Clear request list
- */
- i915_gem_retire_requests(dev);
-
- if (need_display)
- i915_save_display(dev);
-
- if (IS_I965G(dev) || IS_G4X(dev)) {
- /*
- * Set the domains we want to reset, then the reset bit (bit 0).
- * Clear the reset bit after a while and wait for hardware status
- * bit (bit 1) to be set
- */
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
- udelay(50);
- pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
- /* ...we don't want to loop forever though, 500ms should be plenty */
- timeout = jiffies + msecs_to_jiffies(500);
- do {
- udelay(100);
- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
- } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
- if (gdrst & 0x1) {
- WARN(true, "i915: Failed to reset chip\n");
- mutex_unlock(&dev->struct_mutex);
- return -EIO;
- }
- } else {
- DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ i915_gem_reset(dev);
+
+ ret = -ENODEV;
+ if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+ } else switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ ret = ironlake_do_reset(dev, flags);
+ break;
+ case 4:
+ ret = i965_do_reset(dev, flags);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev, flags);
+ break;
+ }
+ dev_priv->last_gpu_reset = get_seconds();
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
- return -ENODEV;
+ return ret;
}
/* Ok, now get things going again... */
@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
mutex_lock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
- * Display needs restore too...
+ * Perform a full modeset as on later generations, e.g. Ironlake, we may
+ * need to retrain the display link and cannot just restore the register
+ * values.
*/
- if (need_display)
- i915_restore_display(dev);
+ if (need_display) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -524,8 +610,6 @@ static struct drm_driver driver = {
.irq_uninstall = i915_driver_irq_uninstall,
.irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af4a263cf257..2c2c19b6285e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,8 @@
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <drm/intel-gtt.h>
/* General customization:
*/
@@ -73,11 +75,9 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
#define WATCH_EXEC 0
-#define WATCH_LRU 0
#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
+#define WATCH_LISTS 0
#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
};
+#define OPREGION_SIZE (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
struct list_head lru_list;
+ bool gpu;
};
struct sdvo_device_mapping {
+ u8 initialized;
u8 dvo_port;
u8 slave_addr;
u8 dvo_wiring;
- u8 initialized;
+ u8 i2c_pin;
+ u8 i2c_speed;
u8 ddc_pin;
};
@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
struct intel_device_info {
u8 gen;
u8 is_mobile : 1;
- u8 is_i8xx : 1;
u8 is_i85x : 1;
u8 is_i915g : 1;
- u8 is_i9xx : 1;
u8 is_i945gm : 1;
- u8 is_i965g : 1;
- u8 is_i965gm : 1;
u8 is_g33 : 1;
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
u8 is_pineview : 1;
u8 is_broadwater : 1;
u8 is_crestline : 1;
- u8 is_ironlake : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
+ u8 has_overlay : 1;
+ u8 overlay_needs_physical : 1;
+ u8 supports_tv : 1;
+ u8 has_bsd_ring : 1;
+ u8 has_blt_ring : 1;
};
enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
void __iomem *regs;
+ struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+ } *gmbus;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
+ struct intel_ring_buffer blt_ring;
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+ unsigned long debug_flags;
wait_queue_head_t irq_queue;
atomic_t irq_received;
@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
- u32 flush_rings;
-#define FLUSH_RENDER_RING 0x1
-#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_instdone;
uint32_t last_instdone1;
- struct drm_mm vram;
-
unsigned long cfb_size;
unsigned long cfb_pitch;
+ unsigned long cfb_offset;
int cfb_fence;
int cfb_plane;
+ int cfb_y;
int irq_enabled;
@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
struct intel_overlay *overlay;
/* LVDS info */
- int backlight_duty_cycle; /* restore backlight to this value */
- bool panel_wants_dither;
+ int backlight_level; /* restore backlight to this value */
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
- unsigned int edp_support:1;
int lvds_ssc_freq;
- int edp_bpp;
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
spinlock_t error_lock;
struct drm_i915_error_state *first_error;
struct work_struct error_work;
+ struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
u32 saveMCHBAR_RENDER_STANDBY;
struct {
+ /** Bridge to intel-gtt-ko */
+ struct intel_gtt *gtt;
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm vram;
+ /** Memory allocator for GTT */
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
*/
struct list_head shrink_list;
- spinlock_t active_list_lock;
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
/**
* List of objects which are not in the ringbuffer but which
@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
- * List of objects currently pending a GPU write flush.
- *
- * All elements on this list will belong to either the
- * active_list or flushing_list, last_rendering_seqno can
- * be used to differentiate between the two elements.
- */
- struct list_head gpu_write_list;
-
- /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
+ /**
+ * LRU list of objects which are not in the ringbuffer but
+ * are still pinned in the GTT.
+ */
+ struct list_head pinned_list;
+
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ uint32_t flush_rings;
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ size_t pin_memory;
+ size_t gtt_memory;
+ size_t gtt_total;
+ u32 object_count;
+ u32 pin_count;
+ u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
- /* indicate whether the LVDS EDID is OK */
- bool lvds_edid_good;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+ unsigned long last_gpu_reset;
+
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
} drm_i915_private_t;
@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
struct drm_mm_node *gtt_space;
/** This object's place on the active/flushing/inactive lists */
- struct list_head list;
+ struct list_head ring_list;
+ struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place on eviction list */
@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
/** global list entry for this request */
struct list_head list;
+ struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
};
struct drm_i915_file_private {
struct {
+ struct spinlock lock;
struct list_head request_list;
} mm;
};
@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
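
The signed-difference comparison in the new inline helper is what keeps request sequence numbers well ordered across 32-bit wrap-around. A standalone sketch of the same trick, plain user-space C rather than driver code, with the helper name merely echoing the one added here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Same idea as the helper above: take the difference as signed, so a
     * seqno that has wrapped past zero still compares as "later". */
    static bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(11, 10));          /* 1: plainly later  */
        printf("%d\n", seqno_passed(5, 0xfffffffbu));  /* 1: later, wrapped */
        printf("%d\n", seqno_passed(0xfffffffbu, 5));  /* 0: earlier        */
        return 0;
    }

Casting the unsigned difference to int32_t means any sequence number within 2^31 of the reference still reads as having passed it, even after the 32-bit counter wraps.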
+
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible);
void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring);
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno, int interruptible,
- struct intel_ring_buffer *ring);
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj,
int id,
@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
#else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
#else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/* overlay */
+#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
LOCK_TEST_WITH_RETURN(dev, file_priv); \
} while (0)
-#define I915_READ(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val;
+
+ val = readl(dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_READ)
+ printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, dev_priv->regs + reg);
+ if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+ printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg) i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
#define I915_READ16(reg) readw(dev_priv->regs + (reg))
#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg) readb(dev_priv->regs + (reg))
@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define POSTING_READ(reg) (void)I915_READ(reg)
#define POSTING_READ16(reg) (void)I915_READ16(reg)
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+ I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+ I915_DEBUG_WRITE))
+
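With register access now routed through i915_read()/i915_write(), the two flag bits above let MMIO tracing be switched on per device at run time. A user-space mock of that pattern, assuming printf and an in-memory register array in place of printk/readl/writel; none of this is driver code:

    #include <stdio.h>
    #include <stdint.h>

    #define DBG_READ  (1u << 0)
    #define DBG_WRITE (1u << 1)

    struct mock_dev {
        uint32_t regs[16];           /* stand-in for the MMIO window */
        unsigned long debug_flags;
    };

    static uint32_t reg_read(struct mock_dev *d, unsigned int reg)
    {
        uint32_t val = d->regs[reg];
        if (d->debug_flags & DBG_READ)
            printf("read 0x%08x from reg %u\n", val, reg);
        return val;
    }

    static void reg_write(struct mock_dev *d, unsigned int reg, uint32_t val)
    {
        d->regs[reg] = val;
        if (d->debug_flags & DBG_WRITE)
            printf("wrote 0x%08x to reg %u\n", val, reg);
    }

    int main(void)
    {
        struct mock_dev d = { .debug_flags = 0 };

        reg_write(&d, 3, 0xdeadbeef);            /* silent                      */
        d.debug_flags |= DBG_READ | DBG_WRITE;   /* like I915_DEBUG_ENABLE_IO() */
        reg_write(&d, 3, 0xcafef00d);            /* traced                      */
        (void)reg_read(&d, 3);                   /* traced                      */
        return 0;
    }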
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
@@ -1188,33 +1290,34 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
- !IS_GEN6(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
- IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
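
The macro rework above replaces chip-name tests such as IS_I965G, IS_I9XX and IS_IRONLAKE with a generation number plus per-device capability bits. A minimal, self-contained sketch of that device-info pattern; the struct and macro names only mirror the ones used in this patch:

    #include <stdio.h>

    /* One read-only info record per PCI ID, picked at probe time. */
    struct intel_device_info_mock {
        unsigned char gen;
        unsigned int has_bsd_ring : 1;
        unsigned int has_blt_ring : 1;
        unsigned int supports_tv  : 1;
    };

    struct drm_dev_mock {
        const struct intel_device_info_mock *info;
    };

    #define INTEL_INFO(dev)  ((dev)->info)
    #define HAS_BSD(dev)     (INTEL_INFO(dev)->has_bsd_ring)
    #define HAS_BLT(dev)     (INTEL_INFO(dev)->has_blt_ring)
    #define IS_GEN5(dev)     (INTEL_INFO(dev)->gen == 5)

    static const struct intel_device_info_mock ironlake_mock = {
        .gen = 5, .has_bsd_ring = 1,
    };

    int main(void)
    {
        struct drm_dev_mock dev = { .info = &ironlake_mock };

        /* Callers ask about capabilities, never about chip names. */
        printf("gen5=%d bsd=%d blt=%d\n",
               IS_GEN5(&dev), HAS_BSD(&dev), HAS_BLT(&dev));
        return 0;
    }

New hardware then only needs a new info record, and feature code never grows another chip-name special case.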
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90b1d6753b9d..8eb8453208b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,9 @@
#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+ bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count++;
+ dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.gtt_count--;
+ dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count++;
+ dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.pin_count--;
+ dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct completion *x = &dev_priv->error_completion;
+ unsigned long flags;
+ int ret;
+
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ ret = wait_for_completion_interruptible(x);
+ if (ret)
+ return ret;
+
+ /* Success, we reset the GPU! */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return 0;
+
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ return -EIO;
+}
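
i915_gem_check_is_wedged() consumes one token from error_completion while waiting out a reset and, if the GPU is still wedged, hands the token back so later callers are not left waiting on a wake-up that never comes. A rough user-space analogue of that pattern, using a POSIX counting semaphore purely as a stand-in for the kernel's struct completion:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static sem_t reset_done;     /* plays the role of error_completion */
    static atomic_int wedged;    /* plays the role of mm.wedged        */

    /* Returns 0 if the device is usable, -EIO if it stays wedged. */
    static int check_is_wedged(void)
    {
        if (!atomic_load(&wedged))
            return 0;

        sem_wait(&reset_done);          /* consume one completion token */

        if (!atomic_load(&wedged))
            return 0;                   /* the reset brought it back    */

        /* Still wedged: put the token back so the next caller does not
         * block forever on a wake-up that will never arrive. */
        sem_post(&reset_done);
        return -EIO;
    }

    int main(void)
    {
        sem_init(&reset_done, 0, 0);
        atomic_store(&wedged, 1);
        sem_post(&reset_done);          /* a failed reset still completes */

        printf("first caller:  %d\n", check_is_wedged());
        printf("second caller: %d\n", check_is_wedged());  /* no deadlock */
        return 0;
    }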
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EAGAIN;
+ }
+
+ WARN_ON(i915_verify_lists(dev));
+ return 0;
+}
+
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
obj_priv->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
- dev->gtt_total = (uint32_t) (end - start);
+ dev_priv->mm.gtt_total = end - start;
return 0;
}
@@ -103,14 +209,16 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- args->aper_size = dev->gtt_total;
- args->aper_available_size = (args->aper_size -
- atomic_read(&dev->pin_memory));
+ mutex_lock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
if (ret) {
+ drm_gem_object_release(obj);
+ i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ kfree(obj);
return ret;
}
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference(obj);
+ trace_i915_gem_object_create(obj);
+
args->handle = handle;
return 0;
}
@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
- int unwritten;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr, KM_USER0);
+ char *vaddr;
+ int ret;
- if (unwritten)
- return -EFAULT;
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+ ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ kunmap_atomic(vaddr);
- return 0;
+ return ret;
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret != 0)
- goto fail_unlock;
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
-
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail_put_pages;
+ if (fast_shmem_read(obj_priv->pages,
+ page_base, page_offset,
+ user_data, page_length))
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
static int
@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 1, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
- goto fail_put_user_pages;
+ goto out;
}
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages_or_evict(obj);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ args->offset,
+ args->size);
if (ret)
- goto fail_unlock;
+ goto out;
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
- args->size);
- if (ret != 0)
- goto fail_put_pages;
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
page_cache_release(user_pages[i]);
@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
/* Bounds check source. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
+ if (args->size == 0)
+ goto out;
+
if (!access_ok(VERIFY_WRITE,
(char __user *)(uintptr_t)args->data_ptr,
args->size)) {
ret = -EFAULT;
- goto err;
+ goto out;
}
- if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
- } else {
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
- if (ret != 0)
- ret = i915_gem_shmem_pread_slow(dev, obj, args,
- file_priv);
+ ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
}
-err:
- drm_gem_object_unreference_unlocked(obj);
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_cpu_read_domain_range(obj,
+ args->offset,
+ args->size);
+ if (ret)
+ goto out_put;
+
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+
+out_put:
+ i915_gem_object_put_pages(obj);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -509,13 +619,11 @@ fast_user_write(struct io_mapping *mapping,
char *vaddr_atomic;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
- if (unwritten)
- return -EFAULT;
- return 0;
+ io_mapping_unmap_atomic(vaddr_atomic);
+ return unwritten;
}
/* Here's the write path which can sleep for
@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
char __user *data,
int length)
{
- char __iomem *vaddr;
- unsigned long unwritten;
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
- if (vaddr == NULL)
- return -ENOMEM;
- unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr, KM_USER0);
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+ ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ kunmap_atomic(vaddr);
- if (unwritten)
- return -EFAULT;
- return 0;
+ return ret;
}
/**
@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret)
- goto fail;
-
obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length);
-
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (ret)
- goto fail;
+ if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+ page_offset, user_data, page_length))
+
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail:
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 0, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
goto out_unpin_pages;
}
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret)
- goto out_unlock;
-
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
- goto out_unpin_object;
+ goto out_unpin_pages;
obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
data_ptr += page_length;
}
-out_unpin_object:
- i915_gem_object_unpin(obj);
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length;
- int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret != 0)
- goto fail_unlock;
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
-
obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- ret = fast_shmem_write(obj_priv->pages,
+ if (fast_shmem_write(obj_priv->pages,
page_base, page_offset,
- user_data, page_length);
- if (ret)
- goto fail_put_pages;
+ user_data, page_length))
+ return -EFAULT;
remain -= page_length;
user_data += page_length;
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
+ mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 0, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
ret = -EFAULT;
- goto fail_put_user_pages;
+ goto out;
}
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_get_pages_or_evict(obj);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto fail_unlock;
+ goto out;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret != 0)
- goto fail_put_pages;
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset;
@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
offset += page_length;
}
-fail_put_pages:
- i915_gem_object_put_pages(obj);
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
for (i = 0; i < pinned_pages; i++)
page_cache_release(user_pages[i]);
drm_free_large(user_pages);
@@ -921,29 +976,46 @@ fail_put_user_pages:
*/
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret = 0;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
+
/* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
- goto err;
+ goto out;
}
+ if (args->size == 0)
+ goto out;
+
if (!access_ok(VERIFY_READ,
(char __user *)(uintptr_t)args->data_ptr,
args->size)) {
ret = -EFAULT;
- goto err;
+ goto out;
+ }
+
+ ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* perspective, requiring manual detiling by the client.
*/
if (obj_priv->phys_obj)
- ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0 &&
+ obj_priv->gtt_space &&
obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
- file_priv);
- }
- } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
+
+out_unpin:
+ i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
- if (ret == -EFAULT) {
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
- file_priv);
- }
- }
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
+ goto out;
-#if WATCH_PWRITE
- if (ret)
- DRM_INFO("pwrite failed %d\n", ret);
-#endif
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ goto out_put;
-err:
- drm_gem_object_unreference_unlocked(obj);
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
+out_put:
+ i915_gem_object_put_pages(obj);
+ }
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
- mutex_lock(&dev->struct_mutex);
-
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
- obj, obj->size, read_domains, write_domain);
-#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %zd)\n",
- __func__, args->handle, obj, obj->size);
-#endif
- obj_priv = to_intel_bo(obj);
-
/* Pinned buffers may be scanout, so flush the cache */
- if (obj_priv->pin_count)
+ if (to_intel_bo(obj)->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
+ ret = i915_gem_object_get_fence_reg(obj, true);
if (ret)
goto unlock;
}
if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
obj->size / PAGE_SIZE, 0, 0);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOMEM;
+ ret = -ENOSPC;
goto out_free_list;
}
@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
}
list->hash.key = list->file_offset_node->start;
- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
DRM_ERROR("failed to add to map hash\n");
- ret = -ENOMEM;
goto out_free_mm;
}
@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (IS_I9XX(dev))
+ if (INTEL_INFO(dev)->gen == 3)
start = 1024*1024;
else
start = 512*1024;
@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
-
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
-
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
args->offset = obj_priv->mmap_offset;
@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
*/
if (!obj_priv->agp_mem) {
ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
-void
+static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->pages = NULL;
}
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ring->outstanding_lazy_request = true;
+ return dev_priv->next_seqno;
+}
+
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
BUG_ON(ring == NULL);
obj_priv->ring = ring;
@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
drm_gem_object_reference(obj);
obj_priv->active = 1;
}
+
/* Move from whatever list we were on to the tail of execution. */
- spin_lock(&dev_priv->mm.active_list_lock);
- list_move_tail(&obj_priv->list, &ring->active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->ring_list, &ring->active_list);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+ list_del_init(&obj_priv->ring_list);
obj_priv->last_rendering_seqno = 0;
}
@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
else
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ list_del_init(&obj_priv->ring_list);
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
obj_priv->active = 0;
drm_gem_object_unreference(obj);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
static void
i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains, uint32_t seqno,
+ uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
+ &ring->gpu_write_list,
gpu_write_list) {
struct drm_gem_object *obj = &obj_priv->base;
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain &&
- obj_priv->ring->ring_flag == ring->ring_flag) {
+ if (obj->write_domain & flush_domains) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno, ring);
+ i915_gem_object_move_to_active(obj, ring);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
- uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = NULL;
- struct drm_i915_gem_request *request;
+ struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
- if (file_priv != NULL)
- i915_file_priv = file_priv->driver_priv;
+ if (file != NULL)
+ file_priv = file->driver_priv;
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
+ if (request == NULL) {
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return 0;
+ }
- seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+ seqno = ring->add_request(dev, ring, 0);
+ ring->outstanding_lazy_request = false;
request->seqno = seqno;
request->ring = ring;
@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (i915_file_priv) {
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
list_add_tail(&request->client_list,
- &i915_file_priv->mm.request_list);
- } else {
- INIT_LIST_HEAD(&request->client_list);
+ &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
- /* Associate any objects on the flushing list matching the write
- * domain we're flushing with our flush.
- */
- if (flush_domains != 0)
- i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
if (was_empty)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->mm.retire_work, HZ);
}
return seqno;
}
@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
* Ensures that all commands in the ring are finished
* before signalling the CPU
*/
-static uint32_t
+static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
uint32_t flush_domains = 0;
/* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
ring->flush(dev, ring,
I915_GEM_DOMAIN_COMMAND, flush_domains);
- return flush_domains;
}
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = request->file_priv;
- trace_i915_gem_request_retire(dev, request->seqno);
+ if (!file_priv)
+ return;
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- spin_lock(&dev_priv->mm.active_list_lock);
- while (!list_empty(&request->ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
- obj_priv = list_first_entry(&request->ring->active_list,
- struct drm_i915_gem_object,
- list);
- obj = &obj_priv->base;
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- goto out;
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
-#if WATCH_LRU
- DRM_INFO("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-#endif
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
- if (obj->write_domain != 0)
- i915_gem_object_move_to_flushing(obj);
- else {
- /* Take a reference on the object so it won't be
- * freed while the spinlock is held. The list
- * protection for this spinlock is safe when breaking
- * the lock like this since the next thing we do
- * is just get the head of the list again.
- */
- drm_gem_object_reference(obj);
- i915_gem_object_move_to_inactive(obj);
- spin_unlock(&dev_priv->mm.active_list_lock);
- drm_gem_object_unreference(obj);
- spin_lock(&dev_priv->mm.active_list_lock);
- }
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
}
-out:
- spin_unlock(&dev_priv->mm.active_list_lock);
}
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
{
- return (int32_t)(seq1 - seq2) >= 0;
-}
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int i;
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return ring->get_gem_seqno(dev, ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+
+ /* Remove anything from the flushing lists. The GPU cache is likely
+ * to be lost on reset along with the data, so simply move the
+ * lost bo to the inactive list.
+ */
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
+
+ obj_priv->base.write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_inactive(&obj_priv->base);
+ }
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ {
+ obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ /* The fence registers are invalidated so clear them out */
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ continue;
+
+ i915_gem_clear_fence_reg(reg->obj);
+ }
}
/**
@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!ring->status_page.page_addr
- || list_empty(&ring->request_list))
+ if (!ring->status_page.page_addr ||
+ list_empty(&ring->request_list))
return;
- seqno = i915_get_gem_seqno(dev, ring);
+ WARN_ON(i915_verify_lists(dev));
+ seqno = ring->get_seqno(dev, ring);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
- retiring_seqno = request->seqno;
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- atomic_read(&dev_priv->mm.wedged)) {
- i915_gem_retire_request(dev, request);
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+ trace_i915_gem_request_retire(dev, request->seqno);
+
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+ kfree(request);
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- list_del(&request->list);
- list_del(&request->client_list);
- kfree(request);
- } else
+ if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
break;
+
+ obj = &obj_priv->base;
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
+ i915_gem_object_move_to_inactive(obj);
}
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
ring->user_irq_put(dev, ring);
dev_priv->trace_irq_seqno = 0;
}
+
+ WARN_ON(i915_verify_lists(dev));
}
void
@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
*/
list_for_each_entry_safe(obj_priv, tmp,
&dev_priv->mm.deferred_free_list,
- list)
+ mm_list)
i915_gem_free_object_tail(&obj_priv->base);
}
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
}
-void
+static void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
mm.retire_work.work);
dev = dev_priv->dev;
- mutex_lock(&dev->struct_mutex);
+ /* Come back later if the device is busy... */
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ return;
+ }
+
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
- (HAS_BSD(dev) &&
- !list_empty(&dev_priv->bsd_ring.request_list))))
+ !list_empty(&dev_priv->bsd_ring.request_list) ||
+ !list_empty(&dev_priv->blt_ring.request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- int interruptible, struct intel_ring_buffer *ring)
+ bool interruptible, struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
BUG_ON(seqno == 0);
if (atomic_read(&dev_priv->mm.wedged))
- return -EIO;
+ return -EAGAIN;
- if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+ if (ring->outstanding_lazy_request) {
+ seqno = i915_add_request(dev, NULL, NULL, ring);
+ if (seqno == 0)
+ return -ENOMEM;
+ }
+ BUG_ON(seqno == dev_priv->next_seqno);
+
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ ring->get_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
ring->user_irq_put(dev, ring);
@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_end(dev, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
+ ret = -EAGAIN;
if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+ __func__, ret, seqno, ring->get_seqno(dev, ring),
+ dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
*/
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ ring->flush(dev, ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
+static void
i915_gem_flush(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t invalidate_domains,
- uint32_t flush_domains)
+ uint32_t flush_domains,
+ uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
- invalidate_domains,
- flush_domains);
-
- if (HAS_BSD(dev))
- dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
- invalidate_domains,
- flush_domains);
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ if (flush_rings & RING_RENDER)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->render_ring,
+ invalidate_domains, flush_domains);
+ if (flush_rings & RING_BSD)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->bsd_ring,
+ invalidate_domains, flush_domains);
+ if (flush_rings & RING_BLT)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->blt_ring,
+ invalidate_domains, flush_domains);
+ }
}
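The flush_rings argument is a bitmask, so a single call can name several rings at once. A hedged sketch of the assumed ring identifiers (the real definitions live in intel_ringbuffer.h and may differ):

/* Assumption: each ring id is a distinct bit, so accumulating
 * dev_priv->mm.flush_rings |= ring->id builds up a set such as
 * RING_RENDER | RING_BLT. */
enum intel_ring_id {
	RING_RENDER = 0x1,
	RING_BSD    = 0x2,
	RING_BLT    = 0x4,
};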
/**
@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
* safe to unbind from the GTT or access from the CPU.
*/
static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
* it.
*/
if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_wait_request(dev,
- obj_priv->last_rendering_seqno, obj_priv->ring);
- if (ret != 0)
+ ret = i915_do_wait_request(dev,
+ obj_priv->last_rendering_seqno,
+ interruptible,
+ obj_priv->ring);
+ if (ret)
return ret;
}
@@ -1952,14 +2111,10 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
-#if WATCH_BUF
- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
if (obj_priv->gtt_space == NULL)
return 0;
@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* should be safe and we need to clean up or else we might
* cause memory corruption through use-after-free.
*/
+ if (ret) {
+ i915_gem_clflush_object(obj);
+ obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
- if (obj_priv->agp_mem != NULL) {
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- obj_priv->agp_mem = NULL;
- }
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
i915_gem_object_put_pages(obj);
BUG_ON(obj_priv->pages_refcount);
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
-
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
+ i915_gem_info_remove_gtt(dev_priv, obj->size);
+ list_del_init(&obj_priv->mm_list);
- /* Remove ourselves from the LRU list if present. */
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ obj_priv->gtt_offset = 0;
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2020,48 +2169,50 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+static int i915_ring_idle(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (list_empty(&ring->gpu_write_list))
+ return 0;
+
+ i915_gem_flush_ring(dev, NULL, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ return i915_wait_request(dev,
+ i915_gem_next_request_seqno(dev, ring),
+ ring);
+}
+
int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- uint32_t seqno1, seqno2;
int ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev) ||
- list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list));
if (lists_empty)
return 0;
/* Flush everything onto the inactive list. */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->render_ring);
- if (seqno1 == 0)
- return -ENOMEM;
- ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
-
- if (HAS_BSD(dev)) {
- seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
- &dev_priv->bsd_ring);
- if (seqno2 == 0)
- return -ENOMEM;
+ ret = i915_ring_idle(dev, &dev_priv->render_ring);
+ if (ret)
+ return ret;
- ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
- if (ret)
- return ret;
- }
+ ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+ ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-int
+static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
@@ -2241,7 +2392,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+ bool interruptible)
{
struct drm_i915_fence_reg *reg = NULL;
struct drm_i915_gem_object *obj_priv = NULL;
@@ -2286,7 +2438,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
* private reference to obj like the other callers of put_fence_reg
* (set_tiling ioctl) do. */
drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, interruptible);
drm_gem_object_unreference(obj);
if (ret != 0)
return ret;
@@ -2308,7 +2460,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2343,7 +2496,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- ret = i915_find_fence_reg(dev);
+ ret = i915_find_fence_reg(dev, interruptible);
if (ret < 0)
return ret;
@@ -2421,15 +2574,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
* i915_gem_object_put_fence_reg - waits on outstanding fenced access
* to the buffer to finish, and then resets the fence register.
* @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
*
* Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv.
*/
int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+ bool interruptible)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg;
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
@@ -2444,20 +2601,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
*/
- if (!IS_I965G(dev)) {
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ if (reg->gpu) {
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
+ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
+ ret = i915_gem_object_wait_rendering(obj, interruptible);
+ if (ret)
return ret;
+
+ reg->gpu = false;
}
i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg (obj);
+ i915_gem_clear_fence_reg(obj);
return 0;
}
@@ -2490,7 +2650,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev->gtt_total) {
+ if (obj->size > dev_priv->mm.gtt_total) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@@ -2498,19 +2658,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
search_free:
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
- if (free_space != NULL) {
+ if (free_space != NULL)
obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
alignment);
- if (obj_priv->gtt_space != NULL)
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
if (obj_priv->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
-#if WATCH_LRU
- DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
ret = i915_gem_evict_something(dev, obj->size, alignment);
if (ret)
return ret;
@@ -2518,10 +2672,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
-#if WATCH_BUF
- DRM_INFO("Binding object of size %zd at 0x%08x\n",
- obj->size, obj_priv->gtt_offset);
-#endif
ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
@@ -2553,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
obj->size >> PAGE_SHIFT,
- obj_priv->gtt_offset,
+ obj_priv->gtt_space->start,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
@@ -2566,11 +2716,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
goto search_free;
}
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
/* keep track of the bound object by adding it to the inactive list */
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ i915_gem_info_add_gtt(dev_priv, obj->size);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2579,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
return 0;
@@ -2603,25 +2753,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+ bool pipelined)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
- i915_gem_flush(dev, 0, obj->write_domain);
- if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
- return -ENOMEM;
+ i915_gem_flush_ring(dev, NULL,
+ to_intel_bo(obj)->ring,
+ 0, obj->write_domain);
+ BUG_ON(obj->write_domain);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
- return 0;
+
+ if (pipelined)
+ return 0;
+
+ return i915_gem_object_wait_rendering(obj, true);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2665,26 +2820,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
- int ret = 0;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_object_flush_gtt_write_domain(obj);
- break;
- case I915_GEM_DOMAIN_CPU:
- i915_gem_object_flush_cpu_write_domain(obj);
- break;
- default:
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- break;
- }
-
- return ret;
-}
-
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -2702,32 +2837,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ if (write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
- /* If we're writing through the GTT domain, then CPU and GPU caches
- * will need to be invalidated at next use.
- */
- if (write)
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
}
@@ -2744,51 +2875,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in the modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+ bool pipelined)
{
- struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t old_write_domain, old_read_domains;
+ uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
if (ret)
return ret;
- /* Wait on any GPU rendering and flushing to occur. */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
- 0,
- obj_priv->ring);
- if (ret != 0)
+ /* Currently, we are always called from a non-interruptible context. */
+ if (!pipelined) {
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
return ret;
}
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- old_write_domain);
+ obj->write_domain);
return 0;
}
@@ -2805,12 +2921,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
@@ -2821,6 +2932,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ if (write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
+
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -2840,7 +2957,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2963,26 +3080,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* drm_agp_chipset_flush
*/
static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
- BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
- BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
intel_mark_busy(dev, obj);
-#if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, obj->pending_read_domains,
- obj->write_domain, obj->pending_write_domain);
-#endif
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
@@ -3009,13 +3118,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
-#endif
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- }
old_read_domains = obj->read_domains;
@@ -3029,21 +3133,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- if (obj_priv->ring == &dev_priv->render_ring)
- dev_priv->flush_rings |= FLUSH_RENDER_RING;
- else if (obj_priv->ring == &dev_priv->bsd_ring)
- dev_priv->flush_rings |= FLUSH_BSD_RING;
- }
-
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
-#if WATCH_BUF
- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-#endif
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ dev_priv->mm.flush_rings |= obj_priv->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ dev_priv->mm.flush_rings |= ring->id;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -3106,12 +3201,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -3164,66 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
* Pin an object to the GTT and evaluate the relocations landing in it.
*/
static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int i, ret;
- void __iomem *reloc_page;
- bool need_fence;
-
- need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj_priv->tiling_mode != I915_TILING_NONE;
-
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(obj,
- obj_priv->tiling_mode)) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
- }
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ struct drm_gem_object *target_obj = NULL;
+ uint32_t target_handle = 0;
+ int i, ret = 0;
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
- if (ret)
- return ret;
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+ uint32_t target_offset;
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc))) {
+ ret = -EFAULT;
+ break;
}
- }
- entry->offset = obj_priv->gtt_offset;
+ if (reloc.target_handle != target_handle) {
+ drm_gem_object_unreference(target_obj);
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
- uint32_t reloc_val, reloc_offset;
- uint32_t __iomem *reloc_entry;
-
- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return -ENOENT;
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL) {
+ ret = -ENOENT;
+ break;
+ }
+
+ target_handle = reloc.target_handle;
}
- target_obj_priv = to_intel_bo(target_obj);
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3231,268 +3297,266 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
"presumed %08x delta %08x\n",
__func__,
obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
#endif
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
- if (target_obj_priv->gtt_space == NULL) {
+ if (target_offset == 0) {
DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ reloc.target_handle);
+ ret = -EINVAL;
+ break;
}
/* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
+ if (reloc.write_domain & (reloc.write_domain - 1)) {
DRM_ERROR("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ ret = -EINVAL;
+ break;
}
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ ret = -EINVAL;
+ break;
}
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
target_obj->pending_write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
- drm_gem_object_unreference(target_obj);
+ if (target_offset == reloc.presumed_offset)
continue;
- }
/* Check that the relocation address is valid... */
- if (reloc->offset > obj->size - 4) {
+ if (reloc.offset > obj->base.size - 4) {
DRM_ERROR("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->base.size);
+ ret = -EINVAL;
+ break;
}
- if (reloc->offset & 3) {
+ if (reloc.offset & 3) {
DRM_ERROR("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ ret = -EINVAL;
+ break;
}
/* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
+ if (reloc.delta >= target_obj->size) {
DRM_ERROR("Relocation beyond target object bounds: "
"obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta, (int) target_obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
+ obj, reloc.target_handle,
+ (int) reloc.delta, (int) target_obj->size);
+ ret = -EINVAL;
+ break;
}
- /* Map the page containing the relocation we're going to
- * perform.
- */
- reloc_offset = obj_priv->gtt_offset + reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- (reloc_offset &
- ~(PAGE_SIZE - 1)),
- KM_USER0);
- reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-
-#if WATCH_BUF
- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc->offset,
- readl(reloc_entry), reloc_val);
-#endif
- writel(reloc_val, reloc_entry);
- io_mapping_unmap_atomic(reloc_page, KM_USER0);
-
- /* The updated presumed offset for this entry will be
- * copied back out to the user.
- */
- reloc->presumed_offset = target_obj_priv->gtt_offset;
-
- drm_gem_object_unreference(target_obj);
- }
-
-#if WATCH_BUF
- if (0)
- i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
- return 0;
-}
+ reloc.delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+ char *vaddr;
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * Note that if we were to use the current jiffies each time around the loop,
- * we wouldn't escape the function with any frames outstanding if the time to
- * render a frame was over 20ms.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
- unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
+ vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc.delta;
+ kunmap_atomic(vaddr);
+ } else {
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
- request = list_first_entry(&i915_file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+ if (ret)
+ break;
- if (time_after_eq(request->emitted_jiffies, recent_enough))
- break;
+ /* Map the page containing the relocation we're going to perform. */
+ reloc.offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc.offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc.offset & ~PAGE_MASK));
+ iowrite32(reloc.delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
- ret = i915_wait_request(dev, request->seqno, request->ring);
- if (ret != 0)
- break;
+ /* and update the user's relocation entry */
+ reloc.presumed_offset = target_offset;
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset))) {
+ ret = -EFAULT;
+ break;
+ }
}
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference(target_obj);
return ret;
}
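For context, each fixup processed above arrives from userspace as a struct drm_i915_gem_relocation_entry. A hedged userspace-side sketch of filling one in (the struct and the domain flag are the real uapi names; fill_reloc itself is hypothetical):

#include <stdint.h>
#include <i915_drm.h>	/* install path depends on the libdrm package */

/* Hypothetical helper: request that the 32-bit word at batch_offset be
 * patched to the target object's GTT address plus delta. presumed_offset
 * records userspace's cached guess; when it is still correct the kernel
 * skips the write entirely (the `continue` in the loop above). */
static void fill_reloc(struct drm_i915_gem_relocation_entry *r,
		       uint64_t batch_offset, uint32_t target_handle,
		       uint32_t delta, uint64_t presumed_offset)
{
	r->offset = batch_offset;
	r->target_handle = target_handle;
	r->delta = delta;
	r->presumed_offset = presumed_offset;
	r->read_domains = I915_GEM_DOMAIN_RENDER;
	r->write_domain = 0;
}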
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry **relocs)
+i915_gem_execbuffer_pin(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
- uint32_t reloc_count = 0, reloc_index = 0, i;
- int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i, retry;
- *relocs = NULL;
- for (i = 0; i < buffer_count; i++) {
- if (reloc_count + exec_list[i].relocation_count < reloc_count)
- return -EINVAL;
- reloc_count += exec_list[i].relocation_count;
- }
+ /* attempt to pin all of the buffers into the GTT */
+ for (retry = 0; retry < 2; retry++) {
+ ret = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ bool need_fence =
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence &&
+ !i915_gem_object_fence_offset_ok(&obj->base,
+ obj->tiling_mode)) {
+ ret = i915_gem_object_unbind(&obj->base);
+ if (ret)
+ break;
+ }
- *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL) {
- DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
- return -ENOMEM;
- }
+ ret = i915_gem_object_pin(&obj->base, entry->alignment);
+ if (ret)
+ break;
- for (i = 0; i < buffer_count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
+ /*
+ * Pre-965 chips need a fence register set up in order
+ * to properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(&obj->base, true);
+ if (ret) {
+ i915_gem_object_unpin(&obj->base);
+ break;
+ }
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+ dev_priv->fence_regs[obj->fence_reg].gpu = true;
+ }
- ret = copy_from_user(&(*relocs)[reloc_index],
- user_relocs,
- exec_list[i].relocation_count *
- sizeof(**relocs));
- if (ret != 0) {
- drm_free_large(*relocs);
- *relocs = NULL;
- return -EFAULT;
+ entry->offset = obj->gtt_offset;
}
- reloc_index += exec_list[i].relocation_count;
+ while (i--)
+ i915_gem_object_unpin(object_list[i]);
+
+ if (ret == 0)
+ break;
+
+ if (ret != -ENOSPC || retry)
+ return ret;
+
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ return ret;
}
return 0;
}
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
- uint32_t buffer_count,
- struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- uint32_t reloc_count = 0, i;
- int ret = 0;
-
- if (relocs == NULL)
- return 0;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ u32 seqno = 0;
+ int ret;
- for (i = 0; i < buffer_count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int unwritten;
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+ ring = request->ring;
+ seqno = request->seqno;
+ }
+ spin_unlock(&file_priv->mm.lock);
- unwritten = copy_to_user(user_relocs,
- &relocs[reloc_count],
- exec_list[i].relocation_count *
- sizeof(*relocs));
+ if (seqno == 0)
+ return 0;
- if (unwritten) {
- ret = -EFAULT;
- goto err;
- }
+ ret = 0;
+ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ /* And wait for the seqno to pass without holding any locks and
+ * causing extra latency for others. This is safe as the irq
+ * generation is designed to be run atomically and so is
+ * lockless.
+ */
+ ring->user_irq_get(dev, ring);
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->user_irq_put(dev, ring);
- reloc_count += exec_list[i].relocation_count;
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
}
-err:
- drm_free_large(relocs);
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
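A worked example of the 20 msec window described above, assuming CONFIG_HZ=250 (one jiffy equals 4 ms); the exact numbers depend on the configured HZ:

/* Sketch of the arithmetic only. */
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);	/* jiffies - 5 */
/* Requests with emitted_jiffies >= recent_enough are younger than ~20 ms and
 * stop the scan; the wait then targets the newest request already outside
 * that window, so the CPU is kept at most roughly 20 ms ahead of the GPU. */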
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
+ uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3509,44 +3573,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
}
static int
-i915_gem_wait_for_pending_flip(struct drm_device *dev,
- struct drm_gem_object **object_list,
- int count)
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- DEFINE_WAIT(wait);
- int i, ret = 0;
+ int i;
- for (;;) {
- prepare_to_wait(&dev_priv->pending_flip_queue,
- &wait, TASK_INTERRUPTIBLE);
- for (i = 0; i < count; i++) {
- obj_priv = to_intel_bo(object_list[i]);
- if (atomic_read(&obj_priv->pending_flip) > 0)
- break;
- }
- if (i == count)
- break;
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
- if (!signal_pending(current)) {
- mutex_unlock(&dev->struct_mutex);
- schedule();
- mutex_lock(&dev->struct_mutex);
- continue;
- }
- ret = -ERESTARTSYS;
- break;
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
}
- finish_wait(&dev_priv->pending_flip_queue, &wait);
- return ret;
+ return 0;
}
-
-int
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv,
+ struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec_list)
{
@@ -3555,26 +3607,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs = NULL;
- int ret = 0, ret2, i, pinned = 0;
+ struct drm_i915_gem_request *request = NULL;
+ int ret, i, flips;
uint64_t exec_offset;
- uint32_t seqno, flush_domains, reloc_index;
- int pin_tries, flips;
struct intel_ring_buffer *ring = NULL;
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
+
+ ret = validate_exec_list(exec_list, args->buffer_count);
+ if (ret)
+ return ret;
+
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
- if (args->flags & I915_EXEC_BSD) {
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with wrong flag\n");
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->bsd_ring;
- } else {
- ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->blt_ring;
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
}
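A hedged userspace-side sketch of selecting the new BLT ring through these flags (drmIoctl, the execbuffer2 ioctl and I915_EXEC_BLT are the real interfaces; the exec_object2 list and batch length are assumed to be prepared elsewhere):

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl() from libdrm */
#include <i915_drm.h>

/* Hypothetical helper: submit an already-built exec_object2 list to the
 * BLT ring; kernels without a BLT ring reject this with -EINVAL. */
static int submit_to_blt(int fd, struct drm_i915_gem_exec_object2 *objs,
			 uint32_t count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;	/* last entry is the batch */
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BLT;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}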
if (args->buffer_count < 1) {
@@ -3609,20 +3682,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
- ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
- &relocs);
- if (ret != 0)
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
- mutex_lock(&dev->struct_mutex);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EIO;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
goto pre_mutex_err;
- }
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
@@ -3631,9 +3699,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- flips = 0;
for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ object_list[i] = drm_gem_object_lookup(dev, file,
exec_list[i].handle);
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3654,75 +3721,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
obj_priv->in_execbuffer = true;
- flips += atomic_read(&obj_priv->pending_flip);
}
- if (flips > 0) {
- ret = i915_gem_wait_for_pending_flip(dev, object_list,
- args->buffer_count);
- if (ret)
- goto err;
- }
-
- /* Pin and relocate */
- for (pin_tries = 0; ; pin_tries++) {
- ret = 0;
- reloc_index = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i],
- &relocs[reloc_index]);
- if (ret)
- break;
- pinned = i + 1;
- reloc_index += exec_list[i].relocation_count;
- }
- /* success */
- if (ret == 0)
- break;
-
- /* error other than GTT full, or we've already tried again */
- if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS) {
- unsigned long long total_size = 0;
- int num_fences = 0;
- for (i = 0; i < args->buffer_count; i++) {
- obj_priv = to_intel_bo(object_list[i]);
-
- total_size += object_list[i]->size;
- num_fences +=
- exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj_priv->tiling_mode != I915_TILING_NONE;
- }
- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
- pinned+1, args->buffer_count,
- total_size, num_fences,
- ret);
- DRM_ERROR("%d objects [%d pinned], "
- "%d object bytes [%d pinned], "
- "%d/%d gtt bytes\n",
- atomic_read(&dev->object_count),
- atomic_read(&dev->pin_count),
- atomic_read(&dev->object_memory),
- atomic_read(&dev->pin_memory),
- atomic_read(&dev->gtt_memory),
- dev->gtt_total);
- }
- goto err;
- }
-
- /* unpin all of our buffers */
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
- pinned = 0;
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_pin(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
- /* evict everyone we can from the aperture */
- ret = i915_gem_evict_everything(dev);
- if (ret && ret != -ENOSPC)
+ /* The objects are in their final locations, apply the relocations. */
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+ if (ret)
goto err;
}
@@ -3735,33 +3749,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
- /* Sanity check the batch buffer, prior to moving objects */
- exec_offset = exec_list[args->buffer_count - 1].offset;
- ret = i915_gem_check_execbuffer (args, exec_offset);
+ /* Sanity check the batch buffer */
+ exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+ ret = i915_gem_check_execbuffer(args, exec_offset);
if (ret != 0) {
DRM_ERROR("execbuf with invalid offset/length\n");
goto err;
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
/* Zero the global flush/invalidate flags. These
* will be modified as new domains are computed
* for each object
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
- dev_priv->flush_rings = 0;
+ dev_priv->mm.flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
/* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj);
+ i915_gem_object_set_to_gpu_domain(obj, ring);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3769,38 +3779,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dev->invalidate_domains,
dev->flush_domains);
#endif
- i915_gem_flush(dev,
+ i915_gem_flush(dev, file,
dev->invalidate_domains,
- dev->flush_domains);
- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
- if (dev_priv->flush_rings & FLUSH_BSD_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
+ dev->flush_domains,
+ dev_priv->mm.flush_rings);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
-
obj->write_domain = obj->pending_write_domain;
- if (obj->write_domain)
- list_move_tail(&obj_priv->gpu_write_list,
- &dev_priv->mm.gpu_write_list);
- else
- list_del_init(&obj_priv->gpu_write_list);
-
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
#if WATCH_COHERENCY
for (i = 0; i < args->buffer_count; i++) {
i915_gem_object_check_coherency(object_list[i],
@@ -3815,9 +3808,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ if (object_list[i]->write_domain)
+ flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring,
+ MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(dev, ring, MI_NOOP);
+ intel_ring_advance(dev, ring);
+ }
+ }
+
/* Exec the batchbuffer */
ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
+ cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
@@ -3827,38 +3849,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires
*/
- flush_domains = i915_retire_commands(dev, ring);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ i915_retire_commands(dev, ring);
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, file_priv, flush_domains, ring);
- BUG_ON(seqno == 0);
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- obj_priv = to_intel_bo(obj);
- i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+ i915_gem_object_move_to_active(obj, ring);
+ if (obj->write_domain)
+ list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+ &ring->gpu_write_list);
}
-#if WATCH_LRU
- i915_dump_lru(dev, __func__);
-#endif
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ i915_add_request(dev, file, request, ring);
+ request = NULL;
err:
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
-
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
obj_priv = to_intel_bo(object_list[i]);
@@ -3870,22 +3875,9 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- /* Copy the updated relocations out regardless of current error
- * state. Failure to update the relocs would mean that the next
- * time userland calls execbuf, it would do so with presumed offset
- * state that didn't match the actual object state.
- */
- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
- relocs);
- if (ret2 != 0) {
- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-
- if (ret == 0)
- ret = ret2;
- }
-
drm_free_large(object_list);
kfree(cliprects);
+ kfree(request);
return ret;
}
@@ -3942,7 +3934,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
- if (!IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
@@ -4039,12 +4031,12 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
if (obj_priv->gtt_space != NULL) {
if (alignment == 0)
@@ -4072,14 +4064,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
* remove it from the inactive list
*/
if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_del_init(&obj_priv->list);
+ i915_gem_info_add_pin(dev_priv, obj->size);
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->mm_list,
+ &dev_priv->mm.pinned_list);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -4090,7 +4081,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
obj_priv->pin_count--;
BUG_ON(obj_priv->pin_count < 0);
BUG_ON(obj_priv->gtt_space == NULL);
@@ -4100,14 +4091,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
* the inactive list
*/
if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- list_move_tail(&obj_priv->list,
+ if (!obj_priv->active)
+ list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.inactive_list);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
+ i915_gem_info_remove_pin(dev_priv, obj->size);
}
- i915_verify_inactive(dev, __FILE__, __LINE__);
+ WARN_ON(i915_verify_lists(dev));
}
int
@@ -4119,41 +4108,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
obj_priv->user_pin_count++;
obj_priv->pin_filp = file_priv;
if (obj_priv->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ if (ret)
+ goto out;
}
/* XXX - flush the CPU caches for pinned objects
@@ -4161,10 +4145,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
*/
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
int
@@ -4174,24 +4159,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
obj_priv = to_intel_bo(obj);
+
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
obj_priv->user_pin_count--;
if (obj_priv->user_pin_count == 0) {
@@ -4199,9 +4184,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unpin(obj);
}
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
int
@@ -4211,22 +4198,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args->handle);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
- mutex_lock(&dev->struct_mutex);
+ obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- obj_priv = to_intel_bo(obj);
args->busy = obj_priv->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
@@ -4234,10 +4223,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain) {
- i915_gem_flush(dev, 0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
- }
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(dev, file_priv,
+ obj_priv->ring,
+ 0, obj->write_domain);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
@@ -4250,8 +4239,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
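A hedged userspace-side sketch of how this interface is typically consumed, for example to avoid reusing a buffer the GPU is still working on (the uapi names are real; fd and handle are assumed to exist):

#include <xf86drm.h>
#include <i915_drm.h>

/* Hypothetical helper: non-blocking busy check for a GEM handle. */
static int bo_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return 1;	/* be conservative on ioctl failure */
	return busy.busy != 0;
}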
int
@@ -4268,6 +4258,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_madvise *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ int ret;
switch (args->madv) {
case I915_MADV_DONTNEED:
@@ -4277,22 +4268,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
- args->handle);
- return -ENOENT;
+ ret = -ENOENT;
+ goto unlock;
}
-
- mutex_lock(&dev->struct_mutex);
obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (obj_priv->madv != __I915_MADV_PURGED)
@@ -4305,15 +4294,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
args->retained = obj_priv->madv != __I915_MADV_PURGED;
+out:
drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
-
- return 0;
+ return ret;
}
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4325,18 +4316,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ i915_gem_info_add_obj(dev_priv, size);
+
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->agp_type = AGP_USER_MEMORY;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
- trace_i915_gem_object_create(&obj->base);
-
return &obj->base;
}
@@ -4356,7 +4348,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->list,
+ list_move(&obj_priv->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
@@ -4365,6 +4357,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
i915_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
+ i915_gem_info_remove_obj(dev_priv, obj->size);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
@@ -4395,10 +4388,7 @@ i915_gem_idle(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended ||
- (dev_priv->render_ring.gem_object == NULL) ||
- (HAS_BSD(dev) &&
- dev_priv->bsd_ring.gem_object == NULL)) {
+ if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4423,7 +4413,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
+ del_timer_sync(&dev_priv->hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@@ -4503,36 +4493,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- dev_priv->render_ring = render_ring;
-
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
- }
-
if (HAS_PIPE_CONTROL(dev)) {
ret = i915_gem_init_pipe_control(dev);
if (ret)
return ret;
}
- ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ ret = intel_init_render_ring_buffer(dev);
if (ret)
goto cleanup_pipe_control;
if (HAS_BSD(dev)) {
- dev_priv->bsd_ring = bsd_ring;
- ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ ret = intel_init_bsd_ring_buffer(dev);
if (ret)
goto cleanup_render_ring;
}
+ if (HAS_BLT(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
dev_priv->next_seqno = 1;
return 0;
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
cleanup_render_ring:
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
@@ -4547,8 +4535,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
}
@@ -4577,15 +4565,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- spin_lock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4627,28 +4615,34 @@ i915_gem_lastclose(struct drm_device *dev)
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
int i;
drm_i915_private_t *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->mm.active_list_lock);
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
- if (HAS_BSD(dev)) {
- INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
- }
+ init_ring_lists(&dev_priv->render_ring);
+ init_ring_lists(&dev_priv->bsd_ring);
+ init_ring_lists(&dev_priv->blt_ring);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ init_completion(&dev_priv->error_completion);
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
@@ -4667,21 +4661,30 @@ i915_gem_load(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
- if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ case 2:
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ break;
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4691,8 +4694,8 @@ i915_gem_load(struct drm_device *dev)
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
-int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+ int id, int size, int align)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4724,7 +4727,7 @@ kfree_obj:
return ret;
}
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4772,11 +4775,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *dst = kmap_atomic(obj_priv->pages[i]);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst, KM_USER0);
+ kunmap_atomic(dst);
}
drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev);
@@ -4833,11 +4836,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *src = kmap_atomic(obj_priv->pages[i]);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
}
i915_gem_object_put_pages(obj);
@@ -4869,18 +4872,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
- mutex_lock(&dev->struct_mutex);
- while (!list_empty(&i915_file_priv->mm.request_list))
- list_del_init(i915_file_priv->mm.request_list.next);
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
}
static int
@@ -4889,12 +4899,10 @@ i915_gpu_is_active(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list);
- if (HAS_BSD(dev))
- lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_empty(&dev_priv->render_ring.active_list) &&
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list);
return !lists_empty;
}
@@ -4916,7 +4924,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
if (mutex_trylock(&dev->struct_mutex)) {
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
- list)
+ mm_list)
cnt++;
mutex_unlock(&dev->struct_mutex);
}
@@ -4942,7 +4950,7 @@ rescan:
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
@@ -4971,7 +4979,7 @@ rescan:
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (nr_to_scan > 0) {
i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
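
[Editor's note] The ioctl hunks above all converge on the same unwind shape: take struct_mutex up front (interruptibly), look up the GEM object, and funnel every failure through out:/unlock: labels so the unreference and the mutex_unlock() each appear exactly once. A minimal, generic C sketch of that idiom follows — it uses a pthread mutex and stand-in lookup/put helpers purely for illustration, not the driver's actual types:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	struct object { int refcount; int pinned_by_caller; };

	/* Stand-in for drm_gem_object_lookup(): may return NULL. */
	static struct object *lookup(int handle)
	{
		static struct object obj = { .refcount = 1, .pinned_by_caller = 1 };
		if (handle != 42)
			return NULL;
		obj.refcount++;
		return &obj;
	}

	static void put_object(struct object *obj) { obj->refcount--; }

	/* Single-exit unwind: every error path falls through out/unlock,
	 * so the unreference and the unlock are written exactly once. */
	static int unpin_ioctl(int handle, int caller_owns_pin)
	{
		struct object *obj;
		int ret = 0;

		pthread_mutex_lock(&lock);	/* the driver takes this interruptibly */

		obj = lookup(handle);
		if (obj == NULL) {
			ret = -ENOENT;
			goto unlock;
		}

		if (!caller_owns_pin || !obj->pinned_by_caller) {
			ret = -EINVAL;
			goto out;
		}
		obj->pinned_by_caller = 0;	/* the actual unpin work */

	out:
		put_object(obj);
	unlock:
		pthread_mutex_unlock(&lock);
		return ret;
	}

	int main(void)
	{
		printf("unpin(42) -> %d\n", unpin_ioctl(42, 1));	/* 0 */
		printf("unpin(7)  -> %d\n", unpin_ioctl(7, 1));	/* -ENOENT */
		return 0;
	}
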
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 80f380b1d951..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,29 +30,112 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
{
+ static int warned;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = &obj_priv->base;
- if (obj_priv->pin_count || obj_priv->active ||
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ if (warned)
+ return 0;
+
+ list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed render active %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid render active %p (a %d r %x)\n",
+ obj,
+ obj->active,
+ obj->base.read_domains);
+ err++;
+ } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+ DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+ obj,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed flushing %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+ list_empty(&obj->gpu_write_list)){
+ DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
- obj_priv->pin_count, obj_priv->active,
- obj->write_domain, file, line);
+ obj->active,
+ obj->base.write_domain,
+ !list_empty(&obj->gpu_write_list));
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed gpu write %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+ DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+ obj,
+ obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed inactive %p\n", obj);
+ err++;
+ break;
+ } else if (obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
}
+
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("freed pinned %p\n", obj);
+ err++;
+ break;
+ } else if (!obj->pin_count || obj->active ||
+ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+ DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+ obj,
+ obj->pin_count, obj->active,
+ obj->base.write_domain);
+ err++;
+ }
+ }
+
+ return warned = err;
}
#endif /* WATCH_INACTIVE */
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
}
#endif
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- spin_lock(&dev_priv->mm.active_list_lock);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5c428fa3e0b3..43a4013f53fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,49 +31,6 @@
#include "i915_drv.h"
#include "i915_drm.h"
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
- struct list_head **render_iter,
- struct list_head **bsd_iter)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
- if (*render_iter != &dev_priv->render_ring.active_list)
- render_obj = list_entry(*render_iter,
- struct drm_i915_gem_object,
- list);
-
- if (HAS_BSD(dev)) {
- if (*bsd_iter != &dev_priv->bsd_ring.active_list)
- bsd_obj = list_entry(*bsd_iter,
- struct drm_i915_gem_object,
- list);
-
- if (render_obj == NULL) {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
-
- if (bsd_obj == NULL) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-
- /* XXX can we handle seqno wrapping? */
- if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- } else {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
- } else {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-}
-
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
-#define i915_for_each_active_object(OBJ, R, B) \
- *(R) = dev_priv->render_ring.active_list.next; \
- *(B) = dev_priv->bsd_ring.active_list.next; \
- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv;
- struct list_head *render_iter, *bsd_iter;
int ret = 0;
i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj_priv, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
if (obj_priv->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
goto found;
}
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
if (! obj_priv->base.write_domain || obj_priv->pin_count)
continue;
@@ -212,14 +163,11 @@ i915_gem_evict_everything(struct drm_device *dev)
int ret;
bool lists_empty;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list));
if (lists_empty)
return -ENOSPC;
@@ -234,13 +182,11 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
- spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!lists_empty);
return 0;
@@ -258,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
- list)->base;
+ mm_list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 710eca70b323..af352de70be1 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+ if (IS_GEN5(dev) || IS_GEN6(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (!IS_I9XX(dev)) {
+ } else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I9XX(dev) ||
+ if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* i965 stores the end address of the gtt mapping in the fence
* reg, so dont bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ } else {
if (stride > 8192)
return false;
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
/* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I965G(dev)) {
- if (obj_priv->gtt_offset & (obj->size - 1))
+ if (INTEL_INFO(dev)->gen >= 4)
+ return true;
+
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+
+ if (IS_GEN3(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
return false;
- if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
}
return true;
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
+
+ ret = i915_gem_check_is_wedged(dev);
+ if (ret)
+ return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj);
else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj);
+ ret = i915_gem_object_put_fence_reg(obj, true);
else
i915_gem_release_mmap(obj);
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static int
+static void
i915_gem_swizzle_page(struct page *page)
{
+ char temp[64];
char *vaddr;
int i;
- char temp[64];
vaddr = kmap(page);
- if (vaddr == NULL)
- return -ENOMEM;
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
}
kunmap(page);
-
- return 0;
}
void
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj_priv->bit_17) != 0)) {
- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
- if (ret != 0) {
- DRM_ERROR("Failed to swizzle page\n");
- return;
- }
+ i915_gem_swizzle_page(obj_priv->pages[i]);
set_page_dirty(obj_priv->pages[i]);
}
}
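
[Editor's note] i915_gem_swizzle_page() above becomes void because kmap() on a lowmem page cannot fail; only the start of its loop body is visible in the hunk. Assuming the elided lines swap the two 64-byte halves of each 128-byte span (which is what the bit-17 swizzle correction amounts to), a standalone sketch of that swap is:

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/* Swap the two 64-byte halves of every 128-byte span in a page.
	 * This mirrors what the elided body of i915_gem_swizzle_page()
	 * is assumed to do. */
	static void swizzle_page(char *vaddr)
	{
		char temp[64];
		int i;

		for (i = 0; i < PAGE_SIZE; i += 128) {
			memcpy(temp, &vaddr[i], 64);
			memcpy(&vaddr[i], &vaddr[i + 64], 64);
			memcpy(&vaddr[i + 64], temp, 64);
		}
	}

	int main(void)
	{
		static char page[PAGE_SIZE];
		memset(page, 0xaa, 64);		/* first half of the first span */
		memset(page + 64, 0x55, 64);	/* second half */
		swizzle_page(page);
		printf("%02x %02x\n",
		       (unsigned char)page[0], (unsigned char)page[64]);
		return 0;			/* prints "55 aa" */
	}
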
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 744225ebb4b2..729fd0c91d7b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
/* For display hotplug interrupt */
-void
+static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
@@ -191,12 +191,7 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
- return 1;
-
- return 0;
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
- u32 high1, high2, low, count;
-
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ u32 high1, high2, low;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return 0;
}
+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
- count = (high1 << 8) | low;
-
- return count;
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
}
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- if (mode_config->num_encoder) {
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->hot_plug)
- (*intel_encoder->hot_plug) (intel_encoder);
- }
- }
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->hot_plug)
+ encoder->hot_plug(encoder);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
return;
}
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static void notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = ring->get_seqno(dev, ring);
+ ring->irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ wake_up_all(&ring->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
goto done;
+ if (HAS_PCH_CPT(dev))
+ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+ else
+ hotplug_mask = SDE_HOTPLUG_MASK;
+
ret = IRQ_HANDLED;
if (dev->primary->master) {
@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY) {
- u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
- render_ring->irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-
+ if (gt_iir & GT_PIPE_NOTIFY)
+ notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & bsd_usr_interrupt)
+ notify_ring(dev, &dev_priv->bsd_ring);
+ if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->blt_ring);
if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ intel_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
}
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK)
@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
drm_handle_vblank(dev, 1);
/* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
+ if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
if (de_iir & DE_PCU_EVENT) {
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
- if (IS_I965G(dev)) {
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i965_reset(dev, GDRST_RENDER)) {
- atomic_set(&dev_priv->mm.wedged, 0);
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
- }
- } else {
- DRM_DEBUG_DRIVER("reboot required\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i915_reset(dev, GRDOM_RENDER)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
+ complete_all(&dev_priv->error_completion);
}
}
+#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,
local_irq_save(flags);
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset,
- KM_IRQ0);
+ reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s, KM_IRQ0);
+ io_mapping_unmap_atomic(s);
local_irq_restore(flags);
dst->pages[page] = d;
@@ -511,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
if (IS_I830(dev) || IS_845G(dev))
cmd = MI_BATCH_BUFFER;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
MI_BATCH_NON_SECURE_I965);
else
@@ -584,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
- error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+ DRM_DEBUG_DRIVER("generating error event\n");
+
+ error->seqno =
+ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
error->ipeir = I915_READ(IPEIR);
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
@@ -612,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv,
- &dev_priv->render_ring.active_list, list) {
-
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -631,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
/* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -649,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
@@ -668,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev)
}
/* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userpace.
+ * method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
if (batchbuffer[1] != batchbuffer[0])
@@ -690,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
- list_for_each_entry(obj_priv,
- &dev_priv->render_ring.active_list, list) {
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
@@ -744,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev)
if (error)
i915_error_state_free(dev, error);
}
+#else
+#define i915_capture_error_state(x)
+#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
@@ -785,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
printk(KERN_ERR "page table error\n");
@@ -811,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR "instruction error\n");
printk(KERN_ERR " INSTPM: 0x%08x\n",
I915_READ(INSTPM));
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
printk(KERN_ERR " IPEIR: 0x%08x\n",
@@ -876,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
+ INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
/*
* Wakeup waiting processes so they don't hang
*/
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
+ if (HAS_BSD(dev))
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLT(dev))
+ wake_up_all(&dev_priv->blt_ring.irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -912,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj_priv = to_intel_bo(work->pending_flip_obj);
- if(IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
} else {
@@ -942,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
atomic_inc(&dev_priv->irq_received);
@@ -951,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
iir = I915_READ(IIR);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
else
vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1019,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
READ_BREADCRUMB(dev_priv);
}
- if (iir & I915_USER_INTERRUPT) {
- u32 seqno =
- render_ring->get_gem_seqno(dev, render_ring);
- render_ring->irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
- }
-
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->render_ring);
if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ notify_ring(dev, &dev_priv->bsd_ring);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1065,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
- opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -1207,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 pipeconf;
- pipeconf = I915_READ(pipeconf_reg);
- if (!(pipeconf & PIPEACONF_ENABLE))
+ if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -1252,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PCH_SPLIT(dev))
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -1311,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-struct drm_i915_gem_request *
+static struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1331,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
- /* No reset support on this chip yet. */
- if (IS_GEN6(dev))
- return;
-
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
@@ -1347,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(i915_get_gem_seqno(dev,
- &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
+ i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+ i915_get_tail_request(dev)->seqno)) {
bool missed_wakeup = false;
dev_priv->hangcheck_count = 0;
@@ -1357,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data)
/* Issue a wake-up to catch stuck h/w. */
if (dev_priv->render_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->render_ring.irq_queue);
missed_wakeup = true;
}
if (dev_priv->bsd_ring.waiting_gem_seqno &&
waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (dev_priv->blt_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+ wake_up_all(&dev_priv->blt_ring.irq_queue);
missed_wakeup = true;
}
@@ -1377,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+
+ if (!IS_GEN2(dev)) {
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ u32 tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ goto out;
+ }
+ }
+
i915_handle_error(dev, true);
return;
}
@@ -1388,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
+out:
/* Reset timer case chip hangs without another request being added */
- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
/* drm_dma.h hooks
@@ -1424,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
- u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ u32 hotplug_mask;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
@@ -1436,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
- if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
+ if (IS_GEN6(dev)) {
+ render_mask =
+ GT_PIPE_NOTIFY |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ }
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+ }
+
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ if (HAS_PCH_CPT(dev)) {
+ hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+ } else {
+ hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ }
+
dev_priv->pch_irq_mask_reg = ~hotplug_mask;
dev_priv->pch_irq_enable_reg = hotplug_mask;
@@ -1506,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 error_mask;
DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
-
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLT(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1578,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
- opregion_enable_asle(dev);
+ intel_opregion_enable_asle(dev);
return 0;
}
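
[Editor's note] The i915_get_vblank_counter() rework above keeps the classic split-register read: the high and low frame-count fields latch independently, so the code reads high, then low, then high again and retries until the two high reads agree before shifting and combining. A generic, self-contained sketch of that idiom, with hypothetical read_high()/read_low() accessors standing in for the PIPE*FRAME* register reads:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical 24-bit hardware counter exposed as two unlatched fields. */
	static volatile uint32_t hw_counter;
	static uint32_t read_high(void) { return (hw_counter >> 8) & 0xffff; }
	static uint32_t read_low(void)  { return hw_counter & 0xff; }

	/* Retry until two reads of the high field agree, so the low byte
	 * sampled in between belongs to the same counter value. */
	static uint32_t read_counter(void)
	{
		uint32_t high1, high2, low;

		do {
			high1 = read_high();
			low   = read_low();
			high2 = read_high();
		} while (high1 != high2);

		return (high1 << 8) | low;
	}

	int main(void)
	{
		hw_counter = 0x012345;
		printf("0x%06x\n", (unsigned)read_counter());	/* 0x012345 */
		return 0;
	}
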
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89..25ed911a3112 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,52 +25,16 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define INTEL_GMCH_ENABLED 0x4
-#define INTEL_GMCH_MEM_MASK 0x1
-#define INTEL_GMCH_MEM_64M 0x1
-#define INTEL_GMCH_MEM_128M 0
-
-#define INTEL_GMCH_GMS_MASK (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
/* PCI config space */
@@ -106,10 +70,13 @@
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
-#define GDRST 0xc0
-#define GDRST_FULL (0<<2)
-#define GDRST_RENDER (1<<2)
-#define GDRST_MEDIA (3<<2)
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
/* VGA stuff */
@@ -192,11 +159,11 @@
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-
/*
* 3D instructions used by the kernel
*/
@@ -249,6 +216,16 @@
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+
/*
* Fence registers
*/
@@ -283,6 +260,17 @@
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
#define PRB0_CTL 0x0203c
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define BLT_RING_BASE 0x22000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define RING_ACTHD(base) ((base)+0x74)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -295,6 +283,8 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
@@ -306,7 +296,6 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
-#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -464,17 +453,17 @@
#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
-/*
- * BSD (bit stream decoder instruction and interrupt control register defines
- * (G4X and Ironlake only)
- */
-#define BSD_RING_TAIL 0x04030
-#define BSD_RING_HEAD 0x04034
-#define BSD_RING_START 0x04038
-#define BSD_RING_CTL 0x0403c
-#define BSD_RING_ACTHD 0x04074
-#define BSD_HWS_PGA 0x04080
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
+#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+
+#define GEN6_BSD_IMR 0x120a8
+#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+
+#define GEN6_BSD_RNCID 0x12198
/*
* Framebuffer compression (915+ only)
@@ -579,12 +568,51 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-#define GMBUS0 0x5100
-#define GMBUS1 0x5104
-#define GMBUS2 0x5108
-#define GMBUS3 0x510c
-#define GMBUS4 0x5110
-#define GMBUS5 0x5120
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
/*
* Clock control & power management
@@ -603,6 +631,7 @@
#define VGA1_PD_P1_MASK (0x1f << 8)
#define DPLL_A 0x06014
#define DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -633,31 +662,6 @@
#define LVDS 0x61180
#define LVDS_ON (1<<31)
-#define ADPA 0x61100
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
-
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
/* Scratch pad debug 0 reg:
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
@@ -736,10 +740,13 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
#define FPA0 0x06040
#define FPA1 0x06044
#define FPB0 0x06048
#define FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -760,6 +767,7 @@
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
@@ -926,6 +934,8 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
+#define TSC1 0x11001
+#define TSE (1<<0)
#define TR1 0x11006
#define TSFS 0x11020
#define TSFS_SLOPE_MASK 0x0000ff00
@@ -1070,6 +1080,8 @@
#define MEMSTAT_SRC_CTL_STDBY 3
#define RCPREVBSYTUPAVG 0x113b8
#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
#define SDEW 0x1124c
#define CSIEW0 0x11250
#define CSIEW1 0x11254
@@ -1150,6 +1162,15 @@
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+
/* VGA port control */
#define ADPA 0x61100
#define ADPA_DAC_ENABLE (1<<31)
@@ -1173,6 +1194,7 @@
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
+
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
@@ -1331,6 +1353,22 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA 0x61178
+#define VIDEO_DIP_CTL 0x61170
+#define VIDEO_DIP_ENABLE (1 << 31)
+#define VIDEO_DIP_PORT_B (1 << 29)
+#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+
/* Panel power sequencing */
#define PP_STATUS 0x61200
#define PP_ON (1 << 31)
@@ -1346,6 +1384,9 @@
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -1481,6 +1522,7 @@
# define TV_TEST_MODE_MASK (7 << 0)
#define TV_DAC 0x68004
+# define TV_DAC_SAVE 0x00ffff00
/**
* Reports that DAC state change logic has reported change (RO).
*
@@ -2075,29 +2117,35 @@
/* Display & cursor control */
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
-#define PIPE_DITHER_TYPE_MASK (3 << 2)
-#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
-#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define PIPECONF_ENABLE (1<<31)
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_BPP_MASK (0x000000e0)
+#define PIPECONF_BPP_8 (0<<5)
+#define PIPECONF_BPP_10 (1<<5)
+#define PIPECONF_BPP_6 (2<<5)
+#define PIPECONF_BPP_12 (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -2128,12 +2176,15 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2206,8 +2257,8 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
-#define WM1_LP_FBC_LP1_MASK (0xf<<20)
-#define WM1_LP_FBC_LP1_SHIFT 20
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
@@ -2333,6 +2384,14 @@
#define DSPASURF 0x7019C /* 965+ only */
#define DSPATILEOFF 0x701A4 /* 965+ only */
+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -2397,6 +2456,7 @@
#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
#define FDI_PLL_BIOS_1 0x46004
#define FDI_PLL_BIOS_2 0x46008
#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
@@ -2420,46 +2480,47 @@
#define PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
-#define PIPEA_DATA_M1_OFFSET 0
+#define PIPE_DATA_M1_OFFSET 0
#define PIPEA_DATA_N1 0x60034
-#define PIPEA_DATA_N1_OFFSET 0
+#define PIPE_DATA_N1_OFFSET 0
#define PIPEA_DATA_M2 0x60038
-#define PIPEA_DATA_M2_OFFSET 0
+#define PIPE_DATA_M2_OFFSET 0
#define PIPEA_DATA_N2 0x6003c
-#define PIPEA_DATA_N2_OFFSET 0
+#define PIPE_DATA_N2_OFFSET 0
#define PIPEA_LINK_M1 0x60040
-#define PIPEA_LINK_M1_OFFSET 0
+#define PIPE_LINK_M1_OFFSET 0
#define PIPEA_LINK_N1 0x60044
-#define PIPEA_LINK_N1_OFFSET 0
+#define PIPE_LINK_N1_OFFSET 0
#define PIPEA_LINK_M2 0x60048
-#define PIPEA_LINK_M2_OFFSET 0
+#define PIPE_LINK_M2_OFFSET 0
#define PIPEA_LINK_N2 0x6004c
-#define PIPEA_LINK_N2_OFFSET 0
+#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are same start from 0x61000 */
#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_M1_OFFSET 0
#define PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_N1_OFFSET 0
#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_M2_OFFSET 0
#define PIPEB_DATA_N2 0x6103c
-#define PIPEB_DATA_N2_OFFSET 0
#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_M1_OFFSET 0
#define PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_N1_OFFSET 0
#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_M2_OFFSET 0
#define PIPEB_LINK_N2 0x6104c
-#define PIPEB_LINK_N2_OFFSET 0
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
/* CPU panel fitter */
#define PFA_CTL_1 0x68080
@@ -2516,7 +2577,8 @@
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GT_BSD_USER_INTERRUPT (1 << 5)
-
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BLT_USER_INTERRUPT (1 << 22)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -2551,6 +2613,10 @@
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2600,11 +2666,14 @@
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2690,6 +2759,13 @@
#define TRANS_VBLANK_B 0xe1010
#define TRANS_VSYNC_B 0xe1014
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
+
#define TRANSB_DATA_M1 0xe1030
#define TRANSB_DATA_N1 0xe1034
#define TRANSB_DATA_M2 0xe1038
@@ -2701,6 +2777,7 @@
#define TRANSACONF 0xf0008
#define TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -2721,10 +2798,15 @@
#define FDI_RXA_CHICKEN 0xc200c
#define FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
#define FDI_TXA_CTL 0x60100
#define FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -2766,8 +2848,8 @@
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define FDI_RXA_CTL 0xf000c
#define FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
-#define FDI_RX_DISABLE (0<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_8BPC (0<<16)
@@ -2782,8 +2864,7 @@
#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_SEL_RAWCLK (0<<4)
-#define FDI_SEL_PCDCLK (1<<4)
+#define FDI_PCDCLK (1<<4)
/* CPT */
#define FDI_AUTO_TRAINING (1<<10)
#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
@@ -2798,6 +2879,9 @@
#define FDI_RXA_TUSIZE2 0xf0038
#define FDI_RXB_TUSIZE1 0xf1030
#define FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -2816,6 +2900,8 @@
#define FDI_RXA_IMR 0xf0018
#define FDI_RXB_IIR 0xf1014
#define FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -2935,6 +3021,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a..989c19d2d959 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
- if (IS_I965GM(dev) || IS_GM45(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(DPLL_A_MD);
}
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(DPLL_B_MD);
}
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
dev_priv->saveCURBPOS = I915_READ(CURBPOS);
dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- /* I2C state */
- intel_i2c_reset_gmbus(dev);
+ intel_i2c_reset(dev);
return 0;
}
-
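The IS_I965G()/IS_I9XX() checks above are being rewritten as comparisons on a per-device generation number. A rough orientation sketch of the assumed numbering behind INTEL_INFO(dev)->gen:

/* Assumed mapping (not part of this patch):
 *   gen 2: i830/845/855/865      gen 3: i915/i945/Pineview
 *   gen 4: i965/G4x              gen 5: Ironlake      gen 6: Sandy Bridge
 * so "INTEL_INFO(dev)->gen >= 4" covers the parts the old IS_I965G() test
 * matched plus everything newer, and IS_GEN2(dev) replaces !IS_I9XX(dev). */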
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 000000000000..65c88f9ba12c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,286 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi_drivers.h>
+
+#include "drmP.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+ acpi_handle dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+ 0xd3, 0x73, 0xd8, 0x7e,
+ 0xd0, 0xc2,
+ 0x4f, 0x4e,
+ 0xa8, 0x54,
+ 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static int intel_dsm(acpi_handle handle, int func, int arg)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *obj;
+ u32 result;
+ int ret = 0;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(intel_dsm_guid);
+ params[0].buffer.pointer = (char *)intel_dsm_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = INTEL_DSM_REVISION_ID;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = arg;
+
+ ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ return ret;
+ }
+
+ obj = (union acpi_object *)output.pointer;
+
+ result = 0;
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ result = obj->integer.value;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 4) {
+ result = (obj->buffer.pointer[0] |
+ (obj->buffer.pointer[1] << 8) |
+ (obj->buffer.pointer[2] << 16) |
+ (obj->buffer.pointer[3] << 24));
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (result == 0x80000002)
+ ret = -ENODEV;
+
+ kfree(output.pointer);
+ return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+ switch (id) {
+ case 0:
+ return "Reserved";
+ case 1:
+ return "Analog VGA";
+ case 2:
+ return "LVDS";
+ case 3:
+ return "Reserved";
+ case 4:
+ return "HDMI/DVI_B";
+ case 5:
+ return "HDMI/DVI_C";
+ case 6:
+ return "HDMI/DVI_D";
+ case 7:
+ return "DisplayPort_A";
+ case 8:
+ return "DisplayPort_B";
+ case 9:
+ return "DisplayPort_C";
+ case 0xa:
+ return "DisplayPort_D";
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ return "Reserved";
+ case 0xe:
+ return "WiDi";
+ default:
+ return "bad type";
+ }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+ switch (type) {
+ case 0:
+ return "unknown";
+ case 1:
+ return "No MUX, iGPU only";
+ case 2:
+ return "No MUX, dGPU only";
+ case 3:
+ return "MUXed between iGPU and dGPU";
+ default:
+ return "bad type";
+ }
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *pkg;
+ int i, ret;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(intel_dsm_guid);
+ params[0].buffer.pointer = (char *)intel_dsm_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = INTEL_DSM_REVISION_ID;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = 0;
+
+ ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
+ &output);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+ goto out;
+ }
+
+ pkg = (union acpi_object *)output.pointer;
+
+ if (pkg->type == ACPI_TYPE_PACKAGE) {
+ union acpi_object *connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id =
+ &obj->package.elements[0];
+ union acpi_object *info = &obj->package.elements[1];
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
+ }
+ } else {
+ DRM_ERROR("MUX INFO call failed\n");
+ }
+
+out:
+ kfree(output.pointer);
+}
+
+static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+ return 0;
+}
+
+static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ return 0;
+}
+
+static int intel_dsm_init(void)
+{
+ return 0;
+}
+
+static int intel_dsm_get_client_id(struct pci_dev *pdev)
+{
+ if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler intel_dsm_handler = {
+ .switchto = intel_dsm_switchto,
+ .power_state = intel_dsm_power_state,
+ .init = intel_dsm_init,
+ .get_client_id = intel_dsm_get_client_id,
+};
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, intel_handle;
+ acpi_status status;
+ int ret;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
+ if (ACPI_FAILURE(status)) {
+ DRM_DEBUG_KMS("no _DSM method for intel device\n");
+ return false;
+ }
+
+ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ if (ret < 0) {
+ DRM_ERROR("failed to get supported _DSM functions\n");
+ return false;
+ }
+
+ intel_dsm_priv.dhandle = dhandle;
+
+ intel_dsm_platform_mux_info();
+ return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_dsm = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+ has_dsm |= intel_dsm_pci_probe(pdev);
+ }
+
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+ if (!intel_dsm_detect())
+ return;
+
+ vga_switcheroo_register_handler(&intel_dsm_handler);
+}
+
+void intel_unregister_dsm_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
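For reference, the 16 bytes of intel_dsm_guid above are the _DSM UUID in the mixed-endian layout ACPI uses (first three fields little-endian). Assuming that standard encoding, they decode to:

/* d3 73 d8 7e | d0 c2 | 4f 4e | a8 54 | 0f 13 17 b0 1c 2c
 *   -> 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c
 * i.e. the UUID this file passes as the first _DSM argument. */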
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96f75d7f6633..b0b1200ed650 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*
*/
+#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
int i, temp_downclock;
struct drm_display_mode *temp_mode;
- /* Defaults if we can't find VBT info */
- dev_priv->lvds_dither = 0;
- dev_priv->lvds_vbt = 0;
-
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+
panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
((unsigned char *)entry + dvo_timing_offset);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- dev_priv->sdvo_lvds_vbt_mode = NULL;
-
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
- /* Set sensible defaults in case we can't find the general block */
- dev_priv->int_tv_support = 1;
- dev_priv->int_crt_support = 1;
-
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
dev_priv->int_tv_support = general->int_tv_support;
@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->lvds_use_ssc = general->enable_ssc;
if (dev_priv->lvds_use_ssc) {
- if (IS_I85X(dev_priv->dev))
+ if (IS_I85X(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
+ else if (IS_GEN5(dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_general_definitions *general;
- const int crt_bus_map_table[] = {
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- GPIOF,
- };
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if ((bus_pin >= 1) && (bus_pin <= 6)) {
- dev_priv->crt_ddc_bus =
- crt_bus_map_table[bus_pin-1];
- }
+ if (bus_pin >= 1 && bus_pin <= 6)
+ dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin,
+ p_mapping->i2c_speed);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver)
return;
- if (driver && SUPPORTS_EDP(dev) &&
- driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
- dev_priv->edp_support = 1;
- } else {
- dev_priv->edp_support = 0;
- }
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->edp.support = 1;
- if (driver && driver->dual_frequency)
+ if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
}
@@ -424,27 +414,78 @@ static void
parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume 18bpp panel color "
- "depth.\n");
- dev_priv->edp_bpp = 18;
+ "supported, assume %dbpp panel color "
+ "depth.\n",
+ dev_priv->edp.bpp);
}
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->edp_bpp = 18;
+ dev_priv->edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->edp_bpp = 24;
+ dev_priv->edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->edp_bpp = 30;
+ dev_priv->edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->edp.pps = *edp_pps;
+
+ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->edp.lanes = 1;
+ break;
+ case 1:
+ dev_priv->edp.lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->edp.lanes = 4;
+ break;
+ }
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
}
}
@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
return;
}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->lvds_dither = 1;
+ dev_priv->lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+ dev_priv->lvds_use_ssc = 0;
+
+ /* eDP data */
+ dev_priv->edp.bpp = 18;
+}
+
/**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
*
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
* Returns 0 on success, nonzero on failure.
*/
bool
-intel_init_bios(struct drm_device *dev)
+intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
- size_t size;
- int i;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
-
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
- }
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
}
- if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
- }
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_ERROR("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+ }
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
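The two magic values written by intel_setup_bios() decode as follows, assuming the usual PP_ON_DELAYS/PP_OFF_DELAYS layout of two delay fields in units of 100 microseconds, with the upper field holding the panel power delay and the lower field the backlight delay (consistent with the comments above):

/* 0x019007d0: upper field 0x0190 = 400 * 100us = 40 ms  (T2, panel power-up)
 *             lower field 0x07d0 = 2000 * 100us = 200 ms (T5, backlight on)
 * 0x015e07d0: upper field 0x015e = 350 * 100us = 35 ms  (T3, panel power-down)
 *             lower field 0x07d0 = 2000 * 100us = 200 ms (Tx, backlight off) */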
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 4c18514f6f80..5f8e4edcbbb9 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -197,7 +197,8 @@ struct bdb_general_features {
struct child_device_config {
u16 handle;
u16 device_type;
- u8 device_id[10]; /* See DEVICE_TYPE_* above */
+ u8 i2c_speed;
+ u8 rsvd[9];
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
@@ -466,7 +467,8 @@ struct bdb_edp {
struct edp_link_params link_params[16];
} __attribute__ ((packed));
-bool intel_init_bios(struct drm_device *dev);
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 197d4f32585a..c55c77043357 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
max_clock = 350000;
else
max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(PCH_ADPA, adpa);
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1))
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(PCH_ADPA, temp);
+ /* Make sure hotplug is enabled */
+ I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
(void)I915_READ(PCH_ADPA);
}
@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1))
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
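The wait_for() calls in this file drop their old third argument; the macro now takes just a condition and a timeout in milliseconds. A simplified sketch of the assumed shape (not the exact macro, which is defined elsewhere in the driver):

/* Sketch: poll COND until it is true or MS milliseconds elapse; evaluates to 0
 * on success, -ETIMEDOUT otherwise. */
#define wait_for(COND, MS) ({						\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})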
@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
+{
+ u8 buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0xA0,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ /* DDC monitor detect: Does it ACK a write to 0xA0? */
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
+}
+
static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
return false;
- return intel_ddc_probe(intel_encoder);
+ if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
+ return true;
+ }
+
+ if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
+
+ return false;
}
static enum drm_connector_status
intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
uint8_t st00;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
if (pipe == 0) {
bclrpat_reg = BCLRPAT_A;
vtotal_reg = VTOTAL_A;
@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
@@ -404,34 +434,37 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- if (intel_crt_detect_hotplug(connector))
+ if (I915_HAS_HOTPLUG(dev)) {
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- else
+ } else
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(encoder))
+ if (intel_crt_detect_ddc(&encoder->base))
return connector_status_connected;
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (encoder->crtc && encoder->crtc->enabled) {
- status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+ if (encoder->base.crtc && encoder->base.crtc->enabled) {
+ status = intel_crt_load_detect(encoder->base.crtc, encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ crtc = intel_get_load_detect_pipe(encoder, connector,
NULL, &dpms_mode);
if (crtc) {
- status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder,
+ if (intel_crt_detect_ddc(&encoder->base))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crtc, encoder);
+ intel_release_load_detect_pipe(encoder,
connector, dpms_mode);
} else
status = connector_status_unknown;
@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
- int ret;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
-
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ ret = intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
if (ret || !IS_G4X(dev))
- goto end;
+ return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
- if (!ddc_bus) {
- dev_printk(KERN_ERR, &connector->dev->pdev->dev,
- "DDC bus registration failed for CRTDDC_D.\n");
- goto end;
- }
- /* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(connector, ddc_bus);
- intel_i2c_destroy(ddc_bus);
-
-end:
- return ret;
-
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 i2c_reg;
intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
if (!intel_encoder)
@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
-
- /* Set up the DDC bus. */
- if (HAS_PCH_SPLIT(dev))
- i2c_reg = PCH_GPIOA;
- else {
- i2c_reg = GPIOA;
- /* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != 0)
- i2c_reg = dev_priv->crt_ddc_bus;
- }
- intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- return;
- }
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_ANALOG;
intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 979228594599..990f065374b2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,8 +43,8 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
/* given values */
@@ -342,6 +342,16 @@ static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
+
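intel_fdi_link_freq() above reports the FDI link clock in units of 100 MHz: on Ironlake it is derived from the FDI PLL feedback divider programmed by the BIOS, otherwise a fixed 2.7 GHz link (27) is returned. A hypothetical caller converting it for M/N calculations might look like:

	/* hypothetical use only: FDI link clock in kHz */
	u32 link_clock = intel_fdi_link_freq(dev) * 100 * 1000;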
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
/**
* Returns whether any output on the specified pipe is of the specified type
*/
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *l_entry;
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+ if (encoder->base.crtc == crtc && encoder->type == type)
+ return true;
- list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
- if (l_entry && l_entry->crtc == crtc) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
- if (intel_encoder->type == type)
- return true;
- }
- }
- return false;
+ return false;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
- /* return directly when it is eDP */
- if (HAS_eDP)
- return true;
-
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -955,26 +958,26 @@ static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
- intel_clock_t clock;
- if (target < 200000) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 2;
- clock.m1 = 23;
- clock.m2 = 8;
- } else {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 1;
- clock.m1 = 14;
- clock.m2 = 2;
- }
- clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
- clock.p = (clock.p1 * clock.p2);
- clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
- clock.vco = 0;
- memcpy(best_clock, &clock, sizeof(intel_clock_t));
- return true;
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 2;
+ clock.m1 = 23;
+ clock.m2 = 8;
+ } else {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 1;
+ clock.m1 = 14;
+ clock.m2 = 2;
+ }
+ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+ clock.p = (clock.p1 * clock.p2);
+ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
}
/**
@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
- if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS),
- 50, 0))
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
* Otherwise:
* wait for the display line value to settle (it usually
* ends up stopping at the start of the next frame).
- *
+ *
*/
-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 4) {
- int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+ int reg = PIPECONF(pipe);
/* Wait for the Pipe State to go off */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
- 100, 0))
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
u32 last_line;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+ int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
/* Wait for the display line to settle */
do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & DSL_LINEMASK;
mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
}
}
-/* Parameters have changed, update FBC info */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
+ if (fb->pitch == dev_priv->cfb_pitch &&
+ obj_priv->fence_reg == dev_priv->cfb_fence &&
+ intel_crtc->plane == dev_priv->cfb_plane &&
+ I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+ return;
+
+ i8xx_disable_fbc(dev);
+
dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitch < dev_priv->cfb_pitch)
@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
void i8xx_disable_fbc(struct drm_device *dev)
@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
- if (!I915_HAS_FBC(dev))
- return;
-
- if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
- return; /* Already off, just return */
-
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool g4x_fbc_enabled(struct drm_device *dev)
@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
- DPFC_CTL_PLANEB;
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+ dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+ POSTING_READ(ILK_DPFC_CONTROL);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
+ dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_y = crtc->y;
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
- DPFC_CTL_EN);
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
/**
* intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
*
* We need to enable/disable FBC on a global basis.
*/
-static void intel_update_fbc(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+static void intel_update_fbc(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
- struct drm_crtc *tmp_crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
- int crtcs_enabled = 0;
DRM_DEBUG_KMS("\n");
@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!I915_HAS_FBC(dev))
return;
- if (!crtc->fb)
- return;
-
- intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
-
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled)
- crtcs_enabled++;
+ if (tmp_crtc->enabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
}
- DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
- if (crtcs_enabled > 1) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = to_intel_bo(intel_fb->obj);
+
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
+ "compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
+ "disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
- if ((mode->hdisplay > 2048) ||
- (mode->vdisplay > 1536)) {
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (in_dbg_master())
goto out_disable;
- if (intel_fbc_enabled(dev)) {
- /* We can re-enable it in this case, but need to update pitch */
- if ((fb->pitch > dev_priv->cfb_pitch) ||
- (obj_priv->fence_reg != dev_priv->cfb_fence) ||
- (plane != dev_priv->cfb_plane))
- intel_disable_fbc(dev);
- }
-
- /* Now try to turn it back on if possible */
- if (!intel_fbc_enabled(dev))
- intel_enable_fbc(crtc, 500);
-
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
@@ -1407,7 +1432,9 @@ out_disable:
}
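The reworked intel_update_fbc() derives everything from the device: it walks the CRTC list and only keeps compression on when exactly one pipe with a framebuffer is active. A minimal sketch of that selection step, assuming the same DRM structures; the helper name intel_single_enabled_crtc() is illustrative and not part of this patch:

/* Sketch only: return the sole enabled CRTC, or NULL when zero or more
 * than one pipe is active. */
static struct drm_crtc *intel_single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc = NULL, *tmp_crtc;

	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (!tmp_crtc->enabled)
			continue;
		if (crtc)	/* a second active pipe disqualifies FBC */
			return NULL;
		crtc = tmp_crtc;
	}
	return crtc;
}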
int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ bool pipelined)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
- else if (IS_I965G(dev))
+ else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
else
alignment = 64 * 1024;
@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
}
ret = i915_gem_object_pin(obj, alignment);
- if (ret != 0)
+ if (ret)
return ret;
+ ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+ if (ret)
+ goto err_unpin;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1445,20 +1476,22 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
*/
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
+ ret = i915_gem_object_get_fence_reg(obj, false);
+ if (ret)
+ goto err_unpin;
}
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y)
+ int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
+ u32 reg;
switch (plane) {
case 0:
@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
obj = intel_fb->obj;
obj_priv = to_intel_bo(obj);
- dspcntr = I915_READ(dspcntr_reg);
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (obj_priv->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(reg, dspcntr);
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
- I915_WRITE(dspstride, fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspsurf, Start);
- I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
- } else {
- I915_WRITE(dspbase, Start + Offset);
- }
- POSTING_READ(dspbase);
-
- if (IS_I965G(dev) || plane == 0)
- intel_update_fbc(crtc, &crtc->mode);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ } else
+ I915_WRITE(DSPADDR(plane), Start + Offset);
+ POSTING_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- intel_increase_pllclock(crtc, true);
+ intel_update_fbc(dev);
+ intel_increase_pllclock(crtc);
return 0;
}
@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
int ret;
/* no fb bound */
@@ -1566,45 +1587,42 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (plane) {
+ switch (intel_crtc->plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
- intel_fb = to_intel_framebuffer(crtc->fb);
- obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
-
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(crtc->fb)->obj,
+ false);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_object_set_to_display_plane(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (old_fb) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
}
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ LEAVE_ATOMIC_MODE_SET);
if (ret) {
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
- if (old_fb) {
- intel_fb = to_intel_framebuffer(old_fb);
- obj_priv = to_intel_bo(intel_fb->obj);
- i915_gem_object_unpin(intel_fb->obj);
- }
+ if (old_fb)
+ i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1615,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (!master_priv->sarea_priv)
return 0;
- if (pipe) {
+ if (intel_crtc->pipe) {
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
} else {
@@ -1626,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1659,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
}
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(500);
}
@@ -1669,84 +1688,109 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, tries = 0;
+ u32 reg, temp, tries;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+ I915_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
- udelay(150);
+ I915_WRITE(reg, temp);
- tries = 0;
+ POSTING_READ(reg);
+ udelay(150);
+ reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(fdi_rx_iir_reg);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
}
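Both training phases above share the same poll-and-acknowledge idiom on FDI_RX_IIR: read the interrupt register, test the lock bit, then write the bit back to clear it. A condensed sketch of that loop, assuming the register macros used in this patch; the helper name fdi_wait_for_lock() is illustrative only:

/* Sketch only: poll FDI_RX_IIR for a lock bit and acknowledge it. */
static bool fdi_wait_for_lock(struct drm_i915_private *dev_priv,
			      int pipe, u32 lock_bit)
{
	u32 reg = FDI_RX_IIR(pipe);
	int tries;

	for (tries = 0; tries < 5; tries++) {
		u32 temp = I915_READ(reg);

		if (temp & lock_bit) {
			/* write the bit back to clear the sticky status */
			I915_WRITE(reg, temp | lock_bit);
			return true;
		}
	}
	return false;
}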
-static int snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -1760,24 +1804,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
- u32 temp, i;
+ u32 reg, temp, i;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
for train result */
- temp = I915_READ(fdi_rx_imr_reg);
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1785,10 +1827,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1796,32 +1838,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN6(dev)) {
@@ -1829,9 +1876,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
- temp = I915_READ(fdi_rx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -1839,535 +1887,593 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(fdi_rx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++ ) {
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(fdi_tx_reg, temp);
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
udelay(500);
- temp = I915_READ(fdi_rx_iir_reg);
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_fdi_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
- int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
- int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
- int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
- int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
- int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- u32 temp;
- u32 pipe_bpc;
-
- temp = I915_READ(pipeconf_reg);
- pipe_bpc = temp & PIPE_BPC_MASK;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ /* Write the TU size bits so error detection works */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0) {
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- }
- }
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~((0x7 << 19) | (0x7 << 16));
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
- if (!HAS_eDP) {
+ POSTING_READ(reg);
+ udelay(200);
- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
- temp = I915_READ(fdi_rx_reg);
- /*
- * make the BPC in FDI Rx be consistent with that in
- * pipeconf reg.
- */
- temp &= ~(0x7 << 16);
- temp |= (pipe_bpc << 11);
- temp &= ~(7 << 19);
- temp |= (intel_crtc->fdi_lanes - 1) << 19;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
- /* Switch from Rawclk to PCDclk */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
- udelay(200);
+ POSTING_READ(reg);
+ udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
- }
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc))) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
- }
+ POSTING_READ(reg);
+ udelay(100);
+ }
+}
- /* Enable CPU pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- udelay(100);
- }
+static void intel_flush_display_plane(struct drm_device *dev,
+ int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
- /* configure and enable CPU plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
- if (!HAS_eDP) {
- /* For PCH output, training FDI link */
- if (IS_GEN6(dev))
- gen6_fdi_link_train(crtc);
- else
- ironlake_fdi_link_train(crtc);
+ if (IS_GEN2(dev))
+ /* Can't break the hang on i8xx */
+ return;
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
- udelay(200);
+ tmp = I915_READ(PRB0_CTL);
+ if (tmp & RING_WAIT) {
+ I915_WRITE(PRB0_CTL, tmp);
+ POSTING_READ(PRB0_CTL);
+ }
+}
- if (HAS_PCH_CPT(dev)) {
- /* Be sure PCH DPLL SEL is set */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0 &&
- (temp & TRANSA_DPLL_ENABLE) == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- else if (trans_dpll_sel == 1 &&
- (temp & TRANSB_DPLL_ENABLE) == 0)
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- }
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv;
- /* set transcoder timing */
- I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
- I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
- I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
-
- I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
- I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
- I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
-
- /* enable normal train */
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
+ if (crtc->fb == NULL)
+ return;
- /* wait one idle pattern time */
- udelay(100);
+ obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ dev_priv = crtc->dev->dev_private;
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&obj_priv->pending_flip) == 0);
+}
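The wait above relies on the flip-completion path decrementing the object's pending_flip count and waking pending_flip_queue once it reaches zero. A hedged sketch of that completion side; the function name intel_flip_completed() is illustrative and the real accounting lives in the page-flip interrupt handling:

/* Sketch only: the producer side that unblocks the waiter above. */
static void intel_flip_completed(struct drm_device *dev,
				 struct drm_i915_gem_object *obj_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (atomic_dec_and_test(&obj_priv->pending_flip))
		wake_up(&dev_priv->pending_flip_queue);
}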
- /* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
-
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
- reg |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
-
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
- reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
- reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
- switch (intel_trans_dp_port_sel(crtc)) {
- case PCH_DP_B:
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- case PCH_DP_C:
- reg |= TRANS_DP_PORT_SEL_C;
- break;
- case PCH_DP_D:
- reg |= TRANS_DP_PORT_SEL_D;
- break;
- default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- reg |= TRANS_DP_PORT_SEL_B;
- break;
- }
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
- }
+ if (intel_crtc->active)
+ return;
- /* enable PCH transcoder */
- temp = I915_READ(transconf_reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
- I915_READ(transconf_reg);
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
- DRM_ERROR("failed to enable transcoder\n");
- }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
- intel_crtc_load_lut(crtc);
+ ironlake_fdi_enable(crtc);
- intel_update_fbc(crtc, &crtc->mode);
- break;
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
+ }
+
+ /* Enable CPU pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0) {
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ /* configure and enable CPU plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ /* For PCH output, training FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
+
+ /* enable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+ POSTING_READ(reg);
+ udelay(200);
+ }
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
- drm_vblank_off(dev, pipe);
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
+ /* set transcoder timing */
+ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
+
+ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
}
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ I915_WRITE(reg, temp);
+ }
- /* disable cpu pipe, disable after all planes disabled */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ /* enable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, temp | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
- /* wait for cpu pipe off, pipe state */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
- DRM_ERROR("failed to turn off cpu pipe\n");
- } else
- DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+ intel_crtc_update_cursor(crtc, true);
+}
- udelay(100);
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
- /* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
- /* disable CPU FDI tx and PCH FDI rx */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
- I915_READ(fdi_tx_reg);
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
- temp = I915_READ(fdi_rx_reg);
- /* BPC in FDI rx is consistent with that in pipeconf */
- temp &= ~(0x07 << 16);
- temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
- udelay(100);
+ /* disable cpu pipe, disable after all planes disabled */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+ POSTING_READ(reg);
+ /* wait for cpu pipe off, pipe state */
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+ }
+
+ /* Disable PF */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
- /* still set train pattern 1 */
- temp = I915_READ(fdi_tx_reg);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- POSTING_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- I915_WRITE(fdi_rx_reg, temp);
- POSTING_READ(fdi_rx_reg);
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if (temp & LVDS_PORT_EN) {
I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- I915_READ(PCH_LVDS);
+ POSTING_READ(PCH_LVDS);
udelay(100);
}
+ }
- /* disable PCH transcoder */
- temp = I915_READ(transconf_reg);
- if ((temp & TRANS_ENABLE) != 0) {
- I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
+ /* disable PCH transcoder */
+ reg = TRANSCONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & TRANS_ENABLE) {
+ I915_WRITE(reg, temp & ~TRANS_ENABLE);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+ }
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(reg, temp);
- temp = I915_READ(transconf_reg);
- /* BPC in transcoder is consistent with that in pipeconf */
- temp &= ~PIPE_BPC_MASK;
- temp |= pipe_bpc;
- I915_WRITE(transconf_reg, temp);
- I915_READ(transconf_reg);
- udelay(100);
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (pipe == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
- if (HAS_PCH_CPT(dev)) {
- /* disable TRANS_DP_CTL */
- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
- int reg;
+ /* disable PCH DPLL */
+ reg = PCH_DPLL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
- I915_WRITE(trans_dp_ctl, reg);
- POSTING_READ(trans_dp_ctl);
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
- /* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
- temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
- }
+ POSTING_READ(reg);
+ udelay(100);
- /* disable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
-
- /* Switch from PCDclk to Rawclk */
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_SEL_PCDCLK;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
-
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
- /* Wait for the clocks to turn off. */
- udelay(100);
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+ intel_update_fbc(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ ironlake_crtc_enable(crtc);
+ break;
+
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ ironlake_crtc_disable(crtc);
break;
}
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
- struct intel_overlay *overlay;
- int ret;
-
if (!enable && intel_crtc->overlay) {
- overlay = intel_crtc->overlay;
- mutex_lock(&overlay->dev->struct_mutex);
- for (;;) {
- ret = intel_overlay_switch_off(overlay);
- if (ret == 0)
- break;
+ struct drm_device *dev = intel_crtc->base.dev;
- ret = intel_overlay_recover_from_interrupt(overlay, 0);
- if (ret != 0) {
- /* overlay doesn't react anymore. Usually
- * results in a black screen and an unkillable
- * X server. */
- BUG();
- overlay->hw_wedged = HW_WEDGED;
- break;
- }
- }
- mutex_unlock(&overlay->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
+ (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ mutex_unlock(&dev->struct_mutex);
}
- /* Let userspace switch the overlay on again. In most cases userspace
- * has to recompute where to put it anyway. */
- return;
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
}
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 temp;
+ u32 reg, temp;
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(dpll_reg, temp);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
+ if (intel_crtc->active)
+ return;
- /* Enable the pipe */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0)
- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
- intel_crtc_load_lut(crtc);
+ /* Enable the DPLL */
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(reg, temp);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- /* Give the overlay scaler a chance to enable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, true);
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable if it's on this pipe */
- intel_crtc_dpms_overlay(intel_crtc, false);
- drm_vblank_off(dev, pipe);
-
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
-
- /* Disable display plane */
- temp = I915_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- I915_READ(dspbase_reg);
- }
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipeconf_reg == PIPEACONF &&
- (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
- /* Wait for vblank for the disable to take effect */
+ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if ((temp & PIPECONF_ENABLE) == 0)
+ I915_WRITE(reg, temp | PIPECONF_ENABLE);
+
+ /* Enable the plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+ }
+
+ intel_crtc_load_lut(crtc);
+ intel_update_fbc(dev);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+ intel_crtc_update_cursor(crtc, true);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
+ /* Disable display plane */
+ reg = DSPCNTR(plane);
+ temp = I915_READ(reg);
+ if (temp & DISPLAY_PLANE_ENABLE) {
+ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ intel_flush_display_plane(dev, plane);
+
+ /* Wait for vblank for the disable to take effect */
+ if (IS_GEN2(dev))
intel_wait_for_vblank(dev, pipe);
- goto skip_pipe_off;
- }
+ }
- /* Next, disable display pipes */
- temp = I915_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- I915_READ(pipeconf_reg);
- }
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ goto done;
+
+ /* Next, disable display pipes */
+ reg = PIPECONF(pipe);
+ temp = I915_READ(reg);
+ if (temp & PIPECONF_ENABLE) {
+ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
/* Wait for the pipe to turn off */
+ POSTING_READ(reg);
intel_wait_for_pipe_off(dev, pipe);
+ }
+
+ reg = DPLL(pipe);
+ temp = I915_READ(reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
- temp = I915_READ(dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
- }
- skip_pipe_off:
/* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
udelay(150);
+ }
+
+done:
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+ intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ i9xx_crtc_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ i9xx_crtc_disable(crtc);
break;
}
}
@@ -2388,26 +2494,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
return;
intel_crtc->dpms_mode = mode;
- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
- /* When switching on the display, ensure that SR is disabled
- * with multiple pipes prior to enabling to new pipe.
- *
- * When switching off the display, make sure the cursor is
- * properly hidden prior to disabling the pipe.
- */
- if (mode == DRM_MODE_DPMS_ON)
- intel_update_watermarks(dev);
- else
- intel_crtc_update_cursor(crtc);
dev_priv->display.dpms(crtc, mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_crtc_update_cursor(crtc);
- else
- intel_update_watermarks(dev);
-
if (!dev->primary->master)
return;
@@ -2432,16 +2521,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
-static void intel_crtc_commit (struct drm_crtc *crtc)
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here. We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ i9xx_crtc_disable(crtc);
+}
+
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
+{
+ i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+ ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+ ironlake_crtc_enable(crtc);
}
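Splitting the enable and disable paths lets the CRTC helper's prepare/commit hooks map directly onto them: the helper turns the pipe fully off before reprogramming and back on afterwards. A simplified sketch of that calling order (the real sequencing is in drm_crtc_helper_set_mode()):

/*
 * Simplified ordering sketch, not the real implementation:
 *   crtc_funcs->prepare(crtc);             e.g. ironlake_crtc_prepare()
 *   crtc_funcs->mode_set(crtc, mode, ...); program timings, PLLs, FDI
 *   crtc_funcs->commit(crtc);              e.g. ironlake_crtc_commit()
 */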
void intel_encoder_prepare (struct drm_encoder *encoder)
@@ -2460,13 +2579,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
void intel_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
@@ -2557,33 +2670,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
return 133000;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
- if (IS_I830(dev))
- return -1;
-
- pfit_control = I915_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- if (IS_I965G(dev))
- return (pfit_control >> 29) & 0x3;
-
- /* older chips can only use pipe 1 */
- return 1;
-}
-
struct fdi_m_n {
u32 tu;
u32 gmch_m;
@@ -2902,7 +2988,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2919,7 +3005,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
@@ -2934,8 +3020,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size >>= 2; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+ plane ? "B" : "A",
+ size);
return size;
}
@@ -2950,14 +3036,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ plane ? "B" : "A", size);
return size;
}
static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
+ int planeb_clock, int sr_hdisplay, int unused,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct cxsr_latency *latency;
@@ -3069,13 +3155,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
entries_required = (((sr_latency_ns / line_time_us) +
1000) / 1000) * pixel_size * 64;
entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
+ g4x_cursor_wm_info.cacheline_size);
cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
if (cursor_sr > g4x_cursor_wm_info.max_wm)
@@ -3087,7 +3173,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3125,7 +3211,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
srwm = I965_FIFO_SIZE - sr_entries;
@@ -3134,11 +3220,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm &= 0x1ff;
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
+ pixel_size * 64;
sr_entries = DIV_ROUND_UP(sr_entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (sr_entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3146,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
- if (IS_I965GM(dev))
+ if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
@@ -3180,9 +3266,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
int sr_clock, sr_entries = 0;
/* Create copies of the base settings for each pipe */
- if (IS_I965GM(dev) || IS_I945GM(dev))
+ if (IS_CRESTLINE(dev) || IS_I945GM(dev))
planea_params = planeb_params = i945_wm_info;
- else if (IS_I9XX(dev))
+ else if (!IS_GEN2(dev))
planea_params = planeb_params = i915_wm_info;
else
planea_params = planeb_params = i855_wm_info;
@@ -3217,7 +3303,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
+ pixel_size * sr_hdisplay;
sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
@@ -3242,7 +3328,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
+ planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
@@ -3276,146 +3362,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
-static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool ironlake_compute_wm0(struct drm_device *dev,
+ int pipe,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int sr_wm, cursor_wm;
- unsigned long line_time_us;
- int sr_clock, entries_required;
- u32 reg_value;
- int line_count;
- int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size = 0;
+ int line_time_us, line_count, entries;
- /* Need htotal for all active display plane */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
- if (intel_crtc->plane == 0)
- planea_htotal = crtc->mode.htotal;
- else
- planeb_htotal = crtc->mode.htotal;
- }
- }
-
- /* Calculate and update the watermark for plane A */
- if (planea_clock) {
- entries_required = ((planea_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planea_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planea_wm > (int)ironlake_display_wm_info.max_wm)
- planea_wm = ironlake_display_wm_info.max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planea_htotal * 1000) / planea_clock;
-
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
- /* calculate the cursor watermark for cursor A */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursora_wm > ironlake_cursor_wm_info.max_wm)
- cursora_wm = ironlake_cursor_wm_info.max_wm;
-
- reg_value = I915_READ(WM0_PIPEA_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursora_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
- "cursor: %d\n", planea_wm, cursora_wm);
- }
- /* Calculate and update the watermark for plane B */
- if (planeb_clock) {
- entries_required = ((planeb_clock / 1000) * pixel_size *
- ILK_LP0_PLANE_LATENCY) / 1000;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
- planeb_wm = entries_required +
- ironlake_display_wm_info.guard_size;
-
- if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
- planeb_wm = ironlake_display_wm_info.max_wm;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = (planeb_htotal * 1000) / planeb_clock;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_display_wm_info.cacheline_size);
+ *plane_wm = entries + ironlake_display_wm_info.guard_size;
+ if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
+ *plane_wm = ironlake_display_wm_info.max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_wm_info.cacheline_size);
+ *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
+ if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
+ *cursor_wm = ironlake_cursor_wm_info.max_wm;
- /* Use ns/us then divide to preserve precision */
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ return true;
+}
- /* calculate the cursor watermark for cursor B */
- entries_required = line_count * 64 * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_wm_info.cacheline_size);
- cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
- if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
- cursorb_wm = ironlake_cursor_wm_info.max_wm;
+static void ironlake_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int sr_hdisplay, int sr_htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int plane_wm, cursor_wm, enabled;
+ int tmp;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
- reg_value = I915_READ(WM0_PIPEB_ILK);
- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
- (cursorb_wm & WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, reg_value);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
- "cursor: %d\n", planeb_wm, cursorb_wm);
+ if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
}
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
- if (!planea_clock || !planeb_clock) {
-
+ tmp = 0;
+ if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
+ unsigned long line_time_us;
+ int small, large, plane_fbc;
+ int sr_clock, entries;
+ int line_count, line_size;
/* Read the self-refresh latency. The unit is 0.5us */
int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (sr_htotal * 1000) / sr_clock;
/* Use ns/us then divide to preserve precision */
line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
- / 1000;
+ / 1000;
+ line_size = sr_hdisplay * pixel_size;
- /* calculate the self-refresh watermark for display plane */
- entries_required = line_count * sr_hdisplay * pixel_size;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_srwm_info.cacheline_size);
- sr_wm = entries_required +
- ironlake_display_srwm_info.guard_size;
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
+ large = line_count * line_size;
- /* calculate the self-refresh watermark for display cursor */
- entries_required = line_count * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_srwm_info.cacheline_size);
- cursor_wm = entries_required +
- ironlake_cursor_srwm_info.guard_size;
+ entries = DIV_ROUND_UP(min(small, large),
+ ironlake_display_srwm_info.cacheline_size);
- /* configure watermark and enable self-refresh */
- reg_value = I915_READ(WM1_LP_ILK);
- reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
- WM1_LP_CURSOR_MASK);
- reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
- (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+ plane_fbc = entries * 64;
+ plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
- I915_WRITE(WM1_LP_ILK, reg_value);
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_wm, cursor_wm);
+ plane_wm = entries + ironlake_display_srwm_info.guard_size;
+ if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
+ plane_wm = ironlake_display_srwm_info.max_wm;
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
- }
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ ironlake_cursor_srwm_info.cacheline_size);
+
+ cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
+ if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
+ cursor_wm = ironlake_cursor_srwm_info.max_wm;
+
+ /* configure watermark and enable self-refresh */
+ tmp = (WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (plane_fbc << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
+ " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+ }
+ I915_WRITE(WM1_LP_ILK, tmp);
+ /* XXX setup WM2 and WM3 */
}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3447,7 +3517,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
- */
+ */
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3463,15 +3533,15 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ if (intel_crtc->active) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
+ intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
sr_hdisplay = crtc->mode.hdisplay;
@@ -3502,62 +3572,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ u32 fp_reg, dpll_reg;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
- int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
- int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
- int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
- int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
- int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
- int lvds_reg = LVDS;
- u32 temp;
- int sdvo_pixel_multiply;
+ u32 reg, temp;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_encoder *intel_encoder;
-
- if (encoder->crtc != crtc)
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
continue;
- intel_encoder = enc_to_intel_encoder(encoder);
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
- if (intel_encoder->needs_tv_clock)
+ if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
@@ -3573,7 +3616,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = intel_encoder;
+ has_edp_encoder = encoder;
break;
}
@@ -3583,15 +3626,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (IS_I9XX(dev)) {
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
refclk = 96000;
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev) &&
+ (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
refclk = 120000; /* 120MHz refclk */
} else {
refclk = 48000;
}
-
/*
* Returns a set of divisors for the desired target clock with the given
@@ -3607,13 +3650,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &reduced_clock);
+ dev_priv->lvds_downclock,
+ refclk,
+ &reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If the different P is found, it means that we can't
@@ -3622,7 +3665,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* feature.
*/
DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
+ "LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
@@ -3630,14 +3673,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
+ && adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
+ && adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
@@ -3649,34 +3692,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
int lane = 0, link_bw, bpp;
- /* eDP doesn't require FDI link, so just set DP M/N
+ /* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder) {
+ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
} else {
- /* DP over FDI requires target mode clock
+ /* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp)
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- link_bw = 270000;
+
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
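+ /* Worked example (illustrative): for the ~2.7GHz link described above
+ * the stored divider is 27, so link_bw = 27 * 100000/10 = 270000,
+ * matching the value the removed code hard-coded.
+ */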
}
/* determine panel color depth */
- temp = I915_READ(pipeconf_reg);
+ temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
if (is_lvds) {
- int lvds_reg = I915_READ(PCH_LVDS);
/* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
- switch (dev_priv->edp_bpp/3) {
+ } else if (has_edp_encoder) {
+ switch (dev_priv->edp.bpp/3) {
case 8:
temp |= PIPE_8BPC;
break;
@@ -3692,8 +3742,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else
temp |= PIPE_8BPC;
- I915_WRITE(pipeconf_reg, temp);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), temp);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3738,33 +3787,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
-
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
-
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (dev_priv->lvds_use_ssc)
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
+ /* Enable SSC on PCH eDP if needed */
+ if (dev_priv->lvds_use_ssc) {
+ DRM_ERROR("enabling SSC on PCH\n");
+ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+ }
}
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
}
}
@@ -3780,23 +3835,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ else if (HAS_PCH_SPLIT(dev))
+ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
dpll |= DPLL_DVO_HIGH_SPEED;
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (HAS_PCH_SPLIT(dev))
- dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
- if (is_dp)
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -3824,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3851,7 +3909,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = I915_READ(pipeconf_reg);
+ pipeconf = I915_READ(PIPECONF(pipe));
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3865,7 +3923,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
- if (pipe == 0 && !IS_I965G(dev)) {
+ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
*
@@ -3874,51 +3932,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
*/
if (mode->clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
- pipeconf |= PIPEACONF_DOUBLE_WIDE;
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
else
- pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
+ pipeconf |= PIPECONF_ENABLE;
dpll |= DPLL_VCO_ENABLE;
-
- /* Disable the panel fitter if it was on our pipe */
- if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
- I915_WRITE(PFIT_CONTROL, 0);
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
if (HAS_PCH_SPLIT(dev)) {
- fp_reg = pch_fp_reg;
- dpll_reg = pch_dpll_reg;
+ fp_reg = PCH_FP0(pipe);
+ dpll_reg = PCH_DPLL(pipe);
+ } else {
+ fp_reg = FP0(pipe);
+ dpll_reg = DPLL(pipe);
}
- if (!has_edp_encoder) {
+ /* PCH eDP needs FDI, but CPU eDP does not */
+ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- I915_READ(dpll_reg);
+
+ POSTING_READ(dpll_reg);
udelay(150);
}
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (trans_dpll_sel == 0)
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ if (pipe == 0)
+ temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
else
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
I915_WRITE(PCH_DPLL_SEL, temp);
- I915_READ(PCH_DPLL_SEL);
- udelay(150);
- }
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf &= ~PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ POSTING_READ(PCH_DPLL_SEL);
+ udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3926,58 +3980,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* things on.
*/
if (is_lvds) {
- u32 lvds;
-
+ reg = LVDS;
if (HAS_PCH_SPLIT(dev))
- lvds_reg = PCH_LVDS;
+ reg = PCH_LVDS;
- lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ temp = I915_READ(reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
if (HAS_PCH_CPT(dev))
- lvds |= PORT_TRANS_B_SEL_CPT;
+ temp |= PORT_TRANS_B_SEL_CPT;
else
- lvds |= LVDS_PIPEB_SELECT;
+ temp |= LVDS_PIPEB_SELECT;
} else {
if (HAS_PCH_CPT(dev))
- lvds &= ~PORT_TRANS_SEL_MASK;
+ temp &= ~PORT_TRANS_SEL_MASK;
else
- lvds &= ~LVDS_PIPEB_SELECT;
+ temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
- lvds |= dev_priv->lvds_border_bits;
+ temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
- lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
- /* set the dithering flag */
- if (IS_I965G(dev)) {
- if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf |= PIPE_ENABLE_DITHER;
- pipeconf |= PIPE_DITHER_TYPE_ST01;
- } else
- lvds |= LVDS_ENABLE_DITHER;
- } else {
- if (!HAS_PCH_SPLIT(dev)) {
- lvds &= ~LVDS_ENABLE_DITHER;
- }
- }
+ /* set the dithering flag on non-PCH LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
}
- I915_WRITE(lvds_reg, lvds);
- I915_READ(lvds_reg);
+ I915_WRITE(reg, temp);
}
- if (is_dp)
+
+ /* set the dithering flag for panels (LVDS/eDP) and clear it for anything else. */
+ if (HAS_PCH_SPLIT(dev)) {
+ pipeconf &= ~PIPECONF_DITHER_EN;
+ pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+ if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ pipeconf |= PIPECONF_DITHER_EN;
+ pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ }
+ }
+
+ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
- else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
if (pipe == 0) {
I915_WRITE(TRANSA_DATA_M1, 0);
@@ -3992,29 +4048,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (!has_edp_encoder) {
+ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ temp = 0;
if (is_sdvo) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
- ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
- } else
- I915_WRITE(dpll_md_reg, 0);
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
}
- I915_READ(dpll_reg);
+
/* Wait for the clocks to stabilize. */
+ POSTING_READ(dpll_reg);
udelay(150);
}
+ intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
@@ -4024,7 +4086,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
} else {
I915_WRITE(fp_reg + 4, fp);
- intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4043,70 +4104,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
- /* pipesrc and dspsize control the size that is scaled from, which should
- * always be the user's requested size.
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
*/
if (!HAS_PCH_SPLIT(dev)) {
- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
- (mode->hdisplay - 1));
- I915_WRITE(dsppos_reg, 0);
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
}
- I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
- I915_WRITE(link_m1_reg, m_n.link_m);
- I915_WRITE(link_n1_reg, m_n.link_n);
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder) {
+ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- } else {
- /* enable FDI RX PLL too */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
- I915_READ(fdi_rx_reg);
- udelay(200);
-
- /* enable FDI TX PLL too */
- temp = I915_READ(fdi_tx_reg);
- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
-
- /* enable FDI RX PCDCLK */
- temp = I915_READ(fdi_rx_reg);
- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
- I915_READ(fdi_rx_reg);
- udelay(200);
}
}
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_READ(pipeconf_reg);
+ I915_WRITE(PIPECONF(pipe), pipeconf);
+ POSTING_READ(PIPECONF(pipe));
intel_wait_for_vblank(dev, pipe);
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
- I915_WRITE(dspcntr_reg, dspcntr);
+ I915_WRITE(DSPCNTR(plane), dspcntr);
- /* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
intel_update_watermarks(dev);
@@ -4199,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4212,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos = 0;
- if (intel_crtc->cursor_on && crtc->fb) {
+ if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4324,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->phys_obj->handle->busaddr;
}
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
finish:
@@ -4344,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
fail_unpin:
@@ -4363,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
- intel_crtc_update_cursor(crtc);
+ intel_crtc_update_cursor(crtc, true);
return 0;
}
@@ -4432,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_crtc *supported_crtc = NULL;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4513,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4559,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4663,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4678,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4720,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
}
/* Schedule downclock */
- if (schedule)
- mod_timer(&intel_crtc->idle_timer, jiffies +
- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4858,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}
/* Non-busy -> busy, upclock */
- intel_increase_pllclock(crtc, true);
+ intel_increase_pllclock(crtc);
intel_crtc->busy = true;
} else {
/* Busy -> busy, put off timer */
@@ -4872,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (work) {
+ cancel_work_sync(&work->work);
+ kfree(work);
+ }
drm_crtc_cleanup(crtc);
+
kfree(intel_crtc);
}
@@ -4928,12 +4991,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->pending_flip_obj);
-
- /* Initial scanout buffer will have a 0 pending flip count */
- if ((atomic_read(&obj_priv->pending_flip) == 0) ||
- atomic_dec_and_test(&obj_priv->pending_flip))
- DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ obj_priv = to_intel_bo(work->old_fb_obj);
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj_priv->pending_flip.counter);
+ if (atomic_read(&obj_priv->pending_flip) == 0)
+ wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5014,7 +5076,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, true);
if (ret)
goto cleanup_work;
@@ -5023,29 +5085,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
crtc->fb = fb;
- ret = i915_gem_object_flush_write_domain(obj);
- if (ret)
- goto cleanup_objs;
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto cleanup_objs;
- obj_priv = to_intel_bo(obj);
- atomic_inc(&obj_priv->pending_flip);
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane,
+ &to_intel_bo(work->old_fb_obj)->pending_flip);
+
work->pending_flip_obj = obj;
+ obj_priv = to_intel_bo(obj);
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ BEGIN_LP_RING(2);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(0);
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
@@ -5126,15 +5192,14 @@ cleanup_work:
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
.mode_set_base_atomic = intel_pipe_set_base_atomic,
- .prepare = intel_crtc_prepare,
- .commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5160,8 +5225,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- intel_crtc->pipe = pipe;
- intel_crtc->plane = pipe;
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
@@ -5171,9 +5234,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
- intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ intel_crtc->plane = !pipe;
}
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5183,6 +5246,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = -1;
+ intel_crtc->active = true; /* force the pipe off on setup_init_config */
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_helper_funcs.prepare = ironlake_crtc_prepare;
+ intel_helper_funcs.commit = ironlake_crtc_commit;
+ } else {
+ intel_helper_funcs.prepare = i9xx_crtc_prepare;
+ intel_helper_funcs.commit = i9xx_crtc_commit;
+ }
+
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -5218,38 +5291,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return 0;
}
-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
- struct drm_crtc *crtc = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->pipe == pipe)
- break;
- }
- return crtc;
-}
-
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
+ struct intel_encoder *encoder;
int index_mask = 0;
- struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- if (type_mask & intel_encoder->clone_mask)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ if (type_mask & encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
+
return index_mask;
}
-
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool dpd_is_edp = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -5338,12 +5398,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_encoder_clones(dev,
- intel_encoder->clone_mask);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(dev, encoder->clone_mask);
}
}
@@ -5377,8 +5435,25 @@ int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
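+ /* Validate the object before wrapping it in a framebuffer: Y-tiled
+ * surfaces, strides that are not a multiple of 64 bytes, and depths
+ * other than 8/16/24/32 bpp are rejected below.
+ */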
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ return -EINVAL;
+
+ if (mode_cmd->pitch & 63)
+ return -EINVAL;
+
+ switch (mode_cmd->bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -5487,6 +5562,10 @@ void ironlake_enable_drps(struct drm_device *dev)
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
@@ -5529,7 +5608,7 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
@@ -5660,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
/* Required for FBC */
dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
@@ -5674,13 +5753,20 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
/*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
+ /*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
* The bit 22/21 of 0x42004
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
@@ -5728,20 +5814,20 @@ void intel_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
- } else if (IS_I965G(dev)) {
+ } else if (IS_BROADWATER(dev)) {
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
- } else if (IS_I9XX(dev)) {
+ } else if (IS_GEN3(dev)) {
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5823,7 +5909,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_I965GM(dev)) {
+ } else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5856,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- if (IS_IRONLAKE(dev)) {
+ if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {
@@ -5883,9 +5969,9 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_wm = pineview_update_wm;
} else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
- else if (IS_I965G(dev))
+ else if (IS_GEN4(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev)) {
+ else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_I85X(dev)) {
@@ -5999,24 +6085,24 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_display(dev);
- if (IS_I965G(dev)) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_I9XX(dev)) {
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
/* set memory base */
- if (IS_I9XX(dev))
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- else
+ if (IS_GEN2(dev))
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+ else
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
- if (IS_MOBILE(dev) || IS_I9XX(dev))
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
@@ -6052,10 +6138,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
+ drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- intel_fbdev_fini(dev);
+ intel_unregister_dsm_handler();
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
@@ -6063,12 +6150,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
continue;
intel_crtc = to_intel_crtc(crtc);
- intel_increase_pllclock(crtc, false);
- del_timer_sync(&intel_crtc->idle_timer);
+ intel_increase_pllclock(crtc);
}
- del_timer_sync(&dev_priv->idle_timer);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
@@ -6097,33 +6181,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
+ /* Disable the irq before mode object teardown, as the irq handler might
+ * enqueue unpin/hotplug work. */
+ drm_irq_uninstall(dev);
+ cancel_work_sync(&dev_priv->hotplug_work);
+
+ /* Shut off idle work before the crtcs get freed. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ del_timer_sync(&intel_crtc->idle_timer);
+ }
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
drm_mode_config_cleanup(dev);
}
-
/*
* Return which encoder is currently attached for connector.
*/
-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- continue;
+ return &intel_attached_encoder(connector)->base;
+}
- encoder = obj_to_encoder(obj);
- return encoder;
- }
- return NULL;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9ab8708ac6ba..891f4f1d63b1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,15 +42,13 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
+ int force_audio;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -58,14 +56,69 @@ struct intel_dp {
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ struct drm_property *force_audio_property;
};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise. Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+ return intel_dp->is_pch_edp;
+}
+
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+ return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dp, base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp;
+
+ if (!encoder)
+ return false;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ return is_pch_edp(intel_dp);
}
-static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- return (pixel_clock * dev_priv->edp_bpp) / 8;
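+ /* Note: the "+ 7" makes the division round the eDP bandwidth up to
+ * whole bytes; the non-eDP path assumes 24bpp (3 bytes per pixel).
+ */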
+ if (is_edp(intel_dp))
+ return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
else
return pixel_clock * 3;
}
@@ -145,15 +198,13 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_dp) &&
+ if (!is_edp(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
+ *
+ * Note that PCH attached eDP panels should use a 125MHz input
+ * clock divider.
*/
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
else
@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
+ /* Just use VBT values for eDP */
+ if (is_edp(intel_dp)) {
+ intel_dp->lane_count = dev_priv->edp.lanes;
+ intel_dp->link_bw = dev_priv->edp.rate;
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
- /* okay we failed just pick the highest */
- intel_dp->lane_count = max_lane_count;
- intel_dp->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
- "count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
-
- return true;
- }
-
return false;
}
@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-bool intel_pch_has_edp(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
- if (encoder->crtc != crtc)
- continue;
-
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->is_pch_edp;
- }
- return false;
-}
-
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
- if (IS_PCH_eDP(intel_dp))
- bpp = dev_priv->edp_bpp;
+ break;
+ } else if (is_edp(intel_dp)) {
+ lane_count = dev_priv->edp.lanes;
+ bpp = dev_priv->edp.bpp;
break;
}
}
@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;
@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
intel_dp->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
@@ -754,13 +788,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_panel_on (struct drm_device *dev)
+/* Returns true if the panel was already on when called */
+static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
if (I915_READ(PCH_PP_STATUS) & PP_ON)
- return;
+ return true;
pp = I915_READ(PCH_PP_CONTROL);
@@ -771,21 +807,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
+ 5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ return false;
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
+ PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
pp = I915_READ(PCH_PP_CONTROL);
@@ -796,15 +841,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
- /* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ /* Ouch. We need to wait here for some panels, like Dell e6510
+ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+ */
+ msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -813,6 +863,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
u32 pp;
DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(300);
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
@@ -837,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
+ dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ udelay(200);
}
static void ironlake_edp_pll_off(struct drm_encoder *encoder)
@@ -848,8 +907,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
u32 dpa_ctl;
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
+ dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
udelay(200);
}
@@ -857,29 +917,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(dev);
- ironlake_edp_pll_on(encoder);
+ ironlake_edp_panel_on(intel_dp);
+ if (!is_pch_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+ else
+ ironlake_edp_pll_off(encoder);
}
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(intel_dp);
}
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_dp);
- }
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ intel_dp_start_link_train(intel_dp);
+
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_on(intel_dp);
+
+ intel_dp_complete_link_train(intel_dp);
+
+ if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
@@ -892,22 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
+ intel_dp_link_down(intel_dp);
+ if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
- }
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_panel_on(dev);
- intel_dp_link_train(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_backlight_on(dev);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
}
+ if (is_edp(intel_dp))
+ ironlake_edp_backlight_on(dev);
}
intel_dp->dpms_mode = mode;
}
@@ -917,14 +979,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp)
{
int ret;
ret = intel_dp_aux_native_read(intel_dp,
DP_LANE0_1_STATUS,
- link_status, DP_LINK_STATUS_SIZE);
+ intel_dp->link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
return false;
return true;
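The six bytes fetched above start at DPCD address 0x202 (DP_LANE0_1_STATUS); helpers such as intel_get_lane_status() index into that cached block and pick a 4-bit nibble per lane. A minimal userspace sketch of that decoding, with the bit names defined locally for illustration (they follow the DisplayPort spec, not anything added by this patch):

#include <stdint.h>
#include <stdio.h>

#define DP_LANE_CR_DONE          (1 << 0)
#define DP_LANE_CHANNEL_EQ_DONE  (1 << 1)
#define DP_LANE_SYMBOL_LOCKED    (1 << 2)

/* link_status[] holds DPCD bytes 0x202..0x207; each byte packs two lanes,
 * four status bits per lane. */
static uint8_t get_lane_status(const uint8_t link_status[6], int lane)
{
	int i = lane >> 1;          /* lanes 0/1 share byte 0, lanes 2/3 byte 1 */
	int s = (lane & 1) * 4;     /* low or high nibble */
	return (link_status[i] >> s) & 0xf;
}

int main(void)
{
	/* example: all four lanes trained, interlane align done */
	uint8_t link_status[6] = { 0x77, 0x77, 0x01, 0x00, 0x00, 0x00 };
	for (int lane = 0; lane < 4; lane++) {
		uint8_t s = get_lane_status(link_status, lane);
		printf("lane %d: CR=%d EQ=%d LOCK=%d\n", lane,
		       !!(s & DP_LANE_CR_DONE),
		       !!(s & DP_LANE_CHANNEL_EQ_DONE),
		       !!(s & DP_LANE_SYMBOL_LOCKED));
	}
	return 0;
}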
@@ -999,18 +1060,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
- uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane_count,
- uint8_t train_set[4])
+intel_get_adjust_train(struct intel_dp *intel_dp)
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
- for (lane = 0; lane < lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
if (this_v > v)
v = this_v;
@@ -1025,15 +1083,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
- train_set[lane] = v | p;
+ intel_dp->train_set[lane] = v | p;
}
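Each intel_dp->train_set[] byte packs the requested voltage swing in its low bits together with the pre-emphasis level and the two "max reached" flags that the retry loops below test. A self-contained sketch of that packing, with the DPCD training-field values defined locally (the driver takes them from drm_dp_helper.h) and a deliberately simplified rule that flags "max reached" only at level 3:

#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_VOLTAGE_SWING_MASK        0x03
#define DP_TRAIN_MAX_SWING_REACHED         (1 << 2)
#define DP_TRAIN_PRE_EMPHASIS_SHIFT        3
#define DP_TRAIN_PRE_EMPHASIS_MASK         0x18
#define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)

/* Build one TRAINING_LANEx_SET byte from swing/pre-emphasis levels 0..3.
 * The "max reached" rule here is simplified; the driver's limit depends on
 * the hardware generation and the chosen swing. */
static uint8_t pack_train_set(unsigned vswing, unsigned preemph)
{
	uint8_t v = vswing & DP_TRAIN_VOLTAGE_SWING_MASK;
	uint8_t p = (preemph << DP_TRAIN_PRE_EMPHASIS_SHIFT) & DP_TRAIN_PRE_EMPHASIS_MASK;

	if (vswing >= 3)
		v |= DP_TRAIN_MAX_SWING_REACHED;
	if (preemph >= 3)
		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	return v | p;
}

int main(void)
{
	/* level-2 swing with level-1 pre-emphasis encodes as 0x0a */
	printf("train_set byte: 0x%02x\n", pack_train_set(2, 1));
	return 0;
}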
static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(struct intel_dp *intel_dp)
{
- uint32_t signal_levels = 0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t signal_levels = 0;
+ u8 train_set = intel_dp->train_set[0];
+ u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
+ u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+
+ if (is_edp(intel_dp)) {
+ vswing = dev_priv->edp.vswing;
+ preemphasis = dev_priv->edp.preemphasis;
+ }
- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ switch (vswing) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
@@ -1048,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
- switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ switch (preemphasis) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1116,18 +1184,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+intel_channel_eq_ok(struct intel_dp *intel_dp)
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(link_status,
+ lane_align = intel_dp_link_status(intel_dp->link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = intel_get_lane_status(intel_dp->link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@@ -1135,159 +1203,194 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
+intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
+ return false;
+
+ return true;
+}
+
+static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
- uint8_t dp_train_pat,
- uint8_t train_set[4])
+ uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
+ if (!intel_dp_aux_handshake_required(intel_dp))
+ return true;
+
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
ret = intel_dp_aux_native_write(intel_dp,
- DP_TRAINING_LANE0_SET, train_set, 4);
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, 4);
if (ret != 4)
return false;
return true;
}
+/* Enable corresponding port and start training pattern 1 */
static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
- bool channel_eq = false;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
/* Enable output, wait for it to become active */
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
- /* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ if (intel_dp_aux_handshake_required(intel_dp))
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
- memset(train_set, 0, 4);
+ memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1, train_set))
+ DP_TRAINING_PATTERN_1))
break;
/* Set training pattern 1 */
- udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ udelay(500);
+	if (!intel_dp_aux_handshake_required(intel_dp)) {
break;
+ } else {
+ if (!intel_dp_get_link_status(intel_dp))
+ break;
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
- clock_recovery = true;
- break;
- }
-
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ clock_recovery = true;
break;
- if (i == intel_dp->lane_count)
- break;
+ }
- /* Check to see if we've tried the same voltage 5 times */
- if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count)
break;
- } else
- tries = 0;
- voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ }
}
+ intel_dp->DP = DP;
+}
+
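For sinks that do require the AUX handshake, the clock-recovery loop above exits in three ways: recovery succeeds, every active lane already reports max swing, or the same voltage has been retried five times. A compact userspace model of just that exit policy, assuming a sink that never signals success (lane count and training bytes are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_TRAIN_VOLTAGE_SWING_MASK  0x03
#define DP_TRAIN_MAX_SWING_REACHED   (1 << 2)

/* Mirror the three exit conditions of the clock-recovery loop in
 * intel_dp_start_link_train(). */
static bool keep_training(const uint8_t train_set[4], int lane_count, bool cr_ok,
			  uint8_t *last_voltage, int *same_voltage_tries)
{
	int lane;

	if (cr_ok)
		return false;                        /* clock recovery done */

	for (lane = 0; lane < lane_count; lane++)
		if (!(train_set[lane] & DP_TRAIN_MAX_SWING_REACHED))
			break;
	if (lane == lane_count)
		return false;                        /* nothing left to raise */

	uint8_t voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
	if (voltage == *last_voltage) {
		if (++*same_voltage_tries == 5)
			return false;                /* stuck at one voltage */
	} else {
		*same_voltage_tries = 0;
	}
	*last_voltage = voltage;
	return true;
}

int main(void)
{
	uint8_t train_set[4] = { 0x01, 0x01, 0x01, 0x01 };
	uint8_t last_voltage = 0xff;
	int tries = 0, iterations = 0;

	while (keep_training(train_set, 4, false, &last_voltage, &tries))
		iterations++;
	printf("gave up after %d retries at the same voltage\n", iterations);
	return 0;
}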
+static void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool channel_eq = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+
/* channel equalization */
tries = 0;
channel_eq = false;
for (;;) {
- /* Use train_set[0] to set the voltage and pre emphasis values */
+ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2, train_set))
+ DP_TRAINING_PATTERN_2))
break;
- udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
- break;
+ udelay(500);
- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
- channel_eq = true;
+ if (!intel_dp_aux_handshake_required(intel_dp)) {
break;
- }
+ } else {
+ if (!intel_dp_get_link_status(intel_dp))
+ break;
- /* Try 5 times */
- if (tries > 5)
- break;
+ if (intel_channel_eq_ok(intel_dp)) {
+ channel_eq = true;
+ break;
+ }
- /* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
- ++tries;
- }
+ /* Try 5 times */
+ if (tries > 5)
+ break;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ ++tries;
+ }
+ }
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1301,32 +1404,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(intel_dp->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(intel_dp->output_reg);
}
+ POSTING_READ(intel_dp->output_reg);
- udelay(17000);
+ msleep(17);
- if (IS_eDP(intel_dp))
+ if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
@@ -1344,32 +1446,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- if (!intel_dp->base.enc.crtc)
+ if (!intel_dp->base.base.crtc)
return;
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
- intel_dp_link_train(intel_dp);
+ if (!intel_channel_eq_ok(intel_dp)) {
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
}
static enum drm_connector_status
-ironlake_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum drm_connector_status status;
+ /* Can't disconnect eDP */
+ if (is_edp(intel_dp))
+ return connector_status_connected;
+
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- {
+ sizeof (intel_dp->dpcd))
+ == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
@@ -1378,26 +1482,13 @@ ironlake_dp_detect(struct drm_connector *connector)
return status;
}
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp, bit;
enum drm_connector_status status;
-
- intel_dp->has_audio = false;
-
- if (HAS_PCH_SPLIT(dev))
- return ironlake_dp_detect(connector);
+ uint32_t temp, bit;
switch (intel_dp->output_reg) {
case DP_B:
@@ -1419,31 +1510,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
+ if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
- return status;
+
+	return status;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+ if (status != connector_status_connected)
+ return status;
+
+ if (intel_dp->force_audio) {
+ intel_dp->has_audio = intel_dp->force_audio > 0;
+ } else {
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ }
+
+ return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
if (ret) {
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
- !dev_priv->panel_fixed_mode) {
+ if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
@@ -1459,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1470,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
+static int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == intel_dp->force_audio_property) {
+ if (val == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = val;
+
+ if (val > 0 && intel_dp->has_audio)
+ return 0;
+ if (val < 0 && !intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = val > 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dp->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
+}
+
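The new force_audio connector property is a three-way switch: -1 forces audio off, 1 forces it on, and 0 (the default) defers to what the EDID reports. intel_hdmi picks up the same convention further down. A tiny sketch of the resolution rule, written as plain C for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Resolve the effective audio state from the force_audio property value
 * (-1, 0 or 1) and the EDID audio capability, as the detect and
 * set_property paths above do. */
static bool resolve_audio(int force_audio, bool edid_has_audio)
{
	if (force_audio)
		return force_audio > 0;
	return edid_has_audio;
}

int main(void)
{
	printf("auto, EDID has audio: %d\n", resolve_audio(0, true));   /* 1 */
	printf("forced off:           %d\n", resolve_audio(-1, true));  /* 0 */
	printf("forced on:            %d\n", resolve_audio(1, false));  /* 1 */
	return 0;
}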
static void
intel_dp_destroy (struct drm_connector *connector)
{
@@ -1478,6 +1644,15 @@ intel_dp_destroy (struct drm_connector *connector)
kfree(connector);
}
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ i2c_del_adapter(&intel_dp->adapter);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dp);
+}
+
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
@@ -1490,20 +1665,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_set_property,
.destroy = intel_dp_destroy,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dp_encoder_destroy,
};
-void
+static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1554,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
return false;
}
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ intel_dp->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (intel_dp->force_audio_property) {
+ intel_dp->force_audio_property->values[0] = -1;
+ intel_dp->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
+ }
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1580,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
@@ -1601,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_dp))
+ if (is_edp(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -1612,12 +1802,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp->has_audio = false;
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* Set up the DDC bus. */
@@ -1647,10 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_i2c_init(intel_dp, intel_connector, name);
- intel_encoder->ddc_bus = &intel_dp->adapter;
+ /* Cache some DPCD data in the eDP case */
+ if (is_edp(intel_dp)) {
+ int ret;
+ bool was_on;
+
+ was_on = ironlake_edp_panel_on(intel_dp);
+ ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ if (ret == sizeof(intel_dp->dpcd)) {
+ if (intel_dp->dpcd[0] >= 0x11)
+ dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ DRM_ERROR("failed to retrieve link info\n");
+ }
+ if (!was_on)
+ ironlake_edp_panel_off(dev);
+ }
+
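The DPCD bytes cached here are what intel_dp_aux_handshake_required() relies on: a receiver reporting DPCD revision 1.1 or newer (dpcd[0] >= 0x11) may advertise DP_NO_AUX_HANDSHAKE_LINK_TRAINING in byte 3, which lets eDP skip the per-step AUX handshake during training. A small sketch of that decode, with the bit value written out locally for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)   /* DPCD byte 3, bit 6 */

/* Same capability check as above: only DPCD 1.1+ receivers may advertise
 * "no AUX handshake" link training. */
static bool no_aux_handshake(const uint8_t dpcd[4])
{
	return dpcd[0] >= 0x11 &&
	       (dpcd[3] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
}

int main(void)
{
	uint8_t dpcd[4] = { 0x11, 0x0a, 0x04, 0x40 };   /* rev 1.1, bit 6 set */
	printf("skip AUX handshake: %d\n", no_aux_handshake(dpcd));
	return 0;
}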
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (is_edp(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
@@ -1662,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
}
+ intel_dp_add_properties(intel_dp, connector);
+
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8828b3ac6414..9af9f86a8765 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,14 +26,12 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
#include "i915_drv.h"
#include "drm_crtc.h"
-
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
-#define wait_for(COND, MS, W) ({ \
+#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (! (COND)) { \
@@ -41,11 +39,24 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W) msleep(W); \
+ if (W && !in_dbg_master()) msleep(W); \
} \
ret__; \
})
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+#define MSLEEP(x) do { \
+ if (in_dbg_master()) \
+ mdelay(x); \
+ else \
+ msleep(x); \
+} while(0)
+
+#define KHz(x) (1000*x)
+#define MHz(x) KHz(1000*x)
+
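The reworked _wait_for() polls a condition with a timeout and an optional sleep between polls; wait_for() sleeps 1 ms per iteration while wait_for_atomic() spins. A userspace analogue of the same pattern, illustrative only (the driver version is built on jiffies, msleep() and in_dbg_master()):

#include <errno.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll COND until it is true or MS milliseconds pass, sleeping ~1 ms
 * between polls; returns 0 or -ETIMEDOUT, like the macro above.
 * Uses a GCC statement expression, just as the kernel macro does. */
#define wait_for(COND, MS) ({						\
	long timeout__ = now_ms() + (MS);				\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (now_ms() > timeout__) {				\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL); \
	}								\
	ret__;								\
})

int main(void)
{
	long deadline = now_ms() + 20;
	/* condition becomes true after ~20 ms, well inside the 100 ms budget */
	int ret = wait_for(now_ms() >= deadline, 100);
	printf("wait_for returned %d\n", ret);
	return ret;
}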
/*
* Display related stuff
*/
@@ -96,24 +107,39 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-struct intel_i2c_chan {
- struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
- u32 reg; /* GPIO reg */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
};
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
struct intel_encoder {
- struct drm_encoder enc;
+ struct drm_encoder base;
int type;
- struct i2c_adapter *i2c_bus;
- struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
-};
-
-struct intel_crtc;
-struct intel_overlay {
- struct drm_device *dev;
- struct intel_crtc *crtc;
- struct drm_i915_gem_object *vid_bo;
- struct drm_i915_gem_object *old_vid_bo;
- int active;
- int pfit_active;
- u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
- u32 color_key;
- u32 brightness, contrast, saturation;
- u32 old_xscale, old_yscale;
- /* register access */
- u32 flip_addr;
- struct drm_i915_gem_object *reg_bo;
- void *virt_addr;
- /* flip handling */
- uint32_t last_flip_req;
- int hw_wedged;
-#define HW_WEDGED 1
-#define NEEDS_WAIT_FOR_FLIP 2
-#define RELEASE_OLD_VID 3
-#define SWITCH_OFF_STAGE_1 4
-#define SWITCH_OFF_STAGE_2 5
+ struct intel_encoder *encoder;
};
struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
+ bool active; /* is the crtc on? independent of the dpms mode */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
@@ -168,14 +170,53 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visible, cursor_on;
+ bool cursor_visible;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+
+struct dip_infoframe {
+ uint8_t type; /* HB0 */
+ uint8_t ver; /* HB1 */
+ uint8_t len; /* HB2 - body len, not including checksum */
+ uint8_t ecc; /* Header ECC */
+ uint8_t checksum; /* PB0 */
+ union {
+ struct {
+ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+ uint8_t Y_A_B_S;
+ /* PB2 - C 7:6, M 5:4, R 3:0 */
+ uint8_t C_M_R;
+ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+ uint8_t ITC_EC_Q_SC;
+ /* PB4 - VIC 6:0 */
+ uint8_t VIC;
+ /* PB5 - PR 3:0 */
+ uint8_t PR;
+ /* PB6 to PB13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } avi;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -186,16 +227,12 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
-
+/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
@@ -252,7 +298,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_gem_object *obj,
+ bool pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
@@ -267,9 +314,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7c9ec1472d46..ea373283c93b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
- .gpio = GPIOE,
+ .gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
}
};
@@ -88,7 +88,13 @@ struct intel_dvo {
static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
}
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+ intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
- struct i2c_adapter *i2cbus = NULL;
- int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
}
intel_encoder = &intel_dvo->base;
-
- /* Set up the DDC bus */
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
- if (!intel_encoder->ddc_bus)
- goto free_intel;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
int gpio;
/* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
if (dvo->gpio != 0)
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GPIOB;
+ gpio = GMBUS_PORT_SSC;
else
- gpio = GPIOE;
+ gpio = GMBUS_PORT_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
+ i2c = &dev_priv->gmbus[gpio].adapter;
intel_dvo->dev = *dvo;
- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
- if (!ret)
+ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_dvo_enc_funcs, encoder_type);
- drm_encoder_helper_add(&intel_encoder->enc,
+ drm_encoder_helper_add(&intel_encoder->base,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- intel_i2c_destroy(intel_encoder->ddc_bus);
- /* Didn't find a chip, so tear down. */
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
-free_intel:
+ drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b61966c126d3..af2a1dddc28e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,13 +44,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_fbdev {
- struct drm_fb_helper helper;
- struct intel_framebuffer ifb;
- struct list_head fbdev_list;
- struct drm_display_mode *our_mode;
-};
-
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, fbo);
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
}
- /* Flush everything out, we'll be doing GTT only from now on */
- ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
- if (ret) {
- DRM_ERROR("failed to bind fb: %d.\n", ret);
- goto out_unpin;
- }
-
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intel_fb_find_or_create_single,
};
-int intel_fbdev_destroy(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference(ifb->obj);
+ drm_gem_object_unreference_unlocked(ifb->obj);
ifb->obj = NULL;
}
-
- return 0;
}
int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 926934a482ec..0d0273e7b029 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -40,12 +40,76 @@
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
+ int ddc_bus;
bool has_hdmi_sink;
+ bool has_audio;
+ int force_audio;
+ struct drm_property *force_audio_property;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+ return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_hdmi, base);
+}
+
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+{
+ uint8_t *data = (uint8_t *)avi_if;
+ uint8_t sum = 0;
+ unsigned i;
+
+ avi_if->checksum = 0;
+ avi_if->ecc = 0;
+
+ for (i = 0; i < sizeof(*avi_if); i++)
+ sum += data[i];
+
+ avi_if->checksum = 0x100 - sum;
+}
+
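intel_dip_infoframe_csum() implements the HDMI rule that the infoframe header, payload and checksum bytes must sum to zero modulo 256 (the ecc and checksum fields are zeroed first so they do not skew the sum). A standalone illustration over a minimal AVI header, using the same type/version/length values as the DIP_* defines added to intel_drv.h and a zeroed payload for brevity:

#include <stdint.h>
#include <stdio.h>

/* Checksum chosen so that all bytes, including the checksum itself,
 * sum to 0 (mod 256). */
static uint8_t infoframe_csum(const uint8_t *bytes, unsigned len)
{
	uint8_t sum = 0;
	unsigned i;

	for (i = 0; i < len; i++)
		sum += bytes[i];
	return (uint8_t)(0x100 - sum);
}

int main(void)
{
	/* AVI header: type 0x82, version 2, length 13, then a zeroed payload */
	uint8_t frame[3 + 13] = { 0x82, 0x02, 0x0d };
	uint8_t csum = infoframe_csum(frame, sizeof(frame));
	uint8_t total = csum;
	unsigned i;

	for (i = 0; i < sizeof(frame); i++)
		total += frame[i];
	printf("checksum=0x%02x, total mod 256=%u\n", csum, total);   /* total is 0 */
	return 0;
}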
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint32_t *data = (uint32_t *)&avi_if;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 port;
+ unsigned i;
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+	/* XXX first guess at handling video port, is this correct? */
+ if (intel_hdmi->sdvox_reg == SDVOB)
+ port = VIDEO_DIP_PORT_B;
+ else if (intel_hdmi->sdvox_reg == SDVOC)
+ port = VIDEO_DIP_PORT_C;
+ else
+ return;
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+
+ intel_dip_infoframe_csum(&avi_if);
+ for (i = 0; i < sizeof(avi_if); i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
+ VIDEO_DIP_ENABLE_AVI);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_hdmi->has_hdmi_sink) {
+ /* Required on CPT */
+ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ sdvox |= HDMI_MODE_SELECT;
+
+ if (intel_hdmi->has_audio) {
sdvox |= SDVO_AUDIO_ENABLE;
- if (HAS_PCH_CPT(dev))
- sdvox |= HDMI_MODE_SELECT;
+ sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
}
if (intel_crtc->pipe == 1) {
@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
+
+ intel_hdmi_set_avi_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct edid *edid = NULL;
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
intel_hdmi->has_hdmi_sink = false;
- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+ intel_hdmi->has_audio = false;
+ edid = drm_get_edid(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
}
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio)
+ intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+ }
+
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+ return intel_ddc_get_modes(connector,
+ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == intel_hdmi->force_audio_property) {
+ if (val == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = val;
+
+ if (val > 0 && intel_hdmi->has_audio)
+ return 0;
+ if (val < 0 && !intel_hdmi->has_audio)
+ return 0;
+
+ intel_hdmi->has_audio = val > 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_hdmi->base.base.crtc) {
+ struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->fb);
+ }
+
+ return 0;
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy,
};
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_encoder_destroy,
};
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ intel_hdmi->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (intel_hdmi->force_audio_property) {
+ intel_hdmi->force_audio_property->values[0] = -1;
+ intel_hdmi->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
+ }
+}
+
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
}
intel_encoder = &intel_hdmi->base;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
- "HDMIB");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
- "HDMIC");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
- "HDMID");
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
- if (!intel_encoder->ddc_bus)
- goto err_connector;
intel_hdmi->sdvox_reg = sdvox_reg;
- drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
-
- return;
-
-err_connector:
- drm_connector_cleanup(connector);
- kfree(intel_hdmi);
- kfree(intel_connector);
-
- return;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2649c7df14c..2be4f728ed0c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
*
* Authors:
* Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
*/
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm.h"
@@ -35,79 +34,106 @@
#include "i915_drm.h"
#include "i915_drv.h"
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_i915_private *dev_priv;
+ u32 reg;
+};
+
+void
+intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_GMBUS0, 0);
+ else
+ I915_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev))
+ if (!IS_PINEVIEW(dev_priv->dev))
return;
+
+ val = I915_READ(DSPCLK_GATE_D);
if (enable)
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
else
- I915_WRITE(DSPCLK_GATE_D,
- I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
-#define I2C_RISEFALL_TIME 20
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
static int get_clock(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_CLOCK_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_i2c_chan *chan = data;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
- return ((val & GPIO_DATA_VAL_IN) != 0);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE(gpio->reg, reserved);
+ return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, clock_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(chan->reg, reserved | clock_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+ I915_WRITE(gpio->reg, reserved | clock_bits);
+ POSTING_READ(gpio->reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
- u32 reserved = 0, data_bits;
-
- /* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ struct intel_gpio *gpio = data;
+ struct drm_i915_private *dev_priv = gpio->dev_priv;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(chan->reg, reserved | data_bits);
- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+ I915_WRITE(gpio->reg, reserved | data_bits);
+ POSTING_READ(gpio->reg);
}
-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces. This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_GMBUS0, 0);
- } else {
- I915_WRITE(GMBUS0, 0);
+ if (pin < 1 || pin > 7)
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ gpio->reg += PCH_GPIOA - GPIOA;
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
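+ /* Program each message into GMBUS1; the payload moves through the 32-bit GMBUS3 data register. */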
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
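+ /* Drain the data register a dword at a time; HW_RDY gates each read and SATOER aborts the transfer. */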
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ POSTING_READ(GMBUS2+reg_offset);
+
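+ /* Feed the remaining payload a dword at a time as the hardware signals HW_RDY. */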
+ while (len) {
+ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ POSTING_READ(GMBUS2+reg_offset);
+ }
+ }
+
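+ /* Between messages, wait for the controller to reach its WAIT phase before starting the next cycle. */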
+ if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ return 0;
}
+
+ return num;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
}
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ return bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
/**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- * %GPIOA
- * %GPIOB
- * %GPIOC
- * %GPIOD
- * %GPIOE
- * %GPIOF
- * %GPIOG
- * %GPIOH
- * see PRM for details on how these different busses are used.
*/
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved"
+ "dpd",
+ };
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
- chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
- if (!chan)
- goto out_free;
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
- chan->drm_dev = dev;
- chan->reg = reg;
- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &dev->pdev->dev;
- chan->algo.setsda = set_data;
- chan->algo.setscl = set_clock;
- chan->algo.getsda = get_data;
- chan->algo.getscl = get_clock;
- chan->algo.udelay = 20;
- chan->algo.timeout = usecs_to_jiffies(2200);
- chan->algo.data = chan;
-
- i2c_set_adapdata(&chan->adapter, chan);
-
- if(i2c_bit_add_bus(&chan->adapter))
- goto out_free;
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
- intel_i2c_reset_gmbus(dev);
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ I2C_NAME_SIZE,
+ "gmbus %s",
+ names[i]);
- /* JJJ: raise SCL and SDA? */
- intel_i2c_quirk_set(dev, true);
- set_data(chan, 1);
- set_clock(chan, 1);
- intel_i2c_quirk_set(dev, false);
- udelay(20);
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
- return &chan->adapter;
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
-out_free:
- kfree(chan);
- return NULL;
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ /* Unwind the adapters registered before the failure. */
+ while (i--) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
}
-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_i915_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
{
- struct intel_i2c_chan *chan;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
- if (!adapter)
+ if (dev_priv->gmbus == NULL)
return;
- chan = container_of(adapter,
- struct intel_i2c_chan,
- adapter);
- i2c_del_adapter(&chan->adapter);
- kfree(chan);
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6ec39a86ed06..f1a649990ea9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -43,102 +43,76 @@
/* Private structure for the integrated LVDS support */
struct intel_lvds {
struct intel_encoder base;
+
+ struct edid *edid;
+
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
+ bool pfit_dirty;
+
+ struct drm_display_mode *fixed_mode;
};
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blc_pwm_ctl, reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(reg, (blc_pwm_ctl |
- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ return container_of(encoder, struct intel_lvds, base.base);
}
-/**
- * Returns the maximum level of the backlight duty cycle field.
- */
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_PCH_CTL2;
- else
- reg = BLC_PWM_CTL;
-
- return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ return container_of(intel_attached_encoder(connector),
+ struct intel_lvds, base);
}
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
{
+ struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, status_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
- status_reg = PCH_PP_STATUS;
lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
- status_reg = PP_STATUS;
lvds_reg = LVDS;
}
if (on) {
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
-
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
- POWER_TARGET_ON);
- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
- DRM_ERROR("timed out waiting to enable LVDS pipe");
-
- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
} else {
- intel_lvds_set_backlight(dev, 0);
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+ intel_panel_set_backlight(dev, 0);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
- ~POWER_TARGET_ON);
- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_control = 0;
+ intel_lvds->pfit_dirty = false;
+ }
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_reg);
}
+ POSTING_READ(lvds_reg);
}
static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_device *dev = encoder->dev;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(dev, true);
+ intel_lvds_set_power(intel_lvds, true);
else
- intel_lvds_set_power(dev, false);
+ intel_lvds_set_power(intel_lvds, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- }
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
return MODE_OK;
}
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
- if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
return false;
}
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
}
- /* If we don't have a panel mode, there is nothing we can do */
- if (dev_priv->panel_fixed_mode == NULL)
- return true;
/*
* We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* Make sure pre-965s set dither correctly */
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
/* 965+ wants fuzzy fitting */
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
PFIT_FILTER_FUZZY);
@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Fortunately this is all done for us in hw.
*/
pfit_control |= PFIT_ENABLE;
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ if (pfit_control != intel_lvds->pfit_control ||
+ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_dirty = true;
+ }
dev_priv->lvds_border_bits = border;
/*
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
-
- if (HAS_PCH_SPLIT(dev))
- reg = BLC_PWM_CPU_CTL;
- else
- reg = BLC_PWM_CTL;
-
- dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+ /* We try to do the minimum that is necessary in order to unlock
+ * the registers for mode setting.
+ *
+ * On Ironlake, this is quite simple as we just set the unlock key
+ * and ignore all subtleties. (This may cause some issues...)
+ *
+ * Prior to Ironlake, we must disable the pipe if we want to adjust
+ * the panel fitter. However at all other times we can just reset
+ * the registers regardless.
+ */
- intel_lvds_set_power(dev, false);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else if (intel_lvds->pfit_dirty) {
+ I915_WRITE(PP_CONTROL,
+ (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+ & ~POWER_TARGET_ON);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
}
-static void intel_lvds_commit( struct drm_encoder *encoder)
+static void intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
+ if (dev_priv->backlight_level == 0)
+ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+ /* Undo any unlocking done in prepare to prevent accidental
+ * adjustment of the registers.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 val = I915_READ(PCH_PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+ } else {
+ u32 val = I915_READ(PP_CONTROL);
+ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+ I915_WRITE(PP_CONTROL, val & 0x3);
+ }
- intel_lvds_set_power(dev, true);
+ /* Always do a full power on as we do not know what state
+ * we were left in.
+ */
+ intel_lvds_set_power(intel_lvds, true);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
/*
* The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_SPLIT(dev))
return;
+ if (!intel_lvds->pfit_dirty)
+ return;
+
/*
* Enable automatic panel scaling so that non-native modes fill the
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
}
/**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+ struct drm_display_mode *mode;
- if (ret)
- return ret;
+ if (intel_lvds->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ return drm_add_edid_modes(connector, intel_lvds->edid);
}
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
- if (dev_priv->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
- }
+ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ if (mode == NULL)
+ return 0;
- return 0;
+ drm_mode_probed_add(connector, mode);
+ return 1;
}
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
- if (property == dev->mode_config.scaling_mode_property &&
- connector->encoder) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct drm_encoder *encoder = connector->encoder;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
- return 0;
+ return -EINVAL;
}
+
if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
* Find the reduced downclock for LVDS in EDID.
*/
static void intel_find_lvds_downclock(struct drm_device *dev,
- struct drm_connector *connector)
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *scan, *panel_fixed_mode;
+ struct drm_display_mode *scan;
int temp_downclock;
- panel_fixed_mode = dev_priv->panel_fixed_mode;
- temp_downclock = panel_fixed_mode->clock;
-
- mutex_lock(&dev->mode_config.mutex);
+ temp_downclock = fixed_mode->clock;
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == panel_fixed_mode->hdisplay &&
- scan->hsync_start == panel_fixed_mode->hsync_start &&
- scan->hsync_end == panel_fixed_mode->hsync_end &&
- scan->htotal == panel_fixed_mode->htotal &&
- scan->vdisplay == panel_fixed_mode->vdisplay &&
- scan->vsync_start == panel_fixed_mode->vsync_start &&
- scan->vsync_end == panel_fixed_mode->vsync_end &&
- scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
}
}
}
- mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock &&
- i915_lvds_downclock) {
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
- "Normal clock %dKhz, downclock %dKhz\n",
- panel_fixed_mode->clock, temp_downclock);
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
}
- return;
}
/*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
* If it is present, return true.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
*/
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, ret;
+ int i;
if (!dev_priv->child_dev_num)
- return 1;
+ return true;
- ret = 0;
for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not LFP, continue.
- * If the device type is 0x22, it is also regarded as LFP.
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
*/
- if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
- p_child->device_type != DEVICE_TYPE_LFP)
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
continue;
- /* The addin_offset should be checked. Only when it is
- * non-zero, it is regarded as present.
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
*/
- if (p_child->addin_offset) {
- ret = 1;
- break;
- }
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
}
- return ret;
+
+ return false;
+}
+
+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 buf = 0;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0xA0,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ };
+ struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
+ /* XXX this only appears to work when using GMBUS */
+ if (intel_gmbus_is_forced_bit(i2c))
+ return true;
+ return i2c_transfer(i2c, msgs, 1) == 1;
}
/**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
u32 lvds;
- int pipe, gpio = GPIOC;
+ int pipe;
+ u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return;
- if (!lvds_is_present_in_vbt(dev)) {
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
- if (dev_priv->edp_support) {
+ if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
- gpio = PCH_GPIOC;
+ }
+
+ if (!intel_lvds_ddc_probe(dev, pin)) {
+ DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
+ return;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ }
+
intel_encoder = &intel_lvds->base;
- encoder = &intel_encoder->enc;
+ encoder = &intel_encoder->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
* if closed, act like it's not there for now
*/
- /* Set up the DDC bus. */
- intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
- if (!intel_encoder->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- goto failed;
- }
-
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- dev_priv->lvds_edid_good = true;
+ intel_lvds->edid = drm_get_edid(connector,
+ &dev_priv->gmbus[pin].adapter);
- if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
- dev_priv->lvds_edid_good = false;
+ if (!intel_lvds->edid) {
+ /* Didn't get an EDID, so
+ * Set wide sync ranges so we get all modes
+ * handed to valid_mode for checking
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
list_for_each_entry(scan, &connector->probed_modes, head) {
- mutex_lock(&dev->mode_config.mutex);
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, scan);
- mutex_unlock(&dev->mode_config.mutex);
- intel_find_lvds_downclock(dev, connector);
+ intel_find_lvds_downclock(dev,
+ intel_lvds->fixed_mode,
+ connector);
goto out;
}
- mutex_unlock(&dev->mode_config.mutex);
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- mutex_lock(&dev->mode_config.mutex);
- dev_priv->panel_fixed_mode =
+ intel_lvds->fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- mutex_unlock(&dev->mode_config.mutex);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_from_pipe(dev, pipe);
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (dev_priv->panel_fixed_mode) {
- dev_priv->panel_fixed_mode->type |=
+ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!dev_priv->panel_fixed_mode)
+ if (!intel_lvds->fixed_mode)
goto failed;
out:
@@ -997,8 +1024,6 @@ out:
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 4b1fd3d9c73c..f70b7cf32bff 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
* intel_ddc_probe
*
*/
-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
{
+ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
- int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->enc.dev, true);
- ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->enc.dev, false);
- if (ret == 2)
- return true;
-
- return false;
+ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
}
/**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(connector->dev, true);
edid = drm_get_edid(connector, adapter);
- intel_i2c_quirk_set(connector->dev, false);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ea5d3fea4b61..917c7dc3cd6b 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -31,17 +31,16 @@
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
#define PCI_ASLS 0xfc
-#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
-#define OPREGION_VBT_OFFSET 0x1000
+#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
@@ -143,40 +142,22 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#ifdef CONFIG_ACPI
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl, blc_pwm_ctl2;
- u32 max_backlight, level, shift;
+ u32 max;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
+ if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
- if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
- pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else {
- if (IS_PINEVIEW(dev)) {
- blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
- max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
- } else {
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
- shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
- }
- level = (bclp * max_backlight) / 255;
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
- }
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return 0;
}
-void opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 cpu_pwm_ctl, pch_pwm_ctl2;
- u32 max_backlight, level;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
-
- cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
- pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
- /* get the max PWM frequency */
- max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
- /* calculate the expected PMW frequency */
- level = (bclp * max_backlight) / 255;
- /* reserve the high 16 bits */
- cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
- /* write the updated PWM frequency */
- I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
+/* Only present on Ironlake+ */
+void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
-void opregion_enable_asle(struct drm_device *dev)
+void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
goto end;
}
-int intel_opregion_init(struct drm_device *dev, int resume)
+void intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_didl_outputs(dev);
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+ * We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
return -ENOTSUPP;
}
- base = ioremap(asls, OPREGION_SZ);
+ base = ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
- opregion->header = base;
- if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_didl_outputs(dev);
- } else {
- DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
- err = -ENOTSUPP;
- goto err_out;
}
- opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
- opregion_enable_asle(dev);
}
- if (!resume)
- acpi_video_register();
-
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
-
return 0;
err_out:
iounmap(opregion->header);
- opregion->header = NULL;
- acpi_video_register();
return err;
}
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->enabled)
- return;
-
- if (!suspend)
- acpi_video_unregister();
-
- opregion->acpi->drdy = 0;
-
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
-
- /* just clear all opregion memory pointers now */
- iounmap(opregion->header);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
-
- opregion->enabled = 0;
-}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1d306a458be6..afb96d25219a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -170,57 +170,143 @@ struct overlay_registers {
u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
-
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
struct overlay_registers *regs;
- /* no recursive mappings */
- BUG_ON(overlay->virt_addr);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
- if (OVERLAY_NONPHYSICAL(overlay->dev)) {
- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset,
- KM_USER0);
+ return regs;
+}
- if (!regs) {
- DRM_ERROR("failed to map overlay regs in GTT\n");
- return NULL;
- }
- } else
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap(regs);
+}
+
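+/* Queue a request for the commands just emitted and wait for it to complete; the tail callback is recorded so that an interrupted wait can be finished later by intel_overlay_recover_from_interrupt(). */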
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *request,
+ bool interruptible,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
- return overlay->virt_addr = regs;
+ BUG_ON(overlay->last_flip_req);
+ overlay->last_flip_req =
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ overlay->flip_tail = tail;
+ ret = i915_do_wait_request(dev,
+ overlay->last_flip_req, true,
+ &dev_priv->render_ring);
+ if (ret)
+ return ret;
+
+ overlay->last_flip_req = 0;
+ return 0;
}
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+/* Workaround for i830 bug where pipe A must be enabled to change control regs */
+static int
+i830_activate_pipe_a(struct drm_device *dev)
{
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_display_mode vesa_640x480 = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ }, *mode;
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+ if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+ return 0;
- overlay->virt_addr = NULL;
+ /* most i8xx have pipe A forced on, so don't trust the DPMS mode */
+ if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ return 0;
- return;
+ crtc_funcs = crtc->base.helper_private;
+ if (crtc_funcs->dpms == NULL)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+
+ mode = drm_mode_duplicate(dev, &vesa_640x480);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ crtc->base.x, crtc->base.y,
+ crtc->base.fb))
+ return 0;
+
+ crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+ return 1;
+}
+
+static void
+i830_deactivate_pipe_a(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_gem_request *request;
+ int pipe_a_quirk = 0;
int ret;
- drm_i915_private_t *dev_priv = dev->dev_private;
BUG_ON(overlay->active);
-
overlay->active = 1;
- overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ if (IS_I830(dev)) {
+ pipe_a_quirk = i830_activate_pipe_a(dev);
+ if (pipe_a_quirk < 0)
+ return pipe_a_quirk;
+ }
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+out:
+ if (pipe_a_quirk)
+ i830_deactivate_pipe_a(dev);
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ return ret;
}
/* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
- bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
ADVANCE_LP_RING();
overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ return 0;
}
-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- u32 tmp;
-
- if (overlay->last_flip_req != 0) {
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret == 0) {
- overlay->last_flip_req = 0;
-
- tmp = I915_READ(ISR);
+ struct drm_gem_object *obj = &overlay->old_vid_bo->base;
- if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
- return 0;
- }
- }
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
- /* synchronous slowpath */
- overlay->hw_wedged = RELEASE_OLD_VID;
+ overlay->old_vid_bo = NULL;
+}
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = &overlay->vid_bo->base;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return 0;
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
+static int intel_overlay_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- u32 flip_addr = overlay->flip_addr;
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_i915_gem_request *request;
BUG_ON(!overlay->active);
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
+ BEGIN_LP_RING(6);
/* wait for overlay to go idle */
- overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-
- BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- 1, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- overlay->hw_wedged = 0;
- overlay->last_flip_req = 0;
- return ret;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct drm_gem_object *obj;
-
- /* never have the overlay hw on without showing a frame */
- BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
-
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->vid_bo = NULL;
-
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = 0;
+ return intel_overlay_do_wait_request(overlay, request, interruptible,
+ intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and make forward progress. */
-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- int interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ bool interruptible)
{
struct drm_device *dev = overlay->dev;
- struct drm_gem_object *obj;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 flip_addr;
int ret;
- if (overlay->hw_wedged == HW_WEDGED)
- return -EIO;
-
- if (overlay->last_flip_req == 0) {
- overlay->last_flip_req =
- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
- }
+ if (overlay->last_flip_req == 0)
+ return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
+ interruptible, &dev_priv->render_ring);
+ if (ret)
return ret;
- switch (overlay->hw_wedged) {
- case RELEASE_OLD_VID:
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
- break;
- case SWITCH_OFF_STAGE_1:
- flip_addr = overlay->flip_addr;
- flip_addr |= OFC_UPDATE;
-
- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
-
- overlay->last_flip_req = i915_add_request(dev, NULL,
- 0, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
- if (ret != 0)
- return ret;
-
- case SWITCH_OFF_STAGE_2:
- intel_overlay_off_tail(overlay);
- break;
- default:
- BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
- }
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
- overlay->hw_wedged = 0;
overlay->last_flip_req = 0;
return 0;
}
/* Wait for pending overlay flip and release old frame.
* Needs to be called before the overlay register are changed
- * via intel_overlay_(un)map_regs_atomic */
+ * via intel_overlay_(un)map_regs
+ */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- struct drm_gem_object *obj;
- /* only wait if there is actually an old frame to release to
- * guarantee forward progress */
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
if (!overlay->old_vid_bo)
return 0;
- ret = intel_overlay_wait_flip(overlay);
- if (ret != 0)
- return ret;
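+ /* If a flip is still pending, emit a wait-for-flip and block before releasing the old buffer. */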
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ struct drm_i915_gem_request *request;
- obj = &overlay->old_vid_bo->base;
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- overlay->old_vid_bo = NULL;
+ /* synchronous slowpath */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ ret = intel_overlay_do_wait_request(overlay, request, true,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+
+ intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -506,65 +502,65 @@ struct put_image_params {
static int packed_depth_bytes(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return 4;
- case I915_OVERLAY_YUV411:
- /* return 6; not implemented */
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
}
}
static int packed_width_bytes(u32 format, short width)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- return width << 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
}
}
static int uv_hsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV420:
- return 2;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- return 4;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
}
}
static int uv_vsubsampling(u32 format)
{
switch (format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV420:
- case I915_OVERLAY_YUV410:
- return 2;
- case I915_OVERLAY_YUV422:
- case I915_OVERLAY_YUV411:
- return 1;
- default:
- return -EINVAL;
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
}
}
static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_I9XX(dev)) {
- mask = 0x3f;
- shift = 6;
- } else {
+ if (IS_GEN2(dev)) {
mask = 0x1f;
shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (IS_I9XX(dev))
+ if (!IS_GEN2(dev))
ret <<= 1;
ret -=1;
return ret << 2;
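A worked example of the computation above may help; the offset and width are made-up values, not taken from the patch:

	/*
	 * calc_swidthsw() on a gen3+ part (mask = 0x3f, shift = 6):
	 *   offset = 0x20, width = 1440 bytes
	 *   ret = ((0x20 + 1440 + 0x3f) >> 6) - (0x20 >> 6) = 23 - 0 = 23
	 *   ret <<= 1   ->  46        (doubled on everything but gen2)
	 *   ret -= 1    ->  45
	 *   return 45 << 2 = 180
	 */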
@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000};
+ 0x3000, 0x0800, 0x3000
+};
static void update_polyphase_filter(struct overlay_registers *regs)
{
@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
yscale = 1 << FP_SHIFT;
/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
- xscale_UV = xscale/uv_hscale;
- yscale_UV = yscale/uv_vscale;
- /* make the Y scale to UV scale ratio an exact multiply */
- xscale = xscale_UV * uv_hscale;
- yscale = yscale_UV * uv_vscale;
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
/*} else {
- xscale_UV = 0;
- yscale_UV = 0;
- }*/
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
scale_changed = true;
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
- | ((xscale >> FP_SHIFT) << 16)
- | ((xscale & FRACT_MASK) << 3);
- regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
- | ((xscale_UV >> FP_SHIFT) << 16)
- | ((xscale_UV & FRACT_MASK) << 3);
- regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
- | ((yscale_UV >> FP_SHIFT) << 0);
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
if (scale_changed)
update_polyphase_filter(regs);
@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers *regs)
{
u32 key = overlay->color_key;
+
switch (overlay->crtc->base.fb->bits_per_pixel) {
- case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
- case 16:
- if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
- } else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
- }
- case 24:
- case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
}
}
@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
if (params->format & I915_OVERLAY_YUV_PLANAR) {
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PLANAR;
- break;
- case I915_OVERLAY_YUV420:
- cmd |= OCMD_YUV_420_PLANAR;
- break;
- case I915_OVERLAY_YUV411:
- case I915_OVERLAY_YUV410:
- cmd |= OCMD_YUV_410_PLANAR;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
}
} else { /* YUV packed */
switch (params->format & I915_OVERLAY_DEPTH_MASK) {
- case I915_OVERLAY_YUV422:
- cmd |= OCMD_YUV_422_PACKED;
- break;
- case I915_OVERLAY_YUV411:
- cmd |= OCMD_YUV_411_PACKED;
- break;
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
}
switch (params->format & I915_OVERLAY_SWAP_MASK) {
- case I915_OVERLAY_NO_SWAP:
- break;
- case I915_OVERLAY_UV_SWAP:
- cmd |= OCMD_UV_SWAP;
- break;
- case I915_OVERLAY_Y_SWAP:
- cmd |= OCMD_Y_SWAP;
- break;
- case I915_OVERLAY_Y_AND_UV_SWAP:
- cmd |= OCMD_Y_AND_UV_SWAP;
- break;
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
}
}
return cmd;
}
-int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
- struct put_image_params *params)
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
regs->OCONFIG = OCONF_CC_OUT_8BIT;
- if (IS_I965GM(overlay->dev))
+ if (IS_GEN4(overlay->dev))
regs->OCONFIG |= OCONF_CSC_MODE_BT709;
regs->OCONFIG |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTH = params->src_w;
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
+ params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;
@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 tmp_U, tmp_V;
regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
- params->src_w/uv_hscale);
+ params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->OCMD = overlay_cmd_reg(params);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = to_intel_bo(new_bo);
@@ -829,20 +838,19 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay)
+int intel_overlay_switch_off(struct intel_overlay *overlay,
+ bool interruptible)
{
- int ret;
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
+ int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- return ret;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ if (ret != 0)
+ return ret;
if (!overlay->active)
return 0;
@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
regs->OCMD = 0;
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay);
+ ret = intel_overlay_off(overlay, interruptible);
if (ret != 0)
return ret;
intel_overlay_off_tail(overlay);
-
return 0;
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
struct intel_crtc *crtc)
{
- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- u32 pipeconf;
- int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ if (!crtc->active)
return -EINVAL;
- pipeconf = I915_READ(pipeconf_reg);
-
/* can't use the overlay with double wide pipe */
- if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
return -EINVAL;
return 0;
@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ratio;
+ drm_i915_private_t *dev_priv = dev->dev_private;
u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
- * line with the intel documentation for the i965 */
- if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
- ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
- } else { /* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS);
- if (IS_I965G(dev))
- ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
else
- ratio >>= PFIT_VERT_SCALE_SHIFT;
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
}
overlay->pfit_vscale_ratio = ratio;
@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if ((rec->dst_x < mode->crtc_hdisplay)
- && (rec->dst_x + rec->dst_width
- <= mode->crtc_hdisplay)
- && (rec->dst_y < mode->crtc_vdisplay)
- && (rec->dst_y + rec->dst_height
- <= mode->crtc_vdisplay))
+ if (rec->dst_x < mode->crtc_hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+ rec->dst_y < mode->crtc_vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
return 0;
else
return -EINVAL;
@@ -940,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
struct drm_gem_object *new_bo)
{
- u32 stride_mask;
- int depth;
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- size_t tmp;
+ u32 stride_mask, depth, tmp;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
- if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
- || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
} else {
- if (rec->src_height > IMAGE_MAX_HEIGHT
- || rec->src_width > IMAGE_MAX_WIDTH)
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
return -EINVAL;
}
+
/* better safe than sorry, use 4 as the maximal subsampling ratio */
- if (rec->src_height < N_VERT_Y_TAPS*4
- || rec->src_width < N_HORIZ_Y_TAPS*4)
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
/* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- /* not implemented */
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
return -EINVAL;
- case I915_OVERLAY_YUV_PACKED:
- depth = packed_depth_bytes(rec->flags);
- if (uv_vscale != 1)
- return -EINVAL;
- if (depth < 0)
- return depth;
- /* ignore UV planes */
- rec->stride_UV = 0;
- rec->offset_U = 0;
- rec->offset_V = 0;
- /* check pixel alignment */
- if (rec->offset_Y % depth)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (uv_vscale < 0 || uv_hscale < 0)
- return -EINVAL;
- /* no offset restrictions for planar formats */
- break;
- default:
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
}
if (rec->src_width % uv_hscale)
@@ -1000,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_I965G(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
- 4 : 8;
- if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
return -EINVAL;
/* check buffer dimensions */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
- case I915_OVERLAY_RGB:
- case I915_OVERLAY_YUV_PACKED:
- /* always 4 Y values per depth pixels */
- if (packed_width_bytes(rec->flags, rec->src_width)
- > rec->stride_Y)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- break;
- case I915_OVERLAY_YUV_PLANAR:
- if (rec->src_width > rec->stride_Y)
- return -EINVAL;
- if (rec->src_width/uv_hscale > rec->stride_UV)
- return -EINVAL;
-
- tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
- return -EINVAL;
- tmp = rec->stride_UV*rec->src_height;
- tmp /= uv_vscale;
- if (rec->offset_U + tmp > new_bo->size
- || rec->offset_V + tmp > new_bo->size)
- return -EINVAL;
- break;
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->size ||
+ rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
}
return 0;
}
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1068,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1081,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return -ENOMEM;
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
ret = -ENOENT;
goto out_free;
@@ -1089,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ put_image_rec->bo_handle);
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1098,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- if (overlay->hw_wedged) {
- ret = intel_overlay_recover_from_interrupt(overlay, 1);
- if (ret != 0)
- goto out_unlock;
- }
+ ret = intel_overlay_recover_from_interrupt(overlay, true);
+ if (ret != 0)
+ goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay);
+ ret = intel_overlay_switch_off(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1117,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
overlay->crtc = crtc;
crtc->overlay = overlay;
- if (intel_panel_fitter_pipe(dev) == crtc->pipe
- /* and line to wide, i.e. one-line-mode */
- && mode->hdisplay > 1024) {
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
overlay->pfit_active = 1;
update_pfit_vscale_ratio(overlay);
} else
@@ -1132,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (overlay->pfit_active) {
params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
- overlay->pfit_vscale_ratio);
+ overlay->pfit_vscale_ratio);
/* shifting right rounds downwards, so add 1 */
params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
- overlay->pfit_vscale_ratio) + 1;
+ overlay->pfit_vscale_ratio) + 1;
} else {
params->dst_y = put_image_rec->dst_y;
params->dst_h = put_image_rec->dst_height;
@@ -1147,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
params->src_h = put_image_rec->src_height;
params->src_scan_w = put_image_rec->src_scan_width;
params->src_scan_h = put_image_rec->src_scan_height;
- if (params->src_scan_h > params->src_h
- || params->src_scan_w > params->src_w) {
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1204,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
return false;
for (i = 0; i < 3; i++) {
- if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
return false;
}
@@ -1225,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5)
static int check_gamma(struct drm_intel_overlay_attrs *attrs)
{
- if (!check_gamma_bounds(0, attrs->gamma0)
- || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
- || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
- || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
- || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
- || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
- || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
return -EINVAL;
+
if (!check_gamma5_errata(attrs->gamma5))
return -EINVAL;
+
return 0;
}
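To illustrate what check_gamma() accepts: every byte lane of the six gamma values must be strictly increasing, starting above 0x00 and ending below 0xff. The values below are hypothetical and only demonstrate the checks:

	/*
	 * gamma0 = 0x101010, gamma1 = 0x303030, gamma2 = 0x505050,
	 * gamma3 = 0x707070, gamma4 = 0x909090, gamma5 = 0xb0b0b0
	 * passes: every adjacent pair satisfies check_gamma_bounds() in all
	 * three byte lanes and, assuming check_gamma5_errata() rejects a
	 * 0x80 byte, gamma5 clears the errata check as well.
	 */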
@@ -1261,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
- attrs->color_key = overlay->color_key;
+ attrs->color_key = overlay->color_key;
attrs->brightness = overlay->brightness;
- attrs->contrast = overlay->contrast;
+ attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (IS_I9XX(dev)) {
+ if (!IS_GEN2(dev)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->gamma4 = I915_READ(OGAMC4);
attrs->gamma5 = I915_READ(OGAMC5);
}
- ret = 0;
} else {
- overlay->color_key = attrs->color_key;
- if (attrs->brightness >= -128 && attrs->brightness <= 127) {
- overlay->brightness = attrs->brightness;
- } else {
- ret = -EINVAL;
+ if (attrs->brightness < -128 || attrs->brightness > 127)
goto out_unlock;
- }
- if (attrs->contrast <= 255) {
- overlay->contrast = attrs->contrast;
- } else {
- ret = -EINVAL;
+ if (attrs->contrast > 255)
goto out_unlock;
- }
- if (attrs->saturation <= 1023) {
- overlay->saturation = attrs->saturation;
- } else {
- ret = -EINVAL;
+ if (attrs->saturation > 1023)
goto out_unlock;
- }
- regs = intel_overlay_map_regs_atomic(overlay);
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unlock;
@@ -1305,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (!IS_I9XX(dev)) {
- ret = -EINVAL;
+ if (IS_GEN2(dev))
goto out_unlock;
- }
if (overlay->active) {
ret = -EBUSY;
@@ -1319,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
}
ret = check_gamma(attrs);
- if (ret != 0)
+ if (ret)
goto out_unlock;
I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1329,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
I915_WRITE(OGAMC4, attrs->gamma4);
I915_WRITE(OGAMC5, attrs->gamma5);
}
- ret = 0;
}
+ ret = 0;
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1347,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev)
struct overlay_registers *regs;
int ret;
- if (!OVERLAY_EXISTS(dev))
+ if (!HAS_OVERLAY(dev))
return;
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1360,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
- if (OVERLAY_NONPHYSICAL(dev)) {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to pin overlay register bo\n");
- goto out_free_bo;
- }
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
- } else {
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
- 0);
+ PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ } else {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
}
/* init all values */
@@ -1384,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->contrast = 75;
overlay->saturation = 146;
- regs = intel_overlay_map_regs_atomic(overlay);
+ regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_free_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
-
update_reg_attrs(overlay, regs);
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
DRM_INFO("initialized overlay support\n");
return;
+out_unpin_bo:
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(reg_bo);
out_free:
@@ -1408,18 +1440,23 @@ out_free:
void intel_cleanup_overlay(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->overlay) {
- /* The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already */
- BUG_ON(dev_priv->overlay->active);
+ if (!dev_priv->overlay)
+ return;
- kfree(dev_priv->overlay);
- }
+ /* The BOs should be freed by the generic code already.
+ * Furthermore, modesetting teardown happens beforehand, so the
+ * hardware should be off already. */
+ BUG_ON(dev_priv->overlay->active);
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ kfree(dev_priv->overlay);
}
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1427,6 +1464,29 @@ struct intel_overlay_error_state {
u32 isr;
};
+static struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs);
+}
+
+
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_device *dev)
{
@@ -1444,17 +1504,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->gtt_offset;
- else
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = (long) overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
goto err;
memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay);
+ intel_overlay_unmap_regs_atomic(overlay, regs);
return error;
@@ -1515,3 +1575,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
P(UVSCALEV);
#undef P
}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e7f5299d9d57..92ff8f385278 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,6 +30,8 @@
#include "intel_drv.h"
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
void
intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
dev_priv->pch_pf_pos = (x << 16) | y;
dev_priv->pch_pf_size = (width << 16) | height;
}
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 max;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ } else {
+ max = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ max >>= 17;
+ } else {
+ max >>= 16;
+ if (INTEL_INFO(dev)->gen < 4)
+ max &= ~1;
+ }
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ DRM_ERROR("fixme: max PWM is zero.\n");
+ max = 1;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (IS_PINEVIEW(dev))
+ val >>= 1;
+
+ if (is_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
+ val >>= 1;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+
+ if (is_backlight_combination_mode(dev)) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lpbc;
+
+ lpbc = level * 0xfe / max + 1;
+ level /= lpbc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (IS_PINEVIEW(dev)) {
+ tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ level <<= 1;
+ } else
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
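A worked example of the combination-mode split above, using made-up register values (max and level are not taken from the patch):

	/*
	 * max   = 0x1fff   (as returned by intel_panel_get_max_backlight)
	 * level = 0x1000   (requested)
	 * lpbc  = 0x1000 * 0xfe / 0x1fff + 1 = 127 + 1 = 128
	 * level = 0x1000 / 128              = 32
	 * so 128 is written to the PCI_LBPC config byte and 32 becomes the
	 * PWM duty cycle in BLC_PWM_CTL (shifted left once more on Pineview).
	 */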
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..09f2dc353ae2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
+#include "intel_drv.h"
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
static void
render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
if ((invalidate_domains|flush_domains) &
I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen < 4) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
}
}
-static unsigned int render_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void ring_write_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ I915_WRITE_TAIL(ring, value);
}
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static void render_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
u32 head;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
- I915_WRITE(ring->regs.ctl, 0);
- I915_WRITE(ring->regs.head, 0);
- I915_WRITE(ring->regs.tail, 0);
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(dev, ring, 0);
/* Initialize the ring. */
- I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
- head = ring->get_head(dev, ring);
+ I915_WRITE_START(ring, obj_priv->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
DRM_ERROR("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
- I915_WRITE(ring->regs.head, 0);
+ I915_WRITE_HEAD(ring, 0);
DRM_ERROR("%s head forced to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
}
- I915_WRITE(ring->regs.ctl,
+ I915_WRITE_CTL(ring,
((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_NO_REPORT | RING_VALID);
- head = I915_READ(ring->regs.head) & HEAD_ADDR;
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
if (head != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
- I915_READ(ring->regs.ctl),
- I915_READ(ring->regs.head),
- I915_READ(ring->regs.tail),
- I915_READ(ring->regs.start));
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
return -EIO;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
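The free-space computation above treats the ring as a circular buffer; a worked example with made-up head/tail values (PAGE_SIZE assumed to be 4096):

	/*
	 * size  = 32 * PAGE_SIZE = 0x20000
	 * head  = 0x100, tail = 0x8000
	 * space = 0x100 - (0x8000 + 8) = -0x7f08   (negative, so wrap)
	 * space += 0x20000             =  0x180f8 bytes free
	 * The extra 8 bytes keep the tail from ever touching the head.
	 */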
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
}
static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
int mode;
- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ if (INTEL_INFO(dev)->gen > 3) {
mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do { \
*/
static u32
render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
}
static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
static void
render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
static void
render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-static void render_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (IS_GEN6(dev)) {
- I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA_GEN6); /* posting read */
+ I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
} else {
- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+ ring->status_page.gfx_addr);
+ I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
}
}
-void
+static void
bsd_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
return init_ring_common(dev, ring);
}
static u32
-bsd_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
- u32 flush_domains)
+ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
{
u32 seqno;
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
return seqno;
}
-static void bsd_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
- I915_READ(BSD_HWS_PGA);
-}
-
static void
bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static void
bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
/* do nothing */
}
static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
uint32_t exec_start;
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
return 0;
}
-
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = exec->num_cliprects;
@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_emit(dev, ring, exec_start + exec_len - 4);
intel_ring_emit(dev, ring, 0);
} else {
- intel_ring_begin(dev, ring, 4);
- if (IS_I965G(dev)) {
+ intel_ring_begin(dev, ring, 2);
+ if (INTEL_INFO(dev)->gen >= 4) {
intel_ring_emit(dev, ring,
MI_BATCH_BUFFER_START | (2 << 6)
| MI_BATCH_NON_SECURE_I965);
@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
intel_ring_begin(dev, ring, 2);
intel_ring_emit(dev, ring, MI_FLUSH |
MI_NO_WRITE_FLUSH |
@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
}
static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
}
static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- ring->setup_status_page(dev, ring);
+ intel_ring_setup_status_page(dev, ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -617,15 +569,18 @@ err:
return ret;
}
-
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int ret;
ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
ring->gem_object = obj;
- ret = i915_gem_object_pin(obj, ring->alignment);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE);
if (ret)
goto err_unref;
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
else {
- ring->head = ring->get_head(dev, ring);
- ring->tail = ring->get_tail(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
}
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
return ret;
err_unmap:
@@ -691,7 +644,7 @@ err_hws:
}
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
if (ring->gem_object == NULL)
return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
cleanup_status_page(dev, ring);
}
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+ struct intel_ring_buffer *ring, int n)
{
unsigned long end;
+ drm_i915_private_t *dev_priv = dev->dev_private;
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- ring->head = ring->get_head(dev, ring);
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -753,14 +707,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
- yield();
+ msleep(1);
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int num_dwords)
+ struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
if (unlikely(ring->tail + n > ring->size))
@@ -772,97 +727,181 @@ void intel_ring_begin(struct drm_device *dev,
}
void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->advance_ring(dev, ring);
-}
-
-void intel_fill_struct(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- void *data,
- unsigned int len)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- BUG_ON((len&~(4-1)) != 0);
- intel_ring_begin(dev, ring, len/4);
- memcpy(virt, data, len);
- ring->tail += len;
- ring->tail &= ring->size - 1;
- ring->space -= len;
- intel_ring_advance(dev, ring);
+ ring->write_tail(dev, ring, ring->tail);
}
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .regs = {
- .ctl = PRB0_CTL,
- .head = PRB0_HEAD,
- .tail = PRB0_TAIL,
- .start = PRB0_START
- },
- .ring_flag = I915_EXEC_RENDER,
+ .id = RING_RENDER,
+ .mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = render_setup_status_page,
.init = init_render_ring,
- .get_head = render_ring_get_head,
- .get_tail = render_ring_get_tail,
- .get_active_head = render_ring_get_active_head,
- .advance_ring = render_ring_advance_ring,
+ .write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_gem_seqno = render_ring_get_gem_seqno,
+ .get_seqno = render_ring_get_seqno,
.user_irq_get = render_ring_get_user_irq,
.user_irq_put = render_ring_put_user_irq,
.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
};
/* ring buffer for bit-stream decoder */
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .regs = {
- .ctl = BSD_RING_CTL,
- .head = BSD_RING_HEAD,
- .tail = BSD_RING_TAIL,
- .start = BSD_RING_START
- },
- .ring_flag = I915_EXEC_BSD,
+ .id = RING_BSD,
+ .mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .alignment = PAGE_SIZE,
- .virtual_start = NULL,
- .dev = NULL,
- .gem_object = NULL,
- .head = 0,
- .tail = 0,
- .space = 0,
- .user_irq_refcount = 0,
- .irq_gem_seqno = 0,
- .waiting_gem_seqno = 0,
- .setup_status_page = bsd_setup_status_page,
.init = init_bsd_ring,
- .get_head = bsd_ring_get_head,
- .get_tail = bsd_ring_get_tail,
- .get_active_head = bsd_ring_get_active_head,
- .advance_ring = bsd_ring_advance_ring,
+ .write_tail = ring_write_tail,
.flush = bsd_ring_flush,
- .add_request = bsd_ring_add_request,
- .get_gem_seqno = bsd_ring_get_gem_seqno,
+ .add_request = ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
.user_irq_get = bsd_ring_get_user_irq,
.user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
- .status_page = {NULL, 0, NULL},
- .map = {0,}
+ .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
};
+
+
+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+ I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+ I915_WRITE_TAIL(ring, value);
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ /* bits 0-7 hold the length on GEN6+ */
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+
+ return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_bsd_ring,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+};
+
+/* Blitter support (SandyBridge+) */
+
+static void
+blt_ring_get_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+static void
+blt_ring_put_user_irq(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ /* do nothing */
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+ .name = "blt ring",
+ .id = RING_BLT,
+ .mmio_base = BLT_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = ring_add_request,
+ .get_seqno = ring_status_page_get_seqno,
+ .user_irq_get = blt_ring_get_user_irq,
+ .user_irq_put = blt_ring_put_user_irq,
+ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->render_ring = render_ring;
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ dev_priv->render_ring.status_page.page_addr
+ = dev_priv->status_page_dmah->vaddr;
+ memset(dev_priv->render_ring.status_page.page_addr,
+ 0, PAGE_SIZE);
+ }
+
+ return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_GEN6(dev))
+ dev_priv->bsd_ring = gen6_bsd_ring;
+ else
+ dev_priv->bsd_ring = bsd_ring;
+
+ return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->blt_ring = gen6_blt_ring;
+
+ return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+}
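A sketch of how a driver-load path might call the three new init helpers; the HAS_BSD()/HAS_BLT() feature tests and the error handling are assumptions for illustration, not part of this patch:

	int ret = intel_init_render_ring_buffer(dev);
	if (ret == 0 && HAS_BSD(dev))
		ret = intel_init_bsd_ring_buffer(dev);
	if (ret == 0 && HAS_BLT(dev))
		ret = intel_init_blt_ring_buffer(dev);
	if (ret)
		DRM_ERROR("failed to initialize rings: %d\n", ret);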
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3edda8..a05aff0e5764 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,25 +7,32 @@ struct intel_hw_status_page {
struct drm_gem_object *obj;
};
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+
struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
const char *name;
- struct ring_regs {
- u32 ctl;
- u32 head;
- u32 tail;
- u32 start;
- } regs;
- unsigned int ring_flag;
+ enum intel_ring_id {
+ RING_RENDER = 0x1,
+ RING_BSD = 0x2,
+ RING_BLT = 0x4,
+ } id;
+ u32 mmio_base;
unsigned long size;
- unsigned int alignment;
void *virtual_start;
struct drm_device *dev;
struct drm_gem_object *gem_object;
unsigned int head;
unsigned int tail;
- unsigned int space;
+ int space;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seen at irq time */
@@ -35,30 +42,22 @@ struct intel_ring_buffer {
struct intel_ring_buffer *ring);
void (*user_irq_put)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- void (*setup_status_page)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
int (*init)(struct drm_device *dev,
struct intel_ring_buffer *ring);
- unsigned int (*get_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- unsigned int (*get_active_head)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*advance_ring)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ void (*write_tail)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 value);
void (*flush)(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
u32 (*add_request)(struct drm_device *dev,
struct intel_ring_buffer *ring,
- struct drm_file *file_priv,
u32 flush_domains);
- u32 (*get_gem_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 (*get_seqno)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
int (*dispatch_gem_execbuffer)(struct drm_device *dev,
struct intel_ring_buffer *ring,
struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +82,20 @@ struct intel_ring_buffer {
*/
struct list_head request_list;
+ /**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list; last_rendering_seqno can
+ * be used to differentiate between the two lists.
+ */
+ struct list_head gpu_write_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ bool outstanding_lazy_request;
+
wait_queue_head_t irq_queue;
drm_local_map_t map;
};
@@ -96,15 +109,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
}
int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
+ struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct drm_device *dev,
struct intel_ring_buffer *ring,
@@ -115,17 +126,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
ring->tail += 4;
}
-void intel_fill_struct(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- void *data,
- unsigned int len);
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct drm_device *dev,
struct intel_ring_buffer *ring);
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ee73e428a84a..de158b76bcd5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
struct intel_sdvo {
struct intel_encoder base;
+ struct i2c_adapter *i2c;
u8 slave_addr;
+ struct i2c_adapter ddc;
+
/* Register for the SDVO device: SDVOB or SDVOC */
int sdvo_reg;
@@ -104,34 +107,24 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
+ bool has_audio;
/**
- * This is set if we detect output of sdvo device as LVDS.
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
*/
bool is_lvds;
/**
- * This is sdvo flags for input timing.
- */
- uint8_t sdvo_flags;
-
- /**
* This is the sdvo fixed panel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /*
- * supported encoding mode, used to determine whether HDMI is
- * supported
- */
- struct intel_sdvo_encode encode;
-
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
- /* Mac mini hack -- use the same DDC as the analog connector */
- struct i2c_adapter *analog_ddc_bus;
-
+ /* Input timings for adjusted_mode */
+ struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
+ int force_audio;
+
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
struct drm_property *tv_format;
+ struct drm_property *force_audio_property;
+
/* add the property for the SDVO-TV */
struct drm_property *left;
struct drm_property *right;
@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
}
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
*/
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 bval = val, cval = val;
int i;
@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
{
- u8 out_buf[2] = { addr, 0 };
- u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &addr,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = intel_sdvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
- .buf = buf,
+ .buf = ch,
}
};
int ret;
- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
- {
- *ch = buf[0];
+ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
return true;
- }
DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
return false;
}
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
-{
- u8 out_buf[2] = { addr, ch };
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
-}
-
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
-{
- int i;
-
- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
-
- for (i = 0; i < args_len; i++) {
- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]))
- return false;
- }
-
- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
-}
-
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
- void *response, int response_len,
- u8 status)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
{
- int i;
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
- for (i = 0; i < response_len; i++)
- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
- for (; i < 8; i++)
- DRM_LOG_KMS(" ");
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
- else
- DRM_LOG_KMS("(??? %d)", status);
- DRM_LOG_KMS("\n");
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ i = 3;
+ while (status == SDVO_CMD_STATUS_PENDING && i--) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
+ }
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("command returns response %s [%d]\n",
+ status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
+ status);
+ return false;
+ }
+
+ return true;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- int i;
+ u8 retry = 5;
u8 status;
- u8 retry = 50;
-
- while (retry--) {
- /* Read the command response */
- for (i = 0; i < response_len; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]))
- return false;
- }
+ int i;
- /* read the return status */
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ do {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
&status))
return false;
+ } while (status == SDVO_CMD_STATUS_PENDING && --retry);
- intel_sdvo_debug_response(intel_sdvo, response, response_len,
- status);
- if (status != SDVO_CMD_STATUS_PENDING)
- break;
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
- mdelay(50);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
}
+ DRM_LOG_KMS("\n");
+ return true;
- return status == SDVO_CMD_STATUS_SUCCESS;
+log_fail:
+ DRM_LOG_KMS("\n");
+ return false;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
return 4;
}
-/**
- * Try to read the response after issuie the DDC switch command. But it
- * is noted that we must do the action of reading response and issuing DDC
- * switch command in one I2C transaction. Otherwise when we try to start
- * another I2C transaction after issuing the DDC bus switch, it will be
- * switched to the internal SDVO register.
- */
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
- u8 target)
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
{
- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
- struct i2c_msg msgs[] = {
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- },
- /* the following two are to read the response */
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = 0,
- .len = 1,
- .buf = cmd_buf,
- },
- {
- .addr = intel_sdvo->slave_addr >> 1,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = ret_value,
- },
- };
-
- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &target, 1);
- /* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
-
- out_buf[0] = SDVO_I2C_OPCODE;
- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
- cmd_buf[1] = 0;
- ret_value[0] = 0;
- ret_value[1] = 0;
-
- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
- if (ret != 3) {
- /* failure in I2C transfer */
- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return;
- }
- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
- ret_value[0]);
- return;
- }
- return;
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
}
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
- return false;
-
- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+ return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
}
static bool
@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_encode *encode)
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
- if (intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPP_ENCODE,
- encode, sizeof(*encode)))
- return true;
+ struct intel_sdvo_encode encode;
- /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
- int index,
- uint8_t *data, int8_t size, uint8_t tx_rate)
-{
- uint8_t set_buf_index[2];
-
- set_buf_index[0] = index;
- set_buf_index[1] = 0;
-
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (; size > 0; size -= 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
- return false;
-
- data += 8;
- }
-
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
-}
-
-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
-{
- uint8_t csum = 0;
- int i;
-
- for (i = 0; i < size; i++)
- csum += data[i];
-
- return 0x100 - csum;
-}
-
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
-
-struct dip_infoframe {
- uint8_t type;
- uint8_t version;
- uint8_t len;
- uint8_t checksum;
- union {
- struct {
- /* Packet Byte #1 */
- uint8_t S:2;
- uint8_t B:2;
- uint8_t A:1;
- uint8_t Y:2;
- uint8_t rsvd1:1;
- /* Packet Byte #2 */
- uint8_t R:4;
- uint8_t M:2;
- uint8_t C:2;
- /* Packet Byte #3 */
- uint8_t SC:2;
- uint8_t Q:2;
- uint8_t EC:3;
- uint8_t ITC:1;
- /* Packet Byte #4 */
- uint8_t VIC:7;
- uint8_t rsvd2:1;
- /* Packet Byte #5 */
- uint8_t PR:4;
- uint8_t rsvd3:4;
- /* Packet Byte #6~13 */
- uint16_t top_bar_end;
- uint16_t bottom_bar_start;
- uint16_t left_bar_end;
- uint16_t right_bar_start;
- } avi;
- struct {
- /* Packet Byte #1 */
- uint8_t channel_count:3;
- uint8_t rsvd1:1;
- uint8_t coding_type:4;
- /* Packet Byte #2 */
- uint8_t sample_size:2; /* SS0, SS1 */
- uint8_t sample_frequency:3;
- uint8_t rsvd2:3;
- /* Packet Byte #3 */
- uint8_t coding_type_private:5;
- uint8_t rsvd3:3;
- /* Packet Byte #4 */
- uint8_t channel_allocation;
- /* Packet Byte #5 */
- uint8_t rsvd4:3;
- uint8_t level_shift:4;
- uint8_t downmix_inhibit:1;
- } audio;
- uint8_t payload[28];
- } __attribute__ ((packed)) u;
-} __attribute__((packed));
-
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode * mode)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
- .version = DIP_VERSION_AVI,
+ .ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
- avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
- 4 + avi_if.len);
- return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo_dtd input_dtd;
-
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &input_dtd))
+ &intel_sdvo->input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- mode->clock = adjusted_mode->clock;
return true;
}
@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ int multiplier;
/* We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
mode,
adjusted_mode);
} else if (intel_sdvo->is_lvds) {
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
-
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo->sdvo_lvds_fixed_mode))
return false;
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
+ * SDVO device will factor out the multiplier during mode_set.
*/
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
return true;
}
@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- u32 sdvox = 0;
- int sdvo_pixel_multiply, rate;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
if (!mode)
return;
@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
SDVO_CMD_SET_IN_OUT_MAP,
&in_out, sizeof(in_out));
- if (intel_sdvo->is_hdmi) {
- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
- return;
-
- sdvox |= SDVO_AUDIO_ENABLE;
- }
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
/* We have tried to get input timing in mode_fixup, and filled it into
- adjusted_mode */
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-
- /* If it's a TV, we already set the output timing in mode_fixup.
- * Otherwise, the output timing is equal to the input timing.
+ * adjusted_mode.
*/
- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+ input_dtd = intel_sdvo->input_dtd;
+ } else {
/* Set the output timing to the screen */
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo->attached_output))
return;
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_tv_format(intel_sdvo))
- return;
- }
+ if (intel_sdvo->is_hdmi &&
+ !intel_sdvo_set_avi_infoframe(intel_sdvo))
+ return;
- /* We would like to use intel_sdvo_create_preferred_input_timing() to
- * provide the device with a timing it can support, if it supports that
- * feature. However, presumably we would need to adjust the CRTC to
- * output the preferred timing, and we don't support that currently.
- */
-#if 0
- success = intel_sdvo_create_preferred_input_timing(encoder, clock,
- width, height);
- if (success) {
- struct intel_sdvo_dtd *input_dtd;
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
- intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
- intel_sdvo_set_input_timing(encoder, &input_dtd);
- }
-#else
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
-#endif
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- switch (sdvo_pixel_multiply) {
+ switch (pixel_multiplier) {
+ default:
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
return;
/* Set the SDVO control regs. */
- if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ sdvox = SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_sdvo->has_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 temp;
@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
}
/* No use! */
@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
return (caps > 1);
}
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct intel_sdvo *intel_sdvo;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (encoder == intel_attached_encoder(connector))
+ struct intel_sdvo *encoder;
+
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ base.base.head) {
+ if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ if (&encoder->base ==
+ intel_attached_encoder(connector))
return connector;
}
}
}
+
return NULL;
}
@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
return true;
}
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ if (!intel_analog_is_connected(connector->dev))
+ return NULL;
+
+ return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- enum drm_connector_status status = connector_status_connected;
- struct edid *edid = NULL;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
- /* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
- uint8_t saved_ddc, temp_ddc;
- saved_ddc = intel_sdvo->ddc_bus;
- temp_ddc = intel_sdvo->ddc_bus >> 1;
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
/*
* Don't use bus 1 as the argument of the DDC bus switch to get
* the EDID; it is used for the SDVO SPD ROM.
*/
- while(temp_ddc > 1) {
- intel_sdvo->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
- if (edid) {
- /*
- * When we can get the EDID, maybe it is the
- * correct DDC bus. Update it.
- */
- intel_sdvo->ddc_bus = temp_ddc;
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
break;
- }
- temp_ddc >>= 1;
}
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
if (edid == NULL)
intel_sdvo->ddc_bus = saved_ddc;
}
- /* when there is no edid and no monitor is connected with VGA
- * port, try to use the CRT ddc to read the EDID for DVI-connector
+
+ /*
+ * When there is no EDID and no monitor is connected to the VGA
+ * port, try to use the CRT DDC to read the EDID for the DVI connector.
*/
- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ status = connector_status_unknown;
if (edid != NULL) {
- bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
-
/* DDC bus is shared, match EDID to connector type */
- if (is_digital && need_digital)
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- else if (is_digital != need_digital)
- status = connector_status_disconnected;
-
+ intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+ }
connector->display_info.raw_edid = NULL;
- } else
- status = connector_status_disconnected;
-
- kfree(edid);
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio)
+ intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+ }
return status;
}
@@ -1490,13 +1408,12 @@ static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
if (intel_sdvo->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
- DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
if (response == 0)
return connector_status_disconnected;
@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- int num_modes;
+ struct edid *edid;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ edid = intel_sdvo_get_edid(connector);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* DDC fails, check to see if the analog output is disconnected, in
* which case we'll look there for the digital DDC data.
*/
- if (num_modes == 0 &&
- intel_sdvo->analog_ddc_bus &&
- !intel_analog_is_connected(connector->dev)) {
- /* Switch to the analog ddc bus and try that
- */
- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
}
}
@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
return;
BUILD_BUG_ON(sizeof(tv_res) != 3);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res)))
return;
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_display_mode *newmode;
@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_sdvo->i2c);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1693,6 +1611,10 @@ end:
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
intel_sdvo->is_lvds = true;
break;
}
@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
uint16_t temp_value;
uint8_t cmd;
@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret)
return ret;
+ if (property == intel_sdvo_connector->force_audio_property) {
+ if (val == intel_sdvo_connector->force_audio)
+ return 0;
+
+ intel_sdvo_connector->force_audio = val;
+
+ if (val > 0 && intel_sdvo->has_audio)
+ return 0;
+ if (val < 0 && !intel_sdvo->has_audio)
+ return 0;
+
+ intel_sdvo->has_audio = val > 0;
+ goto done;
+ }
+
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -1879,9 +1815,8 @@ set_value:
done:
- if (encoder->crtc) {
- struct drm_crtc *crtc = encoder->crtc;
-
+ if (intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y, crtc->fb);
}
@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
- if (intel_sdvo->analog_ddc_bus)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode);
+ i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
intel_sdvo_guess_ddc_bus(sdvo);
}
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
{
- return intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
- &intel_sdvo->is_hdmi, 1);
-}
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
- struct drm_device *dev = chan->drm_dev;
- struct drm_encoder *encoder;
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.ddc_bus == &chan->adapter)
- return intel_sdvo;
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
}
- return NULL;
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_force_bit(sdvo->i2c, true);
}
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
- struct intel_sdvo *intel_sdvo;
- struct i2c_algo_bit_data *algo_data;
- const struct i2c_algorithm *algo;
+ int is_hdmi;
- algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_sdvo =
- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
- (algo_data->data));
- if (intel_sdvo == NULL)
- return -EINVAL;
+ if (!intel_sdvo_check_supp_encode(intel_sdvo))
+ return false;
- algo = intel_sdvo->base.i2c_bus->algo;
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+ return false;
- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
- return algo->master_xfer(i2c_adap, msgs, num);
-}
+ is_hdmi = 0;
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+ return false;
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
- .master_xfer = intel_sdvo_master_xfer,
-};
+ return !!is_hdmi;
+}
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
{
- drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
- drm_mode_connector_attach_encoder(connector, encoder);
- drm_sysfs_connector_add(connector);
+ connector->force_audio_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+ if (connector->force_audio_property) {
+ connector->force_audio_property->values[0] = -1;
+ connector->force_audio_property->values[1] = 1;
+ drm_connector_attach_property(&connector->base.base,
+ connector->force_audio_property, 0);
+ }
}
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
- && intel_sdvo->is_hdmi) {
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
/* enable hdmi encoding mode if supported */
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
return true;
}
@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- intel_sdvo->controlled_output |= type;
- intel_sdvo_connector->output_flag = type;
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ intel_sdvo->is_tv = true;
+ intel_sdvo->base.needs_tv_clock = true;
+ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
goto err;
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- return true;
+ return true;
err:
intel_sdvo_destroy(connector);
@@ -2177,43 +2124,44 @@ err:
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
return false;
intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
+ connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- return true;
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
}
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
- struct drm_connector *connector;
- struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
if (!intel_sdvo_connector)
@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-
- if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
- } else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
- }
-
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT));
- intel_sdvo_connector_init(encoder, connector);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
return true;
@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
struct intel_sdvo_enhancements_reply enhancements)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
uint16_t response, data_value[2];
@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
else
return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
}
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
- u8 ch[0x40];
int i;
- u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
return false;
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+ kfree(intel_sdvo);
+ return false;
+ }
+
intel_sdvo->sdvo_reg = sdvo_reg;
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
- if (HAS_PCH_SPLIT(dev)) {
- i2c_reg = PCH_GPIOE;
- ddc_reg = PCH_GPIOE;
- analog_ddc_reg = PCH_GPIOA;
- } else {
- i2c_reg = GPIOE;
- ddc_reg = GPIOE;
- analog_ddc_reg = GPIOA;
- }
-
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg))
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
- else
- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
- if (!intel_encoder->i2c_bus)
- goto err_inteloutput;
-
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
- /* Save the bit-banging i2c functionality for use by the DDC wrapper */
- intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_i2c;
+ goto err;
}
}
- /* setup the DDC bus. */
- if (IS_SDVOB(sdvo_reg)) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOB/VGA DDC BUS");
+ if (IS_SDVOB(sdvo_reg))
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- } else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
- "SDVOC/VGA DDC BUS");
+ else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- }
- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
- goto err_i2c;
- /* Wrap with our custom algo which switches to DDC mode */
- intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
-
- /* encoder type will be decided later */
- drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
- goto err_enc;
+ goto err;
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_enc;
+ goto err;
}
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err_enc;
+ goto err;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err_enc;
+ goto err;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
-err_enc:
- drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
- if (intel_sdvo->analog_ddc_bus != NULL)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
- if (intel_encoder->ddc_bus != NULL)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (intel_encoder->i2c_bus != NULL)
- intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
kfree(intel_sdvo);
return false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4a117e318a73..2f7681989316 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,7 +48,7 @@ struct intel_tv {
struct intel_encoder base;
int type;
- char *tv_format;
+ const char *tv_format;
int margin[4];
u32 save_TV_H_CTL_1;
u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
struct tv_mode {
- char *name;
+ const char *name;
int clock;
int refresh; /* in millihertz (for precision) */
u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
{
- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
}
static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
{
int i;
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
{
return intel_tv_mode_lookup(intel_tv->tv_format);
}
static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
< 1000)
return MODE_OK;
+
return MODE_CLOCK_RANGE;
}
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
- if (IS_I965G(dev))
+ if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
/* Wait for vblank for the disable to take effect */
- if (!IS_I9XX(dev))
+ if (IS_GEN2(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, 0);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
I915_WRITE(TV_CTL, tv_ctl);
}
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
static int
intel_tv_detect_type (struct intel_tv *intel_tv)
{
- struct drm_encoder *encoder = &intel_tv->base.enc;
+ struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
- int type = DRM_MODE_CONNECTOR_Unknown;
-
- tv_dac = I915_READ(TV_DAC);
+ int type;
/* Disable TV interrupts around load detect or we'll recurse */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
- /*
- * Detect TV by polling)
- */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
DAC_A_0_7_V |
DAC_B_0_7_V |
DAC_C_0_7_V);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- msleep(20);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
- msleep(20);
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("No TV connection detected\n");
- type = -1;
+ type = -1;
+ if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ }
}
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+
/* Restore interrupt config */
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int i;
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_display_mode mode;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
- if (encoder->crtc && encoder->crtc->enabled) {
+ if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv);
} else if (force) {
struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct input_res {
- char *name;
+static const struct input_res {
+ const char *name;
int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
{"640x480", 640, 480},
{"800x600", 800, 600},
{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
- struct input_res *input = &input_res_table[j];
+ const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
unsigned int vactive_s = input->h;
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- struct drm_crtc *crtc = encoder->crtc;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
int ret = 0;
bool changed = false;
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_attached_encoder,
+ .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- char **tv_format_names;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
- intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
- intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
intel_tv->margin[TV_MARGIN_RIGHT] = 46;
intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
- GFP_KERNEL);
- if (!tv_format_names)
- goto out;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
- tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
drm_sysfs_connector_add(connector);
}
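Note on the intel_tv.c hunks above: the two-step lookup (intel_attached_encoder() followed by enc_to_intel_tv()) is collapsed into a single intel_attached_tv(connector) helper, and the embedded encoder moves from base.enc to base.base. The helper's definition is not part of this diff; the standalone sketch below only illustrates the container_of-style walk it is assumed to perform, with stand-in type and field names rather than the real i915 ones.

/* Illustrative sketch only -- not part of the patch above. */
#include <stddef.h>
#include <stdio.h>

struct encoder   { int id; };
struct connector { struct encoder *attached_encoder; };

struct tv_out {
	struct encoder base;	/* embedded, so we can walk back from it */
	int type;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Assumed shape of intel_attached_tv(): connector -> encoder -> container. */
static struct tv_out *attached_tv(struct connector *c)
{
	return container_of(c->attached_encoder, struct tv_out, base);
}

int main(void)
{
	struct tv_out tv = { .base = { .id = 1 }, .type = 42 };
	struct connector conn = { .attached_encoder = &tv.base };

	printf("recovered type: %d\n", attached_tv(&conn)->type);
	return 0;
}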
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ac64f0b0392e..0aaf5f67a436 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -60,8 +60,6 @@ static struct drm_driver driver = {
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d2d28048efb2..72730e9ca06c 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select ACPI_VIDEO if ACPI
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e9b06e4ef2a2..23fa82d667d6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o \
+ nouveau_dp.o nouveau_ramht.o \
+ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -23,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
nv10_gpio.o nv50_gpio.o \
- nv50_calc.o
+ nv50_calc.o \
+ nv04_pm.o nv50_pm.o nva3_pm.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index c17a055ee3e5..119152606e4c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return ret;
- nv_connector->edid = edid;
+ nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 974b0f8ae048..5f21030a293b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -43,9 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-
struct init_exec {
bool execute;
bool repeat;
@@ -272,12 +269,6 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
-struct bit_entry {
- uint8_t id[2];
- uint16_t length;
- uint16_t offset;
-};
-
static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
#define MACRO_INDEX_SIZE 2
@@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 3;
}
- if (cond & 1)
+ if (!(cond & 1))
iexec->execute = false;
}
break;
@@ -2167,11 +2158,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p =
- io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
val = ioread32(p + (off & ~PAGE_MASK));
- io_mapping_unmap_atomic(p, KM_USER0);
+ io_mapping_unmap_atomic(p);
}
return val;
@@ -2183,12 +2174,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
{
if (off < pci_resource_len(dev->pdev, 1)) {
uint8_t __iomem *p =
- io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
iowrite32(val, p + (off & ~PAGE_MASK));
wmb();
- io_mapping_unmap_atomic(p, KM_USER0);
+ io_mapping_unmap_atomic(p);
}
}
@@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
return 0;
}
+struct pll_mapping {
+ u8 type;
+ u32 reg;
+};
+
+static struct pll_mapping nv04_pll_mapping[] = {
+ { PLL_CORE , NV_PRAMDAC_NVPLL_COEFF },
+ { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+ { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+ { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+ {}
+};
+
+static struct pll_mapping nv40_pll_mapping[] = {
+ { PLL_CORE , 0x004000 },
+ { PLL_MEMORY, 0x004020 },
+ { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+ { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+ {}
+};
+
+static struct pll_mapping nv50_pll_mapping[] = {
+ { PLL_CORE , 0x004028 },
+ { PLL_SHADER, 0x004020 },
+ { PLL_UNK03 , 0x004000 },
+ { PLL_MEMORY, 0x004008 },
+ { PLL_UNK40 , 0x00e810 },
+ { PLL_UNK41 , 0x00e818 },
+ { PLL_UNK42 , 0x00e824 },
+ { PLL_VPLL0 , 0x614100 },
+ { PLL_VPLL1 , 0x614900 },
+ {}
+};
+
+static struct pll_mapping nv84_pll_mapping[] = {
+ { PLL_CORE , 0x004028 },
+ { PLL_SHADER, 0x004020 },
+ { PLL_MEMORY, 0x004008 },
+ { PLL_UNK05 , 0x004030 },
+ { PLL_UNK41 , 0x00e818 },
+ { PLL_VPLL0 , 0x614100 },
+ { PLL_VPLL1 , 0x614900 },
+ {}
+};
+
+u32
+get_pll_register(struct drm_device *dev, enum pll_types type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct pll_mapping *map;
+ int i;
+
+ if (dev_priv->card_type < NV_40)
+ map = nv04_pll_mapping;
+ else
+ if (dev_priv->card_type < NV_50)
+ map = nv40_pll_mapping;
+ else {
+ u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+
+ if (plim[0] >= 0x30) {
+ u8 *entry = plim + plim[1];
+ for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+ if (entry[0] == type)
+ return ROM32(entry[3]);
+ }
+
+ return 0;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ map = nv50_pll_mapping;
+ else
+ map = nv84_pll_mapping;
+ }
+
+ while (map->reg) {
+ if (map->type == type)
+ return map->reg;
+ map++;
+ }
+
+ return 0;
+}
+
int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
{
/*
@@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
/* initialize all members to zero */
memset(pll_lim, 0, sizeof(struct pll_lims));
+ /* if we were passed a type rather than a register, figure
+ * out the register and store it
+ */
+ if (limit_match > PLL_MAX)
+ pll_lim->reg = limit_match;
+ else {
+ pll_lim->reg = get_pll_register(dev, limit_match);
+ if (!pll_lim->reg)
+ return -ENOENT;
+ }
+
if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
@@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
pll_lim->max_usable_log2p = 0x6;
} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
- uint32_t reg = 0; /* default match */
uint8_t *pll_rec;
int i;
@@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
NV_WARN(dev, "Default PLL limit entry has non-zero "
"register field\n");
- if (limit_match > MAX_PLL_TYPES)
- /* we've been passed a reg as the match */
- reg = limit_match;
- else /* limit match is a pll type */
- for (i = 1; i < entries && !reg; i++) {
- uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-
- if (limit_match == NVPLL &&
- (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
- reg = cmpreg;
- if (limit_match == MPLL &&
- (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
- reg = cmpreg;
- if (limit_match == VPLL1 &&
- (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
- reg = cmpreg;
- if (limit_match == VPLL2 &&
- (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
- reg = cmpreg;
- }
-
for (i = 1; i < entries; i++)
- if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+ if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
pllindex = i;
break;
}
+ if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+ "limits table", pll_lim->reg);
+ return -ENOENT;
+ }
+
pll_rec = &bios->data[plloffs + recordlen * pllindex];
BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
- pllindex ? reg : 0);
+ pllindex ? pll_lim->reg : 0);
/*
* Frequencies are stored in tables in MHz, kHz are more
@@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (cv == 0x51 && !pll_lim->refclk) {
uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
- if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
- ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+ if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+ (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
pll_lim->refclk = 200000;
else
@@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
int i;
BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
- limit_match);
+ pll_lim->reg);
for (i = 0; i < entries; i++, entry += recordlen) {
- if (ROM32(entry[3]) == limit_match) {
+ if (ROM32(entry[3]) == pll_lim->reg) {
record = &bios->data[ROM16(entry[1])];
break;
}
@@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (!record) {
NV_ERROR(dev, "Register 0x%08x not found in PLL "
- "limits table", limit_match);
+ "limits table", pll_lim->reg);
return -ENOENT;
}
@@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
int i;
BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
- limit_match);
+ pll_lim->reg);
for (i = 0; i < entries; i++, entry += recordlen) {
- if (ROM32(entry[3]) == limit_match) {
+ if (ROM32(entry[3]) == pll_lim->reg) {
record = &bios->data[ROM16(entry[1])];
break;
}
@@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
if (!record) {
NV_ERROR(dev, "Register 0x%08x not found in PLL "
- "limits table", limit_match);
+ "limits table", pll_lim->reg);
return -ENOENT;
}
@@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
if (bitentry->length < 0x5)
return 0;
- if (bitentry->id[1] < 2) {
+ if (bitentry->version < 2) {
bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
} else {
@@ -5403,27 +5475,40 @@ struct bit_table {
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ u8 entries, *entry;
+
+ entries = bios->data[bios->offset + 10];
+ entry = &bios->data[bios->offset + 12];
+ while (entries--) {
+ if (entry[0] == id) {
+ bit->id = entry[0];
+ bit->version = entry[1];
+ bit->length = ROM16(entry[2]);
+ bit->offset = ROM16(entry[4]);
+ bit->data = ROMPTR(bios, entry[4]);
+ return 0;
+ }
+
+ entry += bios->data[bios->offset + 9];
+ }
+
+ return -ENOENT;
+}
+
static int
parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
struct bit_table *table)
{
struct drm_device *dev = bios->dev;
- uint8_t maxentries = bios->data[bitoffset + 4];
- int i, offset;
struct bit_entry bitentry;
- for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
- bitentry.id[0] = bios->data[offset];
-
- if (bitentry.id[0] != table->id)
- continue;
-
- bitentry.id[1] = bios->data[offset + 1];
- bitentry.length = ROM16(bios->data[offset + 2]);
- bitentry.offset = ROM16(bios->data[offset + 4]);
-
+ if (bit_table(dev, table->id, &bitentry) == 0)
return table->parse_fn(dev, bios, &bitentry);
- }
NV_INFO(dev, "BIT table '%c' not found\n", table->id);
return -ENOSYS;
@@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
+ struct drm_device *dev = bios->dev;
struct dcb_gpio_table *gpio = &bios->dcb.gpio;
+ if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+ NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+ return NULL;
+ }
+
return &gpio->entry[gpio->entries++];
}
@@ -5706,113 +5797,90 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
}
static void
-parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
- struct dcb_gpio_entry *gpio;
- uint16_t ent = ROM16(bios->data[offset]);
- uint8_t line = ent & 0x1f,
- tag = ent >> 5 & 0x3f,
- flags = ent >> 11 & 0x1f;
-
- if (tag == 0x3f)
- return;
-
- gpio = new_gpio_entry(bios);
-
- gpio->tag = tag;
- gpio->line = line;
- gpio->invert = flags != 4;
- gpio->entry = ent;
-}
-
-static void
-parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
- uint32_t entry = ROM32(bios->data[offset]);
- struct dcb_gpio_entry *gpio;
-
- if ((entry & 0x0000ff00) == 0x0000ff00)
- return;
-
- gpio = new_gpio_entry(bios);
- gpio->tag = (entry & 0x0000ff00) >> 8;
- gpio->line = (entry & 0x0000001f) >> 0;
- gpio->state_default = (entry & 0x01000000) >> 24;
- gpio->state[0] = (entry & 0x18000000) >> 27;
- gpio->state[1] = (entry & 0x60000000) >> 29;
- gpio->entry = entry;
-}
-
-static void
parse_dcb_gpio_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
- uint8_t *gpio_table = &bios->data[gpio_table_ptr];
- int header_len = gpio_table[1],
- entries = gpio_table[2],
- entry_len = gpio_table[3];
- void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+ struct dcb_gpio_entry *e;
+ u8 headerlen, entries, recordlen;
+ u8 *dcb, *gpio = NULL, *entry;
int i;
- if (bios->dcb.version >= 0x40) {
- if (gpio_table_ptr && entry_len != 4) {
- NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
- return;
- }
+ dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x30) {
+ gpio = ROMPTR(bios, dcb[10]);
+ if (!gpio)
+ goto no_table;
- parse_entry = parse_dcb40_gpio_entry;
+ headerlen = gpio[1];
+ entries = gpio[2];
+ recordlen = gpio[3];
+ } else
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+ gpio = ROMPTR(bios, dcb[-15]);
+ if (!gpio)
+ goto no_table;
+
+ headerlen = 3;
+ entries = gpio[2];
+ recordlen = gpio[1];
+ } else
+ if (dcb[0] >= 0x22) {
+ /* No GPIO table present, parse the TVDAC GPIO data. */
+ uint8_t *tvdac_gpio = &dcb[-5];
- } else if (bios->dcb.version >= 0x30) {
- if (gpio_table_ptr && entry_len != 2) {
- NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
- return;
+ if (tvdac_gpio[0] & 1) {
+ e = new_gpio_entry(bios);
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = tvdac_gpio[1] >> 4;
+ e->invert = tvdac_gpio[0] & 2;
}
- parse_entry = parse_dcb30_gpio_entry;
-
- } else if (bios->dcb.version >= 0x22) {
- /*
- * DCBs older than v3.0 don't really have a GPIO
- * table, instead they keep some GPIO info at fixed
- * locations.
- */
- uint16_t dcbptr = ROM16(bios->data[0x36]);
- uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+ goto no_table;
+ } else {
+ NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+ goto no_table;
+ }
- if (tvdac_gpio[0] & 1) {
- struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+ entry = gpio + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ e = new_gpio_entry(bios);
+ if (!e)
+ break;
- gpio->tag = DCB_GPIO_TVDAC0;
- gpio->line = tvdac_gpio[1] >> 4;
- gpio->invert = tvdac_gpio[0] & 2;
- }
- } else {
- /*
- * No systematic way to store GPIO info on pre-v2.2
- * DCBs, try to match the PCI device IDs.
- */
+ if (gpio[0] < 0x40) {
+ e->entry = ROM16(entry[0]);
+ e->tag = (e->entry & 0x07e0) >> 5;
+ if (e->tag == 0x3f) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+ e->line = (e->entry & 0x001f);
+ e->invert = ((e->entry & 0xf800) >> 11) != 4;
+ } else {
+ e->entry = ROM32(entry[0]);
+ e->tag = (e->entry & 0x0000ff00) >> 8;
+ if (e->tag == 0xff) {
+ bios->dcb.gpio.entries--;
+ continue;
+ }
- gpio->tag = DCB_GPIO_TVDAC0;
- gpio->line = 4;
+ e->line = (e->entry & 0x0000001f) >> 0;
+ e->state_default = (e->entry & 0x01000000) >> 24;
+ e->state[0] = (e->entry & 0x18000000) >> 27;
+ e->state[1] = (e->entry & 0x60000000) >> 29;
}
-
}
- if (!gpio_table_ptr)
- return;
-
- if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
- NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
- entries = DCB_MAX_NUM_GPIO_ENTRIES;
+no_table:
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ e = new_gpio_entry(bios);
+ if (e) {
+ e->tag = DCB_GPIO_TVDAC0;
+ e->line = 4;
+ }
}
-
- for (i = 0; i < entries; i++)
- parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
}
struct dcb_connector_table_entry *
@@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
bit_signature, sizeof(bit_signature));
if (offset) {
NV_TRACE(dev, "BIT BIOS found\n");
+ bios->type = NVBIOS_BIT;
+ bios->offset = offset;
return parse_bit_structure(bios, offset + 6);
}
@@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
bmp_signature, sizeof(bmp_signature));
if (offset) {
NV_TRACE(dev, "BMP BIOS found\n");
+ bios->type = NVBIOS_BMP;
+ bios->offset = offset;
return parse_bmp_structure(dev, bios, offset);
}
@@ -6806,6 +6878,8 @@ nouveau_bios_init(struct drm_device *dev)
"running VBIOS init tables.\n");
bios->execute = true;
}
+ if (nouveau_force_post)
+ bios->execute = true;
ret = nouveau_run_vbios_init(dev);
if (ret)
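Note on the nouveau_bios.c hunks above: get_pll_limits() now resolves a PLL type to a register through get_pll_register() and the per-chipset mapping tables, treating any match value above PLL_MAX as a literal register. The standalone sketch below mirrors that lookup; the register values are lifted from the nv50 table in the patch purely for illustration, and demo_map/lookup_pll_reg are made-up names.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

struct pll_map { uint8_t type; uint32_t reg; };

enum { PLL_CORE = 0x01, PLL_MEMORY = 0x04, PLL_VPLL0 = 0x80, PLL_MAX = 0xff };

static const struct pll_map demo_map[] = {
	{ PLL_CORE,   0x004028 },	/* values from the nv50 table above */
	{ PLL_MEMORY, 0x004008 },
	{ PLL_VPLL0,  0x614100 },
	{ 0, 0 }			/* a zero register terminates the table */
};

/* Walk the zero-terminated table; 0 means "not found", as in the patch. */
static uint32_t lookup_pll_reg(const struct pll_map *map, uint8_t type)
{
	for (; map->reg; map++)
		if (map->type == type)
			return map->reg;
	return 0;
}

int main(void)
{
	/* Callers may pass either a PLL type (<= PLL_MAX) or a raw register;
	 * anything above PLL_MAX is treated as the register itself. */
	uint32_t match = PLL_MEMORY;
	uint32_t reg = match > PLL_MAX ? match : lookup_pll_reg(demo_map, match);

	printf("PLL register: 0x%06x\n", reg);
	return 0;
}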
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c1de2f3fcb0e..50a648e01c49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,6 +34,20 @@
#define DCB_LOC_ON_CHIP 0
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+
+struct bit_entry {
+ uint8_t id;
+ uint8_t version;
+ uint16_t length;
+ uint16_t offset;
+ uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
struct dcb_i2c_entry {
uint32_t entry;
uint8_t port_type;
@@ -170,16 +184,28 @@ enum LVDS_script {
LVDS_PANEL_OFF
};
-/* changing these requires matching changes to reg tables in nv_get_clock */
-#define MAX_PLL_TYPES 4
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
enum pll_types {
- NVPLL,
- MPLL,
- VPLL1,
- VPLL2
+ PLL_CORE = 0x01,
+ PLL_SHADER = 0x02,
+ PLL_UNK03 = 0x03,
+ PLL_MEMORY = 0x04,
+ PLL_UNK05 = 0x05,
+ PLL_UNK40 = 0x40,
+ PLL_UNK41 = 0x41,
+ PLL_UNK42 = 0x42,
+ PLL_VPLL0 = 0x80,
+ PLL_VPLL1 = 0x81,
+ PLL_MAX = 0xff
};
struct pll_lims {
+ u32 reg;
+
struct {
int minfreq;
int maxfreq;
@@ -212,6 +238,11 @@ struct pll_lims {
struct nvbios {
struct drm_device *dev;
+ enum {
+ NVBIOS_BMP,
+ NVBIOS_BIT
+ } type;
+ uint16_t offset;
uint8_t chip_version;
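Note on the nouveau_bios.h hunk above: ROM16/ROM32/ROMPTR move into the header so other files can walk VBIOS tables (bit_table() and the reworked GPIO parser both rely on them). The sketch below is a simplified, endian-explicit standalone version of what these helpers do; it is not the kernel implementation.

/* Illustrative sketch only -- not part of the patch above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (uint16_t)p[1] << 8;
}

static uint32_t rom32(const uint8_t *p)
{
	return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Offset 0 means "no table", matching the ROMPTR() convention. */
static const uint8_t *romptr(const uint8_t *image, const uint8_t *at)
{
	uint16_t off = rom16(at);
	return off ? image + off : NULL;
}

int main(void)
{
	static const uint8_t image[32] = {
		[6]    = 0x10,		/* 16-bit table pointer stored at bytes 6-7 */
		[0x10] = 0xef, [0x11] = 0xbe, [0x12] = 0xad, [0x13] = 0xde,
	};
	const uint8_t *tbl = romptr(image, &image[6]);

	printf("first table word: 0x%08x\n", tbl ? rom32(tbl) : 0);
	return 0;
}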
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index f6f44779d82f..80353e2b8409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,21 +36,6 @@
#include <linux/log2.h>
#include <linux/slab.h>
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- int ret;
-
- if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
- return 0;
-
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- return ret;
-}
-
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
@@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- ttm_bo_kunmap(&nvbo->kmap);
-
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
@@ -164,8 +147,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
align >>= PAGE_SHIFT;
- nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -305,7 +286,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
- ttm_bo_kunmap(&nvbo->kmap);
+ if (nvbo)
+ ttm_bo_kunmap(&nvbo->kmap);
}
u16
@@ -399,14 +381,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->gpu_offset = dev_priv->vm_vram_base;
+ if (dev_priv->card_type == NV_50)
+ man->gpu_offset = 0x40000000;
+ else
+ man->gpu_offset = 0;
break;
case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -469,19 +456,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict || (nvbo->channel &&
- nvbo->channel != chan),
+ if (nvbo->channel) {
+ ret = nouveau_fence_sync(fence, nvbo->channel);
+ if (ret)
+ goto out;
+ }
+
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
no_wait_reserve, no_wait_gpu, new_mem);
+out:
nouveau_fence_unref((void *)&fence);
return ret;
}
static inline uint32_t
-nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
- struct ttm_mem_reg *mem)
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
- if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ if (nvbo->no_vm) {
if (mem->mem_type == TTM_PL_TT)
return NvDmaGART;
return NvDmaVRAM;
@@ -493,86 +487,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct nouveau_channel *chan;
- uint64_t src_offset, dst_offset;
- uint32_t page_count;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 length = (new_mem->num_pages << PAGE_SHIFT);
+ u64 src_offset, dst_offset;
int ret;
- chan = nvbo->channel;
- if (!chan || nvbo->tile_flags || nvbo->no_vm)
- chan = dev_priv->channel;
-
- src_offset = old_mem->mm_node->start << PAGE_SHIFT;
- dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
- if (chan != dev_priv->channel) {
- if (old_mem->mem_type == TTM_PL_TT)
- src_offset += dev_priv->vm_gart_base;
- else
+ src_offset = old_mem->start << PAGE_SHIFT;
+ dst_offset = new_mem->start << PAGE_SHIFT;
+ if (!nvbo->no_vm) {
+ if (old_mem->mem_type == TTM_PL_VRAM)
src_offset += dev_priv->vm_vram_base;
-
- if (new_mem->mem_type == TTM_PL_TT)
- dst_offset += dev_priv->vm_gart_base;
else
+ src_offset += dev_priv->vm_gart_base;
+
+ if (new_mem->mem_type == TTM_PL_VRAM)
dst_offset += dev_priv->vm_vram_base;
+ else
+ dst_offset += dev_priv->vm_gart_base;
}
ret = RING_SPACE(chan, 3);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
- OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
- OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
- if (dev_priv->card_type >= NV_50) {
- ret = RING_SPACE(chan, 4);
+ BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+ while (length) {
+ u32 amount, stride, height;
+
+ amount = min(length, (u64)(4 * 1024 * 1024));
+ stride = 16 * 4;
+ height = amount / stride;
+
+ if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ OUT_RING (chan, 1);
+ }
+ if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ OUT_RING (chan, 1);
+ }
+
+ ret = RING_SPACE(chan, 14);
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
- OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
- OUT_RING(chan, 1);
+
+ BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, stride);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, stride);
+ OUT_RING (chan, height);
+ OUT_RING (chan, 0x00000101);
+ OUT_RING (chan, 0x00000000);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING (chan, 0);
+
+ length -= amount;
+ src_offset += amount;
+ dst_offset += amount;
}
+ return 0;
+}
+
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ u32 src_offset = old_mem->start << PAGE_SHIFT;
+ u32 dst_offset = new_mem->start << PAGE_SHIFT;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+ OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
page_count = new_mem->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
- if (dev_priv->card_type >= NV_50) {
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
- OUT_RING(chan, upper_32_bits(src_offset));
- OUT_RING(chan, upper_32_bits(dst_offset));
- }
ret = RING_SPACE(chan, 11);
if (ret)
return ret;
+
BEGIN_RING(chan, NvSubM2MF,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
- OUT_RING(chan, lower_32_bits(src_offset));
- OUT_RING(chan, lower_32_bits(dst_offset));
- OUT_RING(chan, PAGE_SIZE); /* src_pitch */
- OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
- OUT_RING(chan, PAGE_SIZE); /* line_length */
- OUT_RING(chan, line_count);
- OUT_RING(chan, (1<<8)|(1<<0));
- OUT_RING(chan, 0);
+ OUT_RING (chan, src_offset);
+ OUT_RING (chan, dst_offset);
+ OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* line_length */
+ OUT_RING (chan, line_count);
+ OUT_RING (chan, 0x00000101);
+ OUT_RING (chan, 0x00000000);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
- OUT_RING(chan, 0);
+ OUT_RING (chan, 0);
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
+ return 0;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = nvbo->channel;
+ if (!chan || nvbo->no_vm)
+ chan = dev_priv->channel;
+
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ else
+ ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ if (ret)
+ return ret;
+
return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
@@ -606,12 +695,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (tmp_mem.mm_node) {
- spin_lock(&bo->bdev->glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
-
+ ttm_bo_mem_put(bo, &tmp_mem);
return ret;
}
@@ -644,12 +728,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
out:
- if (tmp_mem.mm_node) {
- spin_lock(&bo->bdev->glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
-
+ ttm_bo_mem_put(bo, &tmp_mem);
return ret;
}
@@ -669,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
return 0;
}
- offset = new_mem->mm_node->start << PAGE_SHIFT;
+ offset = new_mem->start << PAGE_SHIFT;
if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
@@ -719,12 +798,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- goto out;
- }
-
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
@@ -733,6 +806,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
+ /* Software copy if the card isn't up and running yet. */
+ if (!dev_priv->channel) {
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ goto out;
+ }
+
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
@@ -783,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
case TTM_PL_TT:
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->gart_info.aper_base;
mem->bus.is_iomem = true;
}
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
@@ -808,7 +887,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- return 0;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ /* as long as the bo isn't in vram, and isn't tiled, we've got
+ * nothing to do here.
+ */
+ if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+ return 0;
+ }
+
+ /* make sure bo is in mappable vram */
+ if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+ return 0;
+
+
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+ nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+ return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}
struct ttm_bo_driver nouveau_bo_driver = {
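Note on the nouveau_bo.c hunks above: nv50_bo_move_m2mf() feeds the copy to the M2MF engine in chunks of at most 4 MiB, each described as a 64-byte-wide rectangle (stride = 16 * 4, height = amount / stride). The sketch below reproduces only that splitting arithmetic in standalone form; no ring programming is attempted.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t length = 10u << 20;		/* e.g. a 10 MiB buffer */
	uint64_t src_offset = 0, dst_offset = 0;

	while (length) {
		uint64_t amount = length < (4u << 20) ? length : (4u << 20);
		uint32_t stride = 16 * 4;	/* 64-byte lines */
		uint32_t height = (uint32_t)(amount / stride);

		printf("copy %8llu bytes from 0x%llx to 0x%llx as %ux%u\n",
		       (unsigned long long)amount,
		       (unsigned long long)src_offset,
		       (unsigned long long)dst_offset,
		       stride, height);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}
	return 0;
}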
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ca85da784846..dad96cce5e39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
- int MClk = nouveau_hw_get_clock(dev, MPLL);
- int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+ int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+ int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
@@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
}
static void
-nv30_update_arb(int *burst, int *lwm)
+nv20_update_arb(int *burst, int *lwm)
{
unsigned int fifo_size, burst_size, graphics_lwm;
@@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->card_type < NV_30)
+ if (dev_priv->card_type < NV_20)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
- nv30_update_arb(burst, lwm);
+ nv20_update_arb(burst, lwm);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 0480f064f2c1..373950e34814 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of it's
* exact reason for existing :) PCI access to cmdbuf in
@@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
- }
-
- ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
- if (ret) {
- NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
- if (pushbuf != dev_priv->gart_info.sg_ctxdma)
- nouveau_gpuobj_del(dev, &pushbuf);
- return ret;
+ chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}
+ nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+ nouveau_gpuobj_ref(NULL, &pushbuf);
return 0;
}
@@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_dma_init(chan);
if (!ret)
- ret = nouveau_fence_init(chan);
+ ret = nouveau_fence_channel_init(chan);
if (ret) {
nouveau_channel_free(chan);
return ret;
@@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
- nouveau_fence_fini(chan);
+ nouveau_fence_channel_fini(chan);
/* This will prevent pfifo from switching channels. */
pfifo->reassign(dev, false);
@@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan)
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Release the channel's resources */
- nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
+ nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fc737037f751..0871495096fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
+/*TODO: This could use improvement, and learn to handle the fixed
+ * BIOS tables etc. It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+ if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+ u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+ if (bpc > 4)
+ return bpc;
+ }
+
+ return 18;
+}
static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
@@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
return NULL;
}
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder;
+ struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+ if (!dn ||
+ !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+ (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+ return NULL;
+
+ for_each_child_of_node(dn, cn) {
+ const char *name = of_get_property(cn, "name", NULL);
+ const void *edid = of_get_property(cn, "EDID", NULL);
+ int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+ if (nv_encoder->dcb->i2c_index == idx && edid) {
+ nv_connector->edid =
+ kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ of_node_put(cn);
+ return nv_encoder;
+ }
+ }
+#endif
+ return NULL;
+}
+
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder)
@@ -225,6 +271,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
+ nv_encoder = nouveau_connector_of_detect(connector);
+ if (nv_encoder) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
@@ -630,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
else
max_clock = nv_encoder->dp.link_nr * 162000;
- clock *= 3;
+ clock = clock * nouveau_connector_bpp(connector) / 8;
break;
default:
BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 0d2e668ccfe5..c21ed6b16f88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index);
void
nouveau_connector_set_polling(struct drm_connector *);
+int
+nouveau_connector_bpp(struct drm_connector *);
+
#endif /* __NOUVEAU_CONNECTOR_H__ */
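Note on the nouveau_connector.c/.h hunks above: nouveau_connector_bpp() derives a depth from EDID revision 4 data (bits 6:4 of the input byte, assumed to encode colour depth per EDID 1.4) and falls back to 18, and the DP mode_valid check now scales the pixel clock by that value divided by 8 rather than a fixed factor of 3. The standalone sketch below mirrors that arithmetic with hypothetical parameter names.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

static int connector_bpp(int have_edid, uint8_t revision, uint8_t input_byte)
{
	if (have_edid && revision >= 4) {
		uint8_t bpc = ((input_byte & 0x70) >> 3) + 4;
		if (bpc > 4)
			return bpc;
	}
	return 18;	/* default used by the patch when EDID gives no answer */
}

int main(void)
{
	/* Link check from the patch: pixel clock scaled by bpp/8, not *3. */
	int clock_khz = 148500;
	int scaled = clock_khz * connector_bpp(1, 4, 0x20) / 8;

	printf("scaled clock: %d kHz\n", scaled);
	return 0;
}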
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 7933de4aff2e..8e1592368cce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
return 0;
}
+static int
+nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+ int ret;
+
+ ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ if (ret)
+ seq_printf(m, "failed: %d", ret);
+ else
+ seq_printf(m, "succeeded\n");
+ return 0;
+}
+
static struct drm_info_list nouveau_debugfs_list[] = {
+ { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2e3c6caa97ee..82581e600dcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *m2mf = NULL;
- struct nouveau_gpuobj *nvsw = NULL;
+ struct nouveau_gpuobj *obj = NULL;
int ret, i;
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
- 0x0039 : 0x5039, &m2mf);
+ 0x0039 : 0x5039, &obj);
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
- if (ret)
- return ret;
-
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+ ret = nouveau_ramht_insert(chan, NvM2MF, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
@@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
- /* Map M2MF notifier object - fbcon. */
- ret = nouveau_bo_map(chan->notifier_bo);
- if (ret)
- return ret;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
@@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
- /* Initialise NV_SW */
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
-
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
@@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
chan->dma.ib_free = get - chan->dma.ib_put;
if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max + 1;
+ chan->dma.ib_free += chan->dma.ib_max;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8b05c15866d5..d578c21d3c8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -72,6 +72,7 @@ enum {
NvGdiRect = 0x8000000c,
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
+ NvSema = 0x8000000f,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 8a1b188b4cd1..4562f309ae3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -317,7 +317,8 @@ train:
return false;
config[0] = nv_encoder->dp.link_nr;
- if (nv_encoder->dp.dpcd_version >= 0x11)
+ if (nv_encoder->dp.dpcd_version >= 0x11 &&
+ nv_encoder->dp.enhanced_frame)
config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
ret = nouveau_dp_lane_count_set(encoder, config[0]);
@@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder)
!nv_encoder->dcb->dpconf.link_bw)
nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
- nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+ nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+ nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
+
return true;
}
@@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
- if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+ if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
+ 0x00010000, 0x00000000)) {
NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
nv_rd32(dev, NV50_AUXCH_CTRL(index)));
ret = -EBUSY;
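Note on the nouveau_dp.c hunks above: the DPCD lane count is now masked with DP_MAX_LANE_COUNT_MASK and enhanced framing is only requested when the sink advertises DP_ENHANCED_FRAME_CAP. The sketch below assumes the standard DPCD layout for receiver-capability byte 2 (lane count in bits 4:0, enhanced-frame capability in bit 7); parse_dpcd_byte2 and dp_caps are made-up names.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

#define DP_MAX_LANE_COUNT_MASK	0x1f
#define DP_ENHANCED_FRAME_CAP	0x80

struct dp_caps { int link_nr; int enhanced_frame; };

static struct dp_caps parse_dpcd_byte2(uint8_t dpcd2, int board_max_lanes)
{
	struct dp_caps c;

	c.link_nr = dpcd2 & DP_MAX_LANE_COUNT_MASK;	/* lanes, bits 4:0 */
	if (c.link_nr > board_max_lanes)
		c.link_nr = board_max_lanes;		/* DCB limit still wins */
	c.enhanced_frame = !!(dpcd2 & DP_ENHANCED_FRAME_CAP);
	return c;
}

int main(void)
{
	struct dp_caps c = parse_dpcd_byte2(0x84, 4);	/* 4 lanes + enhanced framing */

	printf("lanes=%d enhanced=%d\n", c.link_nr, c.enhanced_frame);
	return 0;
}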
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index eb15345162a0..90875494a65a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -31,13 +31,14 @@
#include "nouveau_hw.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_pm.h"
#include "nv50_display.h"
#include "drm_pciids.h"
-MODULE_PARM_DESC(noagp, "Disable AGP");
-int nouveau_noagp;
-module_param_named(noagp, nouveau_noagp, int, 0400);
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
static int nouveau_modeset = -1; /* kms */
@@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(force_post, "Force POST");
+int nouveau_force_post = 0;
+module_param_named(force_post, nouveau_force_post, int, 0400);
+
MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
int nouveau_reg_debug;
module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
+char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
+int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
+ nouveau_pm_resume(dev);
+
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -379,8 +394,6 @@ static struct drm_driver driver = {
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = nouveau_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b1be617373b6..3a07e580d27a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -133,22 +133,24 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_DISPLAY 2
#define NVOBJ_ENGINE_INT 0xdeadbeef
-#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
-#define NVOBJ_FLAG_FAKE (1 << 3)
struct nouveau_gpuobj {
+ struct drm_device *dev;
+ struct kref refcount;
struct list_head list;
- struct nouveau_channel *im_channel;
struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
- uint32_t im_backing_start;
uint32_t *im_backing_suspend;
int im_bound;
uint32_t flags;
- int refcount;
+
+ u32 size;
+ u32 pinst;
+ u32 cinst;
+ u64 vinst;
uint32_t engine;
uint32_t class;
@@ -157,16 +159,6 @@ struct nouveau_gpuobj {
void *priv;
};
-struct nouveau_gpuobj_ref {
- struct list_head list;
-
- struct nouveau_gpuobj *gpuobj;
- uint32_t instance;
-
- struct nouveau_channel *channel;
- int handle;
-};
-
struct nouveau_channel {
struct drm_device *dev;
int id;
@@ -192,33 +184,32 @@ struct nouveau_channel {
} fence;
/* DMA push buffer */
- struct nouveau_gpuobj_ref *pushbuf;
- struct nouveau_bo *pushbuf_bo;
- uint32_t pushbuf_base;
+ struct nouveau_gpuobj *pushbuf;
+ struct nouveau_bo *pushbuf_bo;
+ uint32_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
struct drm_mm notifier_heap;
/* PFIFO context */
- struct nouveau_gpuobj_ref *ramfc;
- struct nouveau_gpuobj_ref *cache;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
- struct nouveau_gpuobj_ref *ramin_grctx;
+ struct nouveau_gpuobj *ramin_grctx;
void *pgraph_ctx;
/* NV50 VM */
- struct nouveau_gpuobj *vm_pd;
- struct nouveau_gpuobj_ref *vm_gart_pt;
- struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj *vm_gart_pt;
+ struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
- struct nouveau_gpuobj_ref *ramin; /* Private instmem */
- struct drm_mm ramin_heap; /* Private PRAMIN heap */
- struct nouveau_gpuobj_ref *ramht; /* Hash table */
- struct list_head ramht_refs; /* Objects referenced by RAMHT */
+ struct nouveau_gpuobj *ramin; /* Private instmem */
+ struct drm_mm ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_ramht *ramht; /* Hash table */
/* GPU object info for stuff used in-kernel (mm_enabled) */
uint32_t m2mf_ntfy;
@@ -296,7 +287,7 @@ struct nouveau_fb_engine {
struct nouveau_fifo_engine {
int channels;
- struct nouveau_gpuobj_ref *playlist[2];
+ struct nouveau_gpuobj *playlist[2];
int cur_playlist;
int (*init)(struct drm_device *);
@@ -305,7 +296,6 @@ struct nouveau_fifo_engine {
void (*disable)(struct drm_device *);
void (*enable)(struct drm_device *);
bool (*reassign)(struct drm_device *, bool enable);
- bool (*cache_flush)(struct drm_device *dev);
bool (*cache_pull)(struct drm_device *dev, bool enable);
int (*channel_id)(struct drm_device *);
@@ -334,7 +324,7 @@ struct nouveau_pgraph_engine {
int grctx_size;
/* NV2x/NV3x context table (0x400780) */
- struct nouveau_gpuobj_ref *ctx_table;
+ struct nouveau_gpuobj *ctx_table;
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -369,6 +359,91 @@ struct nouveau_gpio_engine {
void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
+struct nouveau_pm_voltage_level {
+ u8 voltage;
+ u8 vid;
+};
+
+struct nouveau_pm_voltage {
+ bool supported;
+ u8 vid_mask;
+
+ struct nouveau_pm_voltage_level *level;
+ int nr_level;
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+ struct device_attribute dev_attr;
+ char name[32];
+ int id;
+
+ u32 core;
+ u32 memory;
+ u32 shader;
+ u32 unk05;
+
+ u8 voltage;
+ u8 fanspeed;
+
+ u16 memscript;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+ u16 offset_constant;
+ s16 offset_mult;
+ u16 offset_div;
+ u16 slope_mult;
+ u16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+ s16 critical;
+ s16 down_clock;
+ s16 fan_boost;
+};
+
+struct nouveau_pm_memtiming {
+ u32 reg_100220;
+ u32 reg_100224;
+ u32 reg_100228;
+ u32 reg_10022c;
+ u32 reg_100230;
+ u32 reg_100234;
+ u32 reg_100238;
+ u32 reg_10023c;
+};
+
+struct nouveau_pm_memtimings {
+ bool supported;
+ struct nouveau_pm_memtiming *timing;
+ int nr_timing;
+};
+
+struct nouveau_pm_engine {
+ struct nouveau_pm_voltage voltage;
+ struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+ int nr_perflvl;
+ struct nouveau_pm_memtimings memtimings;
+ struct nouveau_pm_temp_sensor_constants sensor_constants;
+ struct nouveau_pm_threshold_temp threshold_temp;
+
+ struct nouveau_pm_level boot;
+ struct nouveau_pm_level *cur;
+
+ struct device *hwmon;
+
+ int (*clock_get)(struct drm_device *, u32 id);
+ void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+ void (*clock_set)(struct drm_device *, void *);
+ int (*voltage_get)(struct drm_device *);
+ int (*voltage_set)(struct drm_device *, int voltage);
+ int (*fanspeed_get)(struct drm_device *);
+ int (*fanspeed_set)(struct drm_device *, int fanspeed);
+ int (*temp_get)(struct drm_device *);
+};
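
The new nouveau_pm_engine gathers the per-chipset power-management hooks (clock, voltage, fan speed, temperature) next to the parsed performance levels. A minimal sketch, not part of the patch, of how a chipset backend might be expected to populate these during card init; the nv50_pm_* and nouveau_voltage_* names here are illustrative placeholders, not functions confirmed by this series.

/* Hedged sketch only: wiring per-chipset callbacks into the new pm engine.
 * All function names on the right-hand side are illustrative placeholders. */
static int hypothetical_pm_setup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;

	pm->clock_get   = nv50_pm_clock_get;   /* int (*)(dev, u32 id): read a clock */
	pm->clock_pre   = nv50_pm_clock_pre;   /* prepare a reclock, returns opaque state */
	pm->clock_set   = nv50_pm_clock_set;   /* commit the prepared reclock */
	pm->voltage_get = nouveau_voltage_get; /* int (*)(dev) */
	pm->voltage_set = nouveau_voltage_set; /* int (*)(dev, int voltage) */
	return 0;
}
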
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -378,6 +453,7 @@ struct nouveau_engine {
struct nouveau_fifo_engine fifo;
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
+ struct nouveau_pm_engine pm;
};
struct nouveau_pll_vals {
@@ -522,8 +598,14 @@ struct drm_nouveau_private {
int flags;
void __iomem *mmio;
+
+ spinlock_t ramin_lock;
void __iomem *ramin;
- uint32_t ramin_size;
+ u32 ramin_size;
+ u32 ramin_base;
+ bool ramin_available;
+ struct drm_mm ramin_heap;
+ struct list_head gpuobj_list;
struct nouveau_bo *vga_ram;
@@ -540,6 +622,12 @@ struct drm_nouveau_private {
atomic_t validate_sequence;
} ttm;
+ struct {
+ spinlock_t lock;
+ struct drm_mm heap;
+ struct nouveau_bo *bo;
+ } fence;
+
int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
@@ -550,15 +638,11 @@ struct drm_nouveau_private {
spinlock_t context_switch_lock;
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
- struct nouveau_gpuobj *ramht;
+ struct nouveau_ramht *ramht;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *ramro;
+
uint32_t ramin_rsvd_vram;
- uint32_t ramht_offset;
- uint32_t ramht_size;
- uint32_t ramht_bits;
- uint32_t ramfc_offset;
- uint32_t ramfc_size;
- uint32_t ramro_offset;
- uint32_t ramro_size;
struct {
enum {
@@ -576,14 +660,12 @@ struct drm_nouveau_private {
} gart_info;
/* nv10-nv40 tiling regions */
- struct {
- struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
- spinlock_t lock;
- } tile;
+ struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
/* VRAM/fb configuration */
uint64_t vram_size;
uint64_t vram_sys_base;
+ u32 vram_rblock_size;
uint64_t fb_phys;
uint64_t fb_available_size;
@@ -600,10 +682,6 @@ struct drm_nouveau_private {
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- struct drm_mm ramin_heap;
-
- struct list_head gpuobj_list;
-
struct nvbios vbios;
struct nv04_mode_state mode_reg;
@@ -634,6 +712,12 @@ struct drm_nouveau_private {
};
static inline struct drm_nouveau_private *
+nouveau_private(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+static inline struct drm_nouveau_private *
nouveau_bdev(struct ttm_bo_device *bd)
{
return container_of(bd, struct drm_nouveau_private, ttm.bdev);
@@ -669,7 +753,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
} while (0)
/* nouveau_drv.c */
-extern int nouveau_noagp;
+extern int nouveau_agpmode;
extern int nouveau_duallink;
extern int nouveau_uscript_lvds;
extern int nouveau_uscript_tmds;
@@ -683,7 +767,10 @@ extern char *nouveau_vbios;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_force_post;
extern int nouveau_override_conntype;
+extern char *nouveau_perflvl;
+extern int nouveau_perflvl_wr;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -704,8 +791,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
/* nouveau_mem.c */
-extern int nouveau_mem_detect(struct drm_device *dev);
-extern int nouveau_mem_init(struct drm_device *);
+extern int nouveau_mem_vram_init(struct drm_device *);
+extern void nouveau_mem_vram_fini(struct drm_device *);
+extern int nouveau_mem_gart_init(struct drm_device *);
+extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
@@ -749,7 +838,6 @@ extern void nouveau_channel_free(struct nouveau_channel *);
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
-extern void nouveau_gpuobj_late_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
@@ -759,24 +847,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
uint32_t size, int align, uint32_t flags,
struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
- uint32_t handle, struct nouveau_gpuobj *,
- struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_del(struct drm_device *,
- struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
- struct nouveau_gpuobj_ref **ref_ret);
-extern int nouveau_gpuobj_new_ref(struct drm_device *,
- struct nouveau_channel *alloc_chan,
- struct nouveau_channel *ref_chan,
- uint32_t handle, uint32_t size, int align,
- uint32_t flags, struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_new_fake(struct drm_device *,
- uint32_t p_offset, uint32_t b_offset,
- uint32_t size, uint32_t flags,
- struct nouveau_gpuobj **,
- struct nouveau_gpuobj_ref**);
+extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
+ u32 size, u32 flags,
+ struct nouveau_gpuobj **);
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
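
With the gpuobj_ref machinery gone, object lifetime is handled by a kref inside nouveau_gpuobj and a single nouveau_gpuobj_ref() helper. A small sketch of the usage pattern implied by the call sites elsewhere in this patch (take a reference by passing an object, drop one by passing NULL):

/* Sketch, assuming the kref semantics implied by calls elsewhere in this diff. */
struct nouveau_gpuobj *obj = NULL, *copy = NULL;
int ret;

ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16, NVOBJ_FLAG_ZERO_ALLOC, &obj);
if (ret)
	return ret;

nouveau_gpuobj_ref(obj, &copy);   /* take an extra reference in 'copy' */
nouveau_gpuobj_ref(NULL, &obj);   /* drop ours; the object stays alive via 'copy' */
nouveau_gpuobj_ref(NULL, &copy);  /* last reference gone, object is freed */
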
@@ -879,6 +954,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
+extern u32 get_pll_register(struct drm_device *, enum pll_types);
extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
struct pll_lims *);
extern int nouveau_bios_run_display_table(struct drm_device *,
@@ -925,10 +1001,10 @@ extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
-
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@@ -939,7 +1015,6 @@ extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_flush(struct drm_device *);
extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
@@ -977,7 +1052,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
extern void nvc0_fifo_disable(struct drm_device *);
extern void nvc0_fifo_enable(struct drm_device *);
extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_flush(struct drm_device *);
extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
extern int nvc0_fifo_channel_id(struct drm_device *);
extern int nvc0_fifo_create_context(struct nouveau_channel *);
@@ -1169,15 +1243,21 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
-extern int nouveau_fence_init(struct nouveau_channel *);
-extern void nouveau_fence_fini(struct nouveau_channel *);
+extern int nouveau_fence_init(struct drm_device *);
+extern void nouveau_fence_fini(struct drm_device *);
+extern int nouveau_fence_channel_init(struct nouveau_channel *);
+extern void nouveau_fence_channel_fini(struct nouveau_channel *);
extern void nouveau_fence_update(struct nouveau_channel *);
extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
bool emit);
extern int nouveau_fence_emit(struct nouveau_fence *);
+extern void nouveau_fence_work(struct nouveau_fence *fence,
+ void (*work)(void *priv, bool signalled),
+ void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
extern bool nouveau_fence_signalled(void *obj, void *arg);
extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
extern int nouveau_fence_flush(void *obj, void *arg);
extern void nouveau_fence_unref(void **obj);
extern void *nouveau_fence_ref(void *obj);
@@ -1255,12 +1335,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
iowrite32_native(val, dev_priv->mmio + reg);
}
-static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
{
u32 tmp = nv_rd32(dev, reg);
- tmp &= ~mask;
- tmp |= val;
- nv_wr32(dev, reg, tmp);
+ nv_wr32(dev, reg, (tmp & ~mask) | val);
+ return tmp;
}
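
nv_mask() now returns the register's previous contents, so a caller can do a save/modify/restore sequence without a separate nv_rd32(). A short illustrative example (the register offset is arbitrary, not taken from the patch):

/* Illustrative only; 0x002140 is an arbitrary register offset. */
u32 saved = nv_mask(dev, 0x002140, 0x00000001, 0x00000000); /* clear bit 0, keep old word */
/* ... do work with the bit cleared ... */
nv_wr32(dev, 0x002140, saved);                               /* restore the original value */
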
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
@@ -1275,7 +1354,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
iowrite8(val, dev_priv->mmio + reg);
}
-#define nv_wait(reg, mask, val) \
+#define nv_wait(dev, reg, mask, val) \
nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
/* PRAMIN access */
@@ -1292,17 +1371,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
}
/* object access */
-static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
- unsigned index)
-{
- return nv_ri32(dev, obj->im_pramin->start + index * 4);
-}
-
-static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
- unsigned index, u32 val)
-{
- nv_wi32(dev, obj->im_pramin->start + index * 4, val);
-}
+extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
+extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
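
The object accessors move out of line and now take the gpuobj plus a byte offset, where the old inlines took the device and a 32-bit word index. The converted callers in this same patch (nouveau_grctx.h, nouveau_mem.c) multiply the old index by four. A before/after sketch, with gpuobj/idx/val as illustrative variables:

/* old style: device argument, word index */
val = nv_ro32(dev, gpuobj, idx);
nv_wo32(dev, gpuobj, idx, val);

/* new style: byte offset on the object itself */
val = nv_ro32(gpuobj, idx * 4);
nv_wo32(gpuobj, idx * 4, val);
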
/*
* Logging
@@ -1403,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
+#define NV_SW_YIELD 0x00000080
#define NV_SW_DMA_VBLSEM 0x0000018c
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 7c82d68bc155..ae69b61d93db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -55,6 +55,7 @@ struct nouveau_encoder {
int dpcd_version;
int link_nr;
int link_bw;
+ bool enhanced_frame;
} dp;
};
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dbd30b2e43fd..02a4d1fd4845 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nv04_fbcon_ops = {
@@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static struct fb_ops nv50_fbcon_ops = {
@@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 87ac21ec23d2..441b12420bb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,9 +28,11 @@
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
#include "nouveau_dma.h"
-#define USE_REFCNT (dev_priv->card_type >= NV_10)
+#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -39,6 +41,15 @@ struct nouveau_fence {
uint32_t sequence;
bool signalled;
+
+ void (*work)(void *priv, bool signalled);
+ void *priv;
+};
+
+struct nouveau_semaphore {
+ struct kref ref;
+ struct drm_device *dev;
+ struct drm_mm_node *mem;
};
static inline struct nouveau_fence *
@@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref)
void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct list_head *entry, *tmp;
- struct nouveau_fence *fence;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence *tmp, *fence;
uint32_t sequence;
spin_lock(&chan->fence.lock);
- if (USE_REFCNT)
+ if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
@@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan)
goto out;
chan->fence.sequence_ack = sequence;
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
-
+ list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
fence->signalled = true;
list_del(&fence->entry);
+
+ if (unlikely(fence->work))
+ fence->work(fence->priv, true);
+
kref_put(&fence->refcount, nouveau_fence_del);
if (sequence == chan->fence.sequence_ack)
@@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence)
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
struct nouveau_channel *chan = fence->channel;
+ struct drm_device *dev = chan->dev;
int ret;
ret = RING_SPACE(chan, 2);
@@ -143,7 +155,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
- BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
@@ -151,6 +163,25 @@ nouveau_fence_emit(struct nouveau_fence *fence)
}
void
+nouveau_fence_work(struct nouveau_fence *fence,
+ void (*work)(void *priv, bool signalled),
+ void *priv)
+{
+ BUG_ON(fence->work);
+
+ spin_lock(&fence->channel->fence.lock);
+
+ if (fence->signalled) {
+ work(priv, true);
+ } else {
+ fence->work = work;
+ fence->priv = priv;
+ }
+
+ spin_unlock(&fence->channel->fence.lock);
+}
+
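
nouveau_fence_work() attaches one callback to a fence: it runs immediately if the fence has already signalled, otherwise when nouveau_fence_update() (or channel teardown, with signalled == false) processes the fence. A hedged usage sketch mirroring the semaphore_work pattern below; my_release() and data are hypothetical:

/* Sketch: defer a cleanup until the GPU has passed this fence.
 * my_release() and 'data' (previously kmalloc'ed) are hypothetical. */
static void my_release(void *priv, bool signalled)
{
	/* signalled == false means the channel died before the fence fired */
	kfree(priv);
}

ret = nouveau_fence_new(chan, &fence, true);
if (ret == 0) {
	nouveau_fence_work(fence, my_release, data);
	nouveau_fence_unref((void *)&fence);
}
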
+void
nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -213,6 +244,162 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
return ret;
}
+static struct nouveau_semaphore *
+alloc_semaphore(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_semaphore *sema;
+
+ if (!USE_SEMA(dev))
+ return NULL;
+
+ sema = kmalloc(sizeof(*sema), GFP_KERNEL);
+ if (!sema)
+ goto fail;
+
+ spin_lock(&dev_priv->fence.lock);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ if (sema->mem)
+ sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+ spin_unlock(&dev_priv->fence.lock);
+
+ if (!sema->mem)
+ goto fail;
+
+ kref_init(&sema->ref);
+ sema->dev = dev;
+ nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+
+ return sema;
+fail:
+ kfree(sema);
+ return NULL;
+}
+
+static void
+free_semaphore(struct kref *ref)
+{
+ struct nouveau_semaphore *sema =
+ container_of(ref, struct nouveau_semaphore, ref);
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+ spin_lock(&dev_priv->fence.lock);
+ drm_mm_put_block(sema->mem);
+ spin_unlock(&dev_priv->fence.lock);
+
+ kfree(sema);
+}
+
+static void
+semaphore_work(void *priv, bool signalled)
+{
+ struct nouveau_semaphore *sema = priv;
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+ if (unlikely(!signalled))
+ nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
+
+ kref_put(&sema->ref, free_semaphore);
+}
+
+static int
+emit_semaphore(struct nouveau_channel *chan, int method,
+ struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+ struct nouveau_fence *fence;
+ bool smart = (dev_priv->card_type >= NV_50);
+ int ret;
+
+ ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (ret)
+ return ret;
+
+ if (smart) {
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING(chan, NvSema);
+ }
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING(chan, sema->mem->start);
+
+ if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
+ /*
+ * NV50 tries to be too smart and context-switch
+ * between semaphores instead of doing a "first come,
+ * first served" strategy like previous cards
+ * do.
+ *
+ * That's bad because the ACQUIRE latency can get as
+ * large as the PFIFO context time slice in the
+ * typical DRI2 case where you have several
+ * outstanding semaphores at the same moment.
+ *
+ * If we're going to ACQUIRE, force the card to
+ * context switch before, just in case the matching
+ * RELEASE is already scheduled to be executed in
+ * another channel.
+ */
+ BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+ OUT_RING(chan, 0);
+ }
+
+ BEGIN_RING(chan, NvSubSw, method, 1);
+ OUT_RING(chan, 1);
+
+ if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
+ /*
+ * Force the card to context switch, there may be
+ * another channel waiting for the semaphore we just
+ * released.
+ */
+ BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+ OUT_RING(chan, 0);
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret)
+ return ret;
+
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref((void *)&fence);
+
+ return 0;
+}
+
+int
+nouveau_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *wchan)
+{
+ struct nouveau_channel *chan = nouveau_fence_channel(fence);
+ struct drm_device *dev = wchan->dev;
+ struct nouveau_semaphore *sema;
+ int ret;
+
+ if (likely(!fence || chan == wchan ||
+ nouveau_fence_signalled(fence, NULL)))
+ return 0;
+
+ sema = alloc_semaphore(dev);
+ if (!sema) {
+ /* Early card or broken userspace, fall back to
+ * software sync. */
+ return nouveau_fence_wait(fence, NULL, false, false);
+ }
+
+ /* Make wchan wait until it gets signalled */
+ ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ if (ret)
+ goto out;
+
+ /* Signal the semaphore from chan */
+ ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+out:
+ kref_put(&sema->ref, free_semaphore);
+ return ret;
+}
+
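
The cross-channel sync path reduces to: if the fence belongs to another channel and is still pending, grab a 4-byte slot in the shared semaphore buffer, have the waiting channel ACQUIRE it and the owning channel RELEASE it, and fall back to a CPU-side nouveau_fence_wait() on pre-NV17 chips or when no slot is available. A caller-side sketch, matching the nouveau_gem.c hunk later in this patch:

/* Sketch: 'fence' was emitted on one channel; chan_b is about to reuse the buffer. */
ret = nouveau_fence_sync(fence, chan_b);
if (ret)
	return ret; /* either the semaphore emit failed or the CPU fallback wait did */
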
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
@@ -220,26 +407,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
}
int
-nouveau_fence_init(struct nouveau_channel *chan)
+nouveau_fence_channel_init(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvSw, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+
+ /* Create a DMA object for the shared cross-channel sync area. */
+ if (USE_SEMA(dev)) {
+ struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ mem->start << PAGE_SHIFT,
+ mem->size << PAGE_SHIFT,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING(chan, NvSema);
+ }
+
+ FIRE_RING(chan);
+
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
+
return 0;
}
void
-nouveau_fence_fini(struct nouveau_channel *chan)
+nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
- struct list_head *entry, *tmp;
- struct nouveau_fence *fence;
-
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
+ struct nouveau_fence *tmp, *fence;
+ list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
+
+ if (unlikely(fence->work))
+ fence->work(fence->priv, false);
+
kref_put(&fence->refcount, nouveau_fence_del);
}
}
+int
+nouveau_fence_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* Create a shared VRAM heap for cross-channel sync. */
+ if (USE_SEMA(dev)) {
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, false, true, &dev_priv->fence.bo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto fail;
+
+ ret = nouveau_bo_map(dev_priv->fence.bo);
+ if (ret)
+ goto fail;
+
+ ret = drm_mm_init(&dev_priv->fence.heap, 0,
+ dev_priv->fence.bo->bo.mem.size);
+ if (ret)
+ goto fail;
+
+ spin_lock_init(&dev_priv->fence.lock);
+ }
+
+ return 0;
+fail:
+ nouveau_bo_unmap(dev_priv->fence.bo);
+ nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ return ret;
+}
+
+void
+nouveau_fence_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (USE_SEMA(dev)) {
+ drm_mm_takedown(&dev_priv->fence.heap);
+ nouveau_bo_unmap(dev_priv->fence.bo);
+ nouveau_bo_unpin(dev_priv->fence.bo);
+ nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 19620a6709f5..5c4c929d7f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -362,7 +362,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_bo_sync_gpu(nvbo, chan);
+ ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
if (unlikely(ret)) {
NV_ERROR(dev, "fail pre-validate sync\n");
return ret;
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_sync_gpu(nvbo, chan);
+ ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
if (unlikely(ret)) {
NV_ERROR(dev, "fail post-validate sync\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 5d39c4ce8006..4a8ad1307fa4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
- nv_wo32(ctx->dev, ctx->data, reg, val);
+ nv_wo32(ctx->data, reg * 4, val);
}
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 7b613682e400..bed669a54a2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
bool mpll = Preg == 0x4020;
uint32_t oldPval = nvReadMC(dev, Preg);
uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
- uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+ uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
0xc << 28 | pv->log2P << 16;
uint32_t saved4600 = 0;
/* some cards have different maskc040s */
@@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
struct nouveau_pll_vals *pllvals)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
- NV_PRAMDAC_MPLL_COEFF,
- NV_PRAMDAC_VPLL_COEFF,
- NV_RAMDAC_VPLL2 };
- const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
- 0x4020,
- NV_PRAMDAC_VPLL_COEFF,
- NV_RAMDAC_VPLL2 };
- uint32_t reg1, pll1, pll2 = 0;
+ uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
struct pll_lims pll_lim;
int ret;
- if (dev_priv->card_type < NV_40)
- reg1 = nv04_regs[plltype];
- else
- reg1 = nv40_regs[plltype];
+ if (reg1 == 0)
+ return -ENOENT;
pll1 = nvReadMC(dev, reg1);
@@ -491,8 +481,10 @@ int
nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
{
struct nouveau_pll_vals pllvals;
+ int ret;
- if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ if (plltype == PLL_MEMORY &&
+ (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
return 400000 / mpllP;
} else
- if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ if (plltype == PLL_MEMORY &&
+ (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
return clock;
}
- nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+ if (ret)
+ return ret;
return nouveau_hw_pllvals_to_clk(&pllvals);
}
@@ -526,9 +521,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
struct nouveau_pll_vals pv;
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
- if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+ if (get_pll_limits(dev, pllreg, &pll_lim))
return;
- nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+ nouveau_hw_get_pllvals(dev, pllreg, &pv);
if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
if (dev_priv->card_type >= NV_10)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
- nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+ nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
@@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ if (dev_priv->card_type >= NV_30)
rd_cio_state(dev, head, regp, 0x9f);
- }
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
@@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ if (dev_priv->card_type >= NV_30)
wr_cio_state(dev, head, regp, 0x9f);
- }
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 84614858728b..fdd7e3de79c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
int
nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info, int index)
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index)
{
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
@@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
for (i = 0; info[i].addr; i++) {
- if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+ if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+ (!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
return i;
}
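
nouveau_i2c_identify() now takes an optional match callback, so a hit on a probed address can be confirmed by actually talking to the device before it is reported. A hedged sketch of a caller-supplied matcher; board_infos and bus_index are caller context and the "sensor" label is illustrative:

/* Hypothetical matcher: a real one would read an ID register over 'i2c';
 * returning true unconditionally reproduces the old probe-only behaviour. */
static bool my_chip_match(struct nouveau_i2c_chan *i2c, struct i2c_board_info *info)
{
	return true;
}

/* Passing NULL instead of a matcher also keeps the old behaviour. */
idx = nouveau_i2c_identify(dev, "sensor", board_infos, my_chip_match, bus_index);
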
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index cfe7c8426d1d..422b62fd8272 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -43,7 +43,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info, int index);
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index);
extern const struct i2c_algorithm nouveau_dp_i2c_algo;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 794b0ee30cf6..6fd51a51c608 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -35,6 +35,7 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
+#include "nouveau_ramht.h"
#include <linux/ratelimit.h>
/* needed for hotplug irq */
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
const int mthd = addr & 0x1ffc;
if (mthd == 0x0000) {
- struct nouveau_gpuobj_ref *ref = NULL;
+ struct nouveau_gpuobj *gpuobj;
- if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
return false;
- if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+ if (gpuobj->engine != NVOBJ_ENGINE_SW)
return false;
- chan->sw_subchannel[subc] = ref->gpuobj->class;
+ chan->sw_subchannel[subc] = gpuobj->class;
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
return true;
@@ -200,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+ u32 get = nv_rd32(dev, 0x003244);
+ u32 put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x Push 0x%08x\n",
+ chid, ho_get, get, ho_put, put, ib_get, ib_put,
+ state, push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (get != put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x Push 0x%08x\n",
+ chid, get, put, state, push);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_DMA_PUSHER);
+ if (get != put)
+ nv_wr32(dev, 0x003244, put);
+ }
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
- get + 4);
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (status & NV_PFIFO_INTR_SEMAPHORE) {
@@ -226,6 +257,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
if (status) {
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
@@ -357,7 +396,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin_grctx)
continue;
- if (inst == chan->ramin_grctx->instance)
+ if (inst == chan->ramin_grctx->pinst)
break;
}
} else {
@@ -369,7 +408,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
if (!chan || !chan->ramin)
continue;
- if (inst == chan->ramin->instance)
+ if (inst == chan->ramin->vinst)
break;
}
}
@@ -605,40 +644,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
-static void
-nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t trap[6];
- int i, ch;
- uint32_t idx = nv_rd32(dev, 0x100c90);
- if (idx & 0x80000000) {
- idx &= 0xffffff;
- if (display) {
- for (i = 0; i < 6; i++) {
- nv_wr32(dev, 0x100c90, idx | i << 24);
- trap[i] = nv_rd32(dev, 0x100c94);
- }
- for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
- struct nouveau_channel *chan = dev_priv->fifos[ch];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (trap[1] == chan->ramin->instance >> 12)
- break;
- }
- NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
- name, (trap[5]&0x100?"read":"write"),
- trap[5]&0xff, trap[4]&0xffff,
- trap[3]&0xffff, trap[0], trap[2], ch);
- }
- nv_wr32(dev, 0x100c90, idx | 0x80000000);
- } else if (display) {
- NV_INFO(dev, "%s - no VM fault?\n", name);
- }
-}
-
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
{ 3, "STACK_UNDERFLOW" },
@@ -711,7 +716,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
- nv50_pfb_vm_trap(dev, display, name);
+ nv50_fb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -734,7 +739,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_pfb_vm_trap(dev, display, name);
+ nv50_fb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
@@ -817,7 +822,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x400808) & 0x80000000) {
if (display) {
@@ -842,7 +847,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x40084c) & 0x80000000) {
if (display) {
@@ -884,15 +889,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
ustatus &= ~0x00000002;
}
if (ustatus & 0x00000004) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
ustatus &= ~0x00000004;
}
NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
@@ -917,7 +922,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x400c00),
nv_rd32(dev, 0x400c08),
@@ -939,7 +944,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x401804),
nv_rd32(dev, 0x401808),
@@ -964,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
}
if (ustatus & 0x00000001) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
nv_rd32(dev, 0x405800),
nv_rd32(dev, 0x405804),
@@ -986,7 +991,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
- nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+ nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
if (display)
NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9689d4147686..a163c7c612e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,6 +35,8 @@
#include "drm_sarea.h"
#include "nouveau_drv.h"
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
/*
* NV10-NV40 tiling helpers
*/
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
tile->addr = addr;
tile->size = size;
tile->used = !!pitch;
nouveau_fence_unref((void **)&tile->fence);
- if (!pfifo->cache_flush(dev))
- return;
-
pfifo->reassign(dev, false);
- pfifo->cache_flush(dev);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
- int i;
+ struct nouveau_tile_reg *found = NULL;
+ unsigned long i, flags;
- spin_lock(&dev_priv->tile.lock);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
for (i = 0; i < pfb->num_tiles; i++) {
- if (tile[i].used)
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+ if (tile->used)
/* Tile region in use. */
continue;
- if (tile[i].fence &&
- !nouveau_fence_signalled(tile[i].fence, NULL))
+ if (tile->fence &&
+ !nouveau_fence_signalled(tile->fence, NULL))
/* Pending tile region. */
continue;
- if (max(tile[i].addr, addr) <
- min(tile[i].addr + tile[i].size, addr + size))
+ if (max(tile->addr, addr) <
+ min(tile->addr + tile->size, addr + size))
/* Kill an intersecting tile region. */
nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
if (pitch && !found) {
/* Free tile region. */
nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = &tile[i];
+ found = tile;
}
}
- spin_unlock(&dev_priv->tile.lock);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return found;
}
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
virt += (end - pte);
while (pte < end) {
- nv_wo32(dev, pgt, pte++, offset_l);
- nv_wo32(dev, pgt, pte++, offset_h);
+ nv_wo32(pgt, (pte * 4) + 0, offset_l);
+ nv_wo32(pgt, (pte * 4) + 4, offset_h);
+ pte += 2;
}
}
}
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
pages -= (end - pte);
virt += (end - pte) << 15;
- while (pte < end)
- nv_wo32(dev, pgt, pte++, 0);
+ while (pte < end) {
+ nv_wo32(pgt, (pte * 4), 0);
+ pte++;
+ }
}
dev_priv->engine.instmem.flush(dev);
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
* Cleanup everything
*/
void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
nouveau_ttm_global_release(dev_priv);
+ if (dev_priv->fb_mtrr >= 0) {
+ drm_mtrr_del(dev_priv->fb_mtrr,
+ pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = -1;
+ }
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+ nouveau_sgdma_takedown(dev);
+
if (drm_core_has_AGP(dev) && dev->agp) {
struct drm_agp_mem *entry, *tempe;
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
-
- if (dev_priv->fb_mtrr) {
- drm_mtrr_del(dev_priv->fb_mtrr,
- pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = -1;
- }
}
static uint32_t
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ dev_priv->vram_rblock_size = rowsize >> 12;
+ if (rt & 1)
+ dev_priv->vram_rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %lld bytes\n",
+ (u64)dev_priv->vram_rblock_size << 12);
+}
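
Worked example with hypothetical register values: with four partitions enabled in 0x001540, colbits = 9, rowbitsa = 12 and 4 banks decoded from 0x100204, rowsize = 4 * 4 * (1 << 9) * 8 = 65536 bytes and predicted = 65536 << 12 = 256 MiB; vram_rblock_size then becomes 65536 >> 12 = 16 (tripled to 48 when bit 0 of 0x100250 is set). A mismatch between predicted and the detected vram_size only produces the two NV_WARN lines above.
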
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* To our knowledge, there's no large scale reordering of pages
+ * that occurs on IGP chipsets.
+ */
+ dev_priv->vram_rblock_size = 1;
+}
+
+static int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ll;
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
+ nvaa_vram_preinit(dev);
+ break;
+ default:
+ nv50_vram_preinit(dev);
+ break;
}
} else {
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+	 * FW seems to be broken on nv18; it makes the card lock up
+ * randomly.
+ */
+ if (dev_priv->chipset == 0x18)
+ mode &= ~PCI_AGP_COMMAND_FW;
+
+ /*
+ * AGP mode set in the command line.
+ */
+ if (nouveau_agpmode > 0) {
+ bool agpv3 = mode & 0x8;
+ int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+ mode = (mode & ~0x7) | (rate & 0x7);
+ }
+
+ return mode;
+}
+#endif
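
The nouveau_agpmode parameter acts as a rate override here: on an AGP 3.0 bridge (bit 3 of the reported mode set) the requested rate is divided by 4 before being folded into the low three bits, so nouveau_agpmode=8 yields rate 2 (8x) and nouveau_agpmode=4 yields rate 1 (4x); on AGP 2.0 the value is used directly, so nouveau_agpmode=4 gives rate 4 (4x). A value of 0 disables AGP entirely (the nouveau_mem_gart_init() hunk below now gates AGP setup on it), and negative values leave the bridge's reported mode untouched.
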
+
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
/* First of all, disable fast writes, otherwise if it's
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
- if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+ if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+ dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
if (ret)
return ret;
- mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+ mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
ret = drm_agp_enable(dev, mode);
if (ret)
return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
/* see agp.h for the AGPSTAT_* modes available */
- mode.mode = info.mode;
+ mode.mode = get_agp_mode(dev, info.mode);
ret = drm_agp_enable(dev, mode);
if (ret) {
NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- int ret, dma_bits = 32;
-
- dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
- dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+ int ret, dma_bits;
if (dev_priv->card_type >= NV_50 &&
pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
+ else
+ dma_bits = 32;
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret) {
- NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+ if (ret)
return ret;
- }
+
+ ret = nouveau_mem_detect(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- spin_lock_init(&dev_priv->tile.lock);
-
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev)
pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
- /* remove reserved space at end of vram from available amount */
+ /* reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+ dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+ else
+ if (dev_priv->card_type >= NV_40)
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ else
+ dev_priv->ramin_rsvd_vram = (512 * 1024);
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev)
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
- /* GART */
+ dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
+ DRM_MTRR_WC);
+ return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ int ret;
+
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+ if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1),
- DRM_MTRR_WC);
-
return 0;
}
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 tUNK_0, tUNK_1, tUNK_2;
+ u8 tRP; /* Byte 3 */
+ u8 tRAS; /* Byte 5 */
+ u8 tRFC; /* Byte 7 */
+ u8 tRC; /* Byte 9 */
+ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+ u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 *mem = NULL, *entry;
+ int i, recordlen, entries;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ mem = ROMPTR(bios, P.data[4]);
+ else
+ if (P.version == 2)
+ mem = ROMPTR(bios, P.data[8]);
+ else {
+ NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+ }
+ } else {
+ NV_DEBUG(dev, "BMP version too old for memory\n");
+ return;
+ }
+
+ if (!mem) {
+ NV_DEBUG(dev, "memory timing table pointer invalid\n");
+ return;
+ }
+ if (mem[0] != 0x10) {
+ NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+ return;
+ }
+
+ /* validate record length */
+ entries = mem[2];
+ recordlen = mem[3];
+ if (recordlen < 15) {
+ NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+ return;
+ }
+
+ /* parse vbios entries into common format */
+ memtimings->timing =
+ kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ if (!memtimings->timing)
+ return;
+
+ entry = mem + mem[1];
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+ if (entry[0] == 0)
+ continue;
+
+ tUNK_18 = 1;
+ tUNK_19 = 1;
+ tUNK_20 = 0;
+ tUNK_21 = 0;
+ switch (MIN(recordlen,21)) {
+ case 21:
+ tUNK_21 = entry[21];
+ case 20:
+ tUNK_20 = entry[20];
+ case 19:
+ tUNK_19 = entry[19];
+ case 18:
+ tUNK_18 = entry[18];
+ default:
+ tUNK_0 = entry[0];
+ tUNK_1 = entry[1];
+ tUNK_2 = entry[2];
+ tRP = entry[3];
+ tRAS = entry[5];
+ tRFC = entry[7];
+ tRC = entry[9];
+ tUNK_10 = entry[10];
+ tUNK_11 = entry[11];
+ tUNK_12 = entry[12];
+ tUNK_13 = entry[13];
+ tUNK_14 = entry[14];
+ break;
+ }
+
+ timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
+
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ tUNK_18 << 16 |
+ (tUNK_1 + tUNK_19 + 1) << 8 |
+ (tUNK_2 - 1));
+
+ timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+		if (recordlen > 19) {
+ timing->reg_100228 += (tUNK_19 - 1) << 24;
+ } else {
+ timing->reg_100228 += tUNK_12 << 24;
+ }
+
+ /* XXX: reg_10022c */
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ /* XXX: +6? */
+ timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+		if (tUNK_10 > tUNK_11) {
+ timing->reg_100234 += tUNK_10 << 16;
+ } else {
+ timing->reg_100234 += tUNK_11 << 16;
+ }
+
+ /* XXX; reg_100238, reg_10023c */
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+ timing->reg_100220, timing->reg_100224,
+ timing->reg_100228, timing->reg_10022c);
+ NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+ timing->reg_100230, timing->reg_100234,
+ timing->reg_100238, timing->reg_10023c);
+ }
+
+ memtimings->nr_timing = entries;
+ memtimings->supported = true;
+}
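
The VBIOS timing table parsed above has a small header: byte 0 is the version (only 0x10 is accepted), byte 1 the offset to the first record, byte 2 the record count and byte 3 the record length (at least 15 bytes). Within a record, bytes 3/5/7/9 supply tRP/tRAS/tRFC/tRC. As a worked example with hypothetical values tRC = 0x2c, tRFC = 0x16, tRAS = 0x20, tRP = 0x0d, the packing above gives reg_100220 = (0x2c << 24) | (0x16 << 16) | (0x20 << 8) | 0x0d = 0x2c16200d.
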
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+
+ kfree(mem->timing);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181ff50ce..2cc59f8c658b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -112,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
target = NV_DMA_TARGET_VIDMEM;
} else
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;
- ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+ ret = nouveau_ramht_insert(chan, handle, nobj);
+ nouveau_gpuobj_ref(NULL, &nobj);
if (ret) {
- nouveau_gpuobj_del(dev, &nobj);
drm_mm_put_block(mem);
- NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index b6bcb254f4ab..896cf8634144 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_ramht.h"
/* NVidia uses context objects to drive drawing operations.
@@ -65,137 +66,6 @@
The key into the hash table depends on the object handle and channel id and
is given as:
*/
-static uint32_t
-nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t hash = 0;
- int i;
-
- NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
-
- for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
- hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
- handle >>= dev_priv->ramht_bits;
- }
-
- if (dev_priv->card_type < NV_50)
- hash ^= channel << (dev_priv->ramht_bits - 4);
- hash <<= 3;
-
- NV_DEBUG(dev, "hash=0x%08x\n", hash);
- return hash;
-}
-
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
- uint32_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
-
- if (dev_priv->card_type < NV_40)
- return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
- return (ctx != 0);
-}
-
-static int
-nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_channel *chan = ref->channel;
- struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
- uint32_t ctx, co, ho;
-
- if (!ramht) {
- NV_ERROR(dev, "No hash table!\n");
- return -EINVAL;
- }
-
- if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
- (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else
- if (dev_priv->card_type < NV_50) {
- ctx = (ref->instance >> 4) |
- (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else {
- if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (ref->instance << 10) | 2;
- } else {
- ctx = (ref->instance >> 4) |
- ((ref->gpuobj->engine <<
- NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
- }
- }
-
- co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
- do {
- if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
- NV_DEBUG(dev,
- "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, ref->handle, ctx);
- nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
- nv_wo32(dev, ramht, (co + 4)/4, ctx);
-
- list_add_tail(&ref->list, &chan->ramht_refs);
- instmem->flush(dev);
- return 0;
- }
- NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
- chan->id, co, nv_ro32(dev, ramht, co/4));
-
- co += 8;
- if (co >= dev_priv->ramht_size)
- co = 0;
- } while (co != ho);
-
- NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
- return -ENOMEM;
-}
-
-static void
-nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_channel *chan = ref->channel;
- struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
- uint32_t co, ho;
-
- if (!ramht) {
- NV_ERROR(dev, "No hash table!\n");
- return;
- }
-
- co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
- do {
- if (nouveau_ramht_entry_valid(dev, ramht, co) &&
- (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
- NV_DEBUG(dev,
- "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, ref->handle,
- nv_ro32(dev, ramht, (co + 4)));
- nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
- nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
-
- list_del(&ref->list);
- instmem->flush(dev);
- return;
- }
-
- co += 8;
- if (co >= dev_priv->ramht_size)
- co = 0;
- } while (co != ho);
- list_del(&ref->list);
-
- NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
- chan->id, ref->handle);
-}
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
@@ -205,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_gpuobj *gpuobj;
- struct drm_mm *pramin = NULL;
+ struct drm_mm_node *ramin = NULL;
int ret;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -218,69 +88,102 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!gpuobj)
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->dev = dev;
gpuobj->flags = flags;
- gpuobj->im_channel = chan;
+ kref_init(&gpuobj->refcount);
+ gpuobj->size = size;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
- /* Choose between global instmem heap, and per-channel private
- * instmem heap. On <NV50 allow requests for private instmem
- * to be satisfied from global heap if no per-channel area
- * available.
- */
if (chan) {
NV_DEBUG(dev, "channel heap\n");
- pramin = &chan->ramin_heap;
+
+ ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
+ if (ramin)
+ ramin = drm_mm_get_block(ramin, size, align);
+
+ if (!ramin) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
} else {
NV_DEBUG(dev, "global heap\n");
- pramin = &dev_priv->ramin_heap;
+ /* allocate backing pages, sets vinst */
ret = engine->instmem.populate(dev, gpuobj, &size);
if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
- }
- /* Allocate a chunk of the PRAMIN aperture */
- gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
- if (gpuobj->im_pramin)
- gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+ /* try and get aperture space */
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap)) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
+ align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
- if (!gpuobj->im_pramin) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -ENOMEM;
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ /* on nv50 it's ok to fail, we have a fallback path */
+ if (!ramin && dev_priv->card_type < NV_50) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
}
- if (!chan) {
+ /* if we got a chunk of the aperture, map pages into it */
+ gpuobj->im_pramin = ramin;
+ if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
ret = engine->instmem.bind(dev, gpuobj);
if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
}
+ /* calculate the various different addresses for the object */
+ if (chan) {
+ gpuobj->pinst = chan->ramin->pinst;
+ if (gpuobj->pinst != ~0)
+ gpuobj->pinst += gpuobj->im_pramin->start;
+
+ if (dev_priv->card_type < NV_50) {
+ gpuobj->cinst = gpuobj->pinst;
+ } else {
+ gpuobj->cinst = gpuobj->im_pramin->start;
+ gpuobj->vinst = gpuobj->im_pramin->start +
+ chan->ramin->vinst;
+ }
+ } else {
+ if (gpuobj->im_pramin)
+ gpuobj->pinst = gpuobj->im_pramin->start;
+ else
+ gpuobj->pinst = ~0;
+ gpuobj->cinst = 0xdeadbeef;
+ }
+
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
int i;
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
engine->instmem.flush(dev);
}
- *gpuobj_ret = gpuobj;
- return 0;
-}
-
-int
-nouveau_gpuobj_early_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- NV_DEBUG(dev, "\n");
-
- INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ *gpuobj_ret = gpuobj;
return 0;
}
@@ -288,18 +191,12 @@ int
nouveau_gpuobj_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
NV_DEBUG(dev, "\n");
- if (dev_priv->card_type < NV_50) {
- ret = nouveau_gpuobj_new_fake(dev,
- dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
- NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
- &dev_priv->ramht, NULL);
- if (ret)
- return ret;
- }
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ spin_lock_init(&dev_priv->ramin_lock);
+ dev_priv->ramin_base = ~0;
return 0;
}
@@ -311,297 +208,89 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
NV_DEBUG(dev, "\n");
- nouveau_gpuobj_del(dev, &dev_priv->ramht);
+ BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
-void
-nouveau_gpuobj_late_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
- struct list_head *entry, *tmp;
-
- NV_DEBUG(dev, "\n");
-
- list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
- gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
-
- NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
- gpuobj, gpuobj->refcount);
- gpuobj->refcount = 0;
- nouveau_gpuobj_del(dev, &gpuobj);
- }
-}
-int
-nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+static void
+nouveau_gpuobj_del(struct kref *ref)
{
+ struct nouveau_gpuobj *gpuobj =
+ container_of(ref, struct nouveau_gpuobj, refcount);
+ struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- struct nouveau_gpuobj *gpuobj;
int i;
- NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
-
- if (!dev_priv || !pgpuobj || !(*pgpuobj))
- return -EINVAL;
- gpuobj = *pgpuobj;
-
- if (gpuobj->refcount != 0) {
- NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
- return -EINVAL;
- }
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
engine->instmem.flush(dev);
}
if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
- if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+ if (gpuobj->im_backing)
engine->instmem.clear(dev, gpuobj);
- if (gpuobj->im_pramin) {
- if (gpuobj->flags & NVOBJ_FLAG_FAKE)
- kfree(gpuobj->im_pramin);
- else
- drm_mm_put_block(gpuobj->im_pramin);
- }
-
+ spin_lock(&dev_priv->ramin_lock);
+ if (gpuobj->im_pramin)
+ drm_mm_put_block(gpuobj->im_pramin);
list_del(&gpuobj->list);
+ spin_unlock(&dev_priv->ramin_lock);
- *pgpuobj = NULL;
kfree(gpuobj);
- return 0;
}
-static int
-nouveau_gpuobj_instance_get(struct drm_device *dev,
- struct nouveau_channel *chan,
- struct nouveau_gpuobj *gpuobj, uint32_t *inst)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *cpramin;
-
- /* <NV50 use PRAMIN address everywhere */
- if (dev_priv->card_type < NV_50) {
- *inst = gpuobj->im_pramin->start;
- return 0;
- }
-
- if (chan && gpuobj->im_channel != chan) {
- NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
- gpuobj->im_channel->id, chan->id);
- return -EINVAL;
- }
-
- /* NV50 channel-local instance */
- if (chan) {
- cpramin = chan->ramin->gpuobj;
- *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
- return 0;
- }
-
- /* NV50 global (VRAM) instance */
- if (!gpuobj->im_channel) {
- /* ...from global heap */
- if (!gpuobj->im_backing) {
- NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
- return -EINVAL;
- }
- *inst = gpuobj->im_backing_start;
- return 0;
- } else {
- /* ...from local heap */
- cpramin = gpuobj->im_channel->ramin->gpuobj;
- *inst = cpramin->im_backing_start +
- (gpuobj->im_pramin->start - cpramin->im_pramin->start);
- return 0;
- }
-
- return -EINVAL;
-}
-
-int
-nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
- uint32_t handle, struct nouveau_gpuobj *gpuobj,
- struct nouveau_gpuobj_ref **ref_ret)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj_ref *ref;
- uint32_t instance;
- int ret;
-
- NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
- chan ? chan->id : -1, handle, gpuobj);
-
- if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
- return -EINVAL;
-
- if (!chan && !ref_ret)
- return -EINVAL;
-
- if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
- /* sw object */
- instance = 0x40;
- } else {
- ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
- if (ret)
- return ret;
- }
-
- ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (!ref)
- return -ENOMEM;
- INIT_LIST_HEAD(&ref->list);
- ref->gpuobj = gpuobj;
- ref->channel = chan;
- ref->instance = instance;
-
- if (!ref_ret) {
- ref->handle = handle;
-
- ret = nouveau_ramht_insert(dev, ref);
- if (ret) {
- kfree(ref);
- return ret;
- }
- } else {
- ref->handle = ~0;
- *ref_ret = ref;
- }
-
- ref->gpuobj->refcount++;
- return 0;
-}
-
-int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
-{
- struct nouveau_gpuobj_ref *ref;
-
- NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
-
- if (!dev || !pref || *pref == NULL)
- return -EINVAL;
- ref = *pref;
-
- if (ref->handle != ~0)
- nouveau_ramht_remove(dev, ref);
-
- if (ref->gpuobj) {
- ref->gpuobj->refcount--;
-
- if (ref->gpuobj->refcount == 0) {
- if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
- nouveau_gpuobj_del(dev, &ref->gpuobj);
- }
- }
-
- *pref = NULL;
- kfree(ref);
- return 0;
-}
-
-int
-nouveau_gpuobj_new_ref(struct drm_device *dev,
- struct nouveau_channel *oc, struct nouveau_channel *rc,
- uint32_t handle, uint32_t size, int align,
- uint32_t flags, struct nouveau_gpuobj_ref **ref)
-{
- struct nouveau_gpuobj *gpuobj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
- }
-
- return 0;
-}
-
-int
-nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
- struct nouveau_gpuobj_ref **ref_ret)
+void
+nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
- struct nouveau_gpuobj_ref *ref;
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, &chan->ramht_refs) {
- ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+ if (ref)
+ kref_get(&ref->refcount);
- if (ref->handle == handle) {
- if (ref_ret)
- *ref_ret = ref;
- return 0;
- }
- }
+ if (*ptr)
+ kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
- return -EINVAL;
+ *ptr = ref;
}
int
-nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
- uint32_t b_offset, uint32_t size,
- uint32_t flags, struct nouveau_gpuobj **pgpuobj,
- struct nouveau_gpuobj_ref **pref)
+nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
+ u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
int i;
NV_DEBUG(dev,
- "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
- p_offset, b_offset, size, flags);
+ "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
+ pinst, vinst, size, flags);
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
- gpuobj->im_channel = NULL;
- gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
-
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-
- if (p_offset != ~0) {
- gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
- GFP_KERNEL);
- if (!gpuobj->im_pramin) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return -ENOMEM;
- }
- gpuobj->im_pramin->start = p_offset;
- gpuobj->im_pramin->size = size;
- }
-
- if (b_offset != ~0) {
- gpuobj->im_backing = (struct nouveau_bo *)-1;
- gpuobj->im_backing_start = b_offset;
- }
+ gpuobj->dev = dev;
+ gpuobj->flags = flags;
+ kref_init(&gpuobj->refcount);
+ gpuobj->size = size;
+ gpuobj->pinst = pinst;
+ gpuobj->cinst = 0xdeadbeef;
+ gpuobj->vinst = vinst;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- for (i = 0; i < gpuobj->im_pramin->size; i += 4)
- nv_wo32(dev, gpuobj, i/4, 0);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, 0);
dev_priv->engine.instmem.flush(dev);
}
- if (pref) {
- i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
- if (i) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return i;
- }
- }
-
- if (pgpuobj)
- *pgpuobj = gpuobj;
+ spin_lock(&dev_priv->ramin_lock);
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
+ *pgpuobj = gpuobj;
return 0;
}
@@ -685,14 +374,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
adjust = offset & 0x00000fff;
frame = offset & ~0x00000fff;
- nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
- (adjust << 20) |
- (access << 14) |
- (target << 16) |
- class));
- nv_wo32(dev, *gpuobj, 1, size - 1);
- nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
- nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+ nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
+ (access << 14) | (target << 16) |
+ class));
+ nv_wo32(*gpuobj, 4, size - 1);
+ nv_wo32(*gpuobj, 8, frame | pte_flags);
+ nv_wo32(*gpuobj, 12, frame | pte_flags);
} else {
uint64_t limit = offset + size - 1;
uint32_t flags0, flags5;
@@ -705,12 +392,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
flags5 = 0x00080000;
}
- nv_wo32(dev, *gpuobj, 0, flags0 | class);
- nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
- nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
- nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
- (upper_32_bits(offset) & 0xff));
- nv_wo32(dev, *gpuobj, 5, flags5);
+ nv_wo32(*gpuobj, 0, flags0 | class);
+ nv_wo32(*gpuobj, 4, lower_32_bits(limit));
+ nv_wo32(*gpuobj, 8, lower_32_bits(offset));
+ nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
+ (upper_32_bits(offset) & 0xff));
+ nv_wo32(*gpuobj, 20, flags5);
}
instmem->flush(dev);
@@ -741,7 +428,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
*o_ret = 0;
} else
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
- *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
if (offset & ~0xffffffffULL) {
NV_ERROR(dev, "obj offset exceeds 32-bits\n");
return -EINVAL;
@@ -829,25 +516,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
}
if (dev_priv->card_type >= NV_50) {
- nv_wo32(dev, *gpuobj, 0, class);
- nv_wo32(dev, *gpuobj, 5, 0x00010000);
+ nv_wo32(*gpuobj, 0, class);
+ nv_wo32(*gpuobj, 20, 0x00010000);
} else {
switch (class) {
case NV_CLASS_NULL:
- nv_wo32(dev, *gpuobj, 0, 0x00001030);
- nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+ nv_wo32(*gpuobj, 0, 0x00001030);
+ nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- nv_wo32(dev, *gpuobj, 2, 0x01000000);
+ nv_wo32(*gpuobj, 8, 0x01000000);
#endif
} else {
#ifdef __BIG_ENDIAN
- nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+ nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
- nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(*gpuobj, 0, class);
#endif
}
}
@@ -873,10 +560,15 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
+ gpuobj->dev = chan->dev;
gpuobj->engine = NVOBJ_ENGINE_SW;
gpuobj->class = class;
+ kref_init(&gpuobj->refcount);
+ gpuobj->cinst = 0x40;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
*gpuobj_ret = gpuobj;
return 0;
}
@@ -886,7 +578,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pramin = NULL;
uint32_t size;
uint32_t base;
int ret;
@@ -911,18 +602,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
size += 0x1000;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
- &chan->ramin);
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
return ret;
}
- pramin = chan->ramin->gpuobj;
- ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
return ret;
}
@@ -939,8 +628,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
- INIT_LIST_HEAD(&chan->ramht_refs);
-
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
/* Allocate a chunk of memory for per-channel object storage */
@@ -956,41 +643,38 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
* locations determined during init.
*/
if (dev_priv->card_type >= NV_50) {
- uint32_t vm_offset, pde;
+ u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+ u64 vm_vinst = chan->ramin->vinst + pgd_offs;
+ u32 vm_pinst = chan->ramin->pinst;
+ u32 pde;
- vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
- vm_offset += chan->ramin->gpuobj->im_pramin->start;
+ if (vm_pinst != ~0)
+ vm_pinst += pgd_offs;
- ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
- 0, &chan->vm_pd, NULL);
+ ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
+ 0, &chan->vm_pd);
if (ret)
return ret;
for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
- nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
}
- pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
- dev_priv->gart_info.sg_ctxdma,
- &chan->vm_gart_pt);
- if (ret)
- return ret;
- nv_wo32(dev, chan->vm_pd, pde++,
- chan->vm_gart_pt->instance | 0x03);
- nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
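+ /* each 8-byte PDE covers 512MiB of the channel's address space;
+ * point the GART and VRAM page tables at their bases */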
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt);
+ pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
+ nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
+ nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
- pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+ pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
- dev_priv->vm_vram_pt[i],
- &chan->vm_vram_pt[i]);
- if (ret)
- return ret;
+ nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
+ &chan->vm_vram_pt[i]);
- nv_wo32(dev, chan->vm_pd, pde++,
- chan->vm_vram_pt[i]->instance | 0x61);
- nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+ nv_wo32(chan->vm_pd, pde + 0,
+ chan->vm_vram_pt[i]->vinst | 0x61);
+ nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
+ pde += 8;
}
instmem->flush(dev);
@@ -998,15 +682,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* RAMHT */
if (dev_priv->card_type < NV_50) {
- ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
- &chan->ramht);
+ nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
+ } else {
+ struct nouveau_gpuobj *ramht = NULL;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &ramht);
if (ret)
return ret;
- } else {
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
- 0x8000, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramht);
+
+ ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
}
@@ -1023,24 +709,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
}
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &vram);
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
}
}
- ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+ ret = nouveau_ramht_insert(chan, vram_h, vram);
+ nouveau_gpuobj_ref(NULL, &vram);
if (ret) {
- NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
return ret;
}
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
- tt = vram;
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->vm_end,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_AGP, &tt);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
} else
if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
@@ -1056,9 +750,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
}
- ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+ ret = nouveau_ramht_insert(chan, tt_h, tt);
+ nouveau_gpuobj_ref(NULL, &tt);
if (ret) {
- NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+ NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
return ret;
}
@@ -1070,33 +765,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct list_head *entry, *tmp;
- struct nouveau_gpuobj_ref *ref;
int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramht_refs.next)
+ if (!chan->ramht)
return;
- list_for_each_safe(entry, tmp, &chan->ramht_refs) {
- ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
-
- nouveau_gpuobj_ref_del(dev, &ref);
- }
-
- nouveau_gpuobj_ref_del(dev, &chan->ramht);
+ nouveau_ramht_ref(NULL, &chan->ramht, chan);
- nouveau_gpuobj_del(dev, &chan->vm_pd);
- nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+ nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
- if (chan->ramin)
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
-
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
@@ -1117,17 +802,17 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
}
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+ if (!gpuobj->im_backing)
continue;
- gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+ gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
if (!gpuobj->im_backing_suspend) {
nouveau_gpuobj_resume(dev);
return -ENOMEM;
}
- for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
- gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+ for (i = 0; i < gpuobj->size; i += 4)
+ gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
}
return 0;
@@ -1172,8 +857,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
if (!gpuobj->im_backing_suspend)
continue;
- for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
- nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+ for (i = 0; i < gpuobj->size; i += 4)
+ nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
dev_priv->engine.instmem.flush(dev);
}
@@ -1208,25 +893,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
return -EPERM;
}
- if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ if (nouveau_ramht_find(chan, init->handle))
return -EEXIST;
if (!grc->software)
ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
else
ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
-
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
return ret;
}
- ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+ ret = nouveau_ramht_insert(chan, init->handle, gr);
+ nouveau_gpuobj_ref(NULL, &gr);
if (ret) {
NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
- nouveau_gpuobj_del(dev, &gr);
return ret;
}
@@ -1237,16 +921,62 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_gpuobj_ref *ref;
+ struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
- int ret;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
- ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
- if (ret)
- return ret;
- nouveau_gpuobj_ref_del(dev, &ref);
+ gpuobj = nouveau_ramht_find(chan, objfree->handle);
+ if (!gpuobj)
+ return -ENOENT;
+ nouveau_ramht_remove(chan, objfree->handle);
return 0;
}
+
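+/*
+ * Indirect instance-memory accessors.  Objects with a PRAMIN placement are
+ * accessed via pinst; otherwise (pinst == ~0, or PRAMIN not yet available)
+ * the 64KiB window at 0x700000 is slid over the object's vinst using
+ * register 0x001700.
+ */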
+u32
+nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_device *dev = gpuobj->dev;
+
+ if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+ u64 ptr = gpuobj->vinst + offset;
+ u32 base = ptr >> 16;
+ u32 val;
+
+ spin_lock(&dev_priv->ramin_lock);
+ if (dev_priv->ramin_base != base) {
+ dev_priv->ramin_base = base;
+ nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+ }
+ val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
+ spin_unlock(&dev_priv->ramin_lock);
+ return val;
+ }
+
+ return nv_ri32(dev, gpuobj->pinst + offset);
+}
+
+void
+nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_device *dev = gpuobj->dev;
+
+ if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+ u64 ptr = gpuobj->vinst + offset;
+ u32 base = ptr >> 16;
+
+ spin_lock(&dev_priv->ramin_lock);
+ if (dev_priv->ramin_base != base) {
+ dev_priv->ramin_base = base;
+ nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+ }
+ nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
+ spin_unlock(&dev_priv->ramin_lock);
+ return;
+ }
+
+ nv_wi32(dev, gpuobj->pinst + offset, val);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
new file mode 100644
index 000000000000..ac62a1b8c4fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+legacy_perf_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ char *perf, *entry, *bmp = &bios->data[bios->offset];
+ int headerlen, use_straps;
+
+ if (bmp[5] < 0x5 || bmp[6] < 0x14) {
+ NV_DEBUG(dev, "BMP version too old for perf\n");
+ return;
+ }
+
+ perf = ROMPTR(bios, bmp[0x73]);
+ if (!perf) {
+ NV_DEBUG(dev, "No memclock table pointer found.\n");
+ return;
+ }
+
+ switch (perf[0]) {
+ case 0x12:
+ case 0x14:
+ case 0x18:
+ use_straps = 0;
+ headerlen = 1;
+ break;
+ case 0x01:
+ use_straps = perf[1] & 1;
+ headerlen = (use_straps ? 8 : 2);
+ break;
+ default:
+ NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
+ return;
+ }
+
+ entry = perf + headerlen;
+ if (use_straps)
+ entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+
+ snprintf(pm->perflvl[0].name, sizeof(pm->perflvl[0].name),
+ "performance_level_0");
+ pm->perflvl[0].memory = ROM16(entry[0]) * 20;
+ pm->nr_perflvl = 1;
+}
+
+void
+nouveau_perf_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 version, headerlen, recordlen, entries;
+ u8 *perf, *entry;
+ int vid, i;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version != 1 && P.version != 2) {
+ NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+ return;
+ }
+
+ perf = ROMPTR(bios, P.data[0]);
+ version = perf[0];
+ headerlen = perf[1];
+ if (version < 0x40) {
+ recordlen = perf[3] + (perf[4] * perf[5]);
+ entries = perf[2];
+ } else {
+ recordlen = perf[2] + (perf[3] * perf[4]);
+ entries = perf[5];
+ }
+ } else {
+ if (bios->data[bios->offset + 6] < 0x25) {
+ legacy_perf_init(dev);
+ return;
+ }
+
+ perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ if (!perf) {
+ NV_DEBUG(dev, "perf table pointer invalid\n");
+ return;
+ }
+
+ version = perf[1];
+ headerlen = perf[0];
+ recordlen = perf[3];
+ entries = perf[2];
+ }
+
+ entry = perf + headerlen;
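+ /* walk the perf table; entries beginning with 0xff are unpopulated */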
+ for (i = 0; i < entries; i++) {
+ struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+
+ if (entry[0] == 0xff) {
+ entry += recordlen;
+ continue;
+ }
+
+ switch (version) {
+ case 0x12:
+ case 0x13:
+ case 0x15:
+ perflvl->fanspeed = entry[55];
+ perflvl->voltage = entry[56];
+ perflvl->core = ROM32(entry[1]) * 10;
+ perflvl->memory = ROM32(entry[5]) * 20;
+ break;
+ case 0x21:
+ case 0x23:
+ case 0x24:
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+
+ if (dev_priv->chipset == 0x49 ||
+ dev_priv->chipset == 0x4b)
+ perflvl->memory = ROM16(entry[11]) * 1000;
+ else
+ perflvl->memory = ROM16(entry[11]) * 2000;
+
+ break;
+ case 0x25:
+ perflvl->fanspeed = entry[4];
+ perflvl->voltage = entry[5];
+ perflvl->core = ROM16(entry[6]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
+ break;
+ case 0x30:
+ perflvl->memscript = ROM16(entry[2]);
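+ /* fall through - v0x30 shares the remaining layout with v0x35 */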
+ case 0x35:
+ perflvl->fanspeed = entry[6];
+ perflvl->voltage = entry[7];
+ perflvl->core = ROM16(entry[8]) * 1000;
+ perflvl->shader = ROM16(entry[10]) * 1000;
+ perflvl->memory = ROM16(entry[12]) * 1000;
+ /*XXX: confirm on 0x35 */
+ perflvl->unk05 = ROM16(entry[16]) * 1000;
+ break;
+ case 0x40:
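+ /* v0x40: clock values live in sub-entries of perf[3] bytes each,
+ * appended after the perf[2]-byte base record */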
+#define subent(n) entry[perf[2] + ((n) * perf[3])]
+ perflvl->fanspeed = 0; /*XXX*/
+ perflvl->voltage = entry[2];
+ perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
+ perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
+ perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
+ break;
+ }
+
+ /* make sure vid is valid */
+ if (pm->voltage.supported && perflvl->voltage) {
+ vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+ if (vid < 0) {
+ NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+ entry += recordlen;
+ continue;
+ }
+ }
+
+ snprintf(perflvl->name, sizeof(perflvl->name),
+ "performance_level_%d", i);
+ perflvl->id = i;
+ pm->nr_perflvl++;
+
+ entry += recordlen;
+ }
+}
+
+void
+nouveau_perf_fini(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
new file mode 100644
index 000000000000..1c99c55d6d46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+static int
+nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u8 id, u32 khz)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ void *pre_state;
+
+ if (khz == 0)
+ return 0;
+
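+ /* two-stage update: clock_pre() computes the new clock state (ERR_PTR
+ * on failure, NULL to skip), clock_set() then commits it to hardware */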
+ pre_state = pm->clock_pre(dev, perflvl, id, khz);
+ if (IS_ERR(pre_state))
+ return PTR_ERR(pre_state);
+
+ if (pre_state)
+ pm->clock_set(dev, pre_state);
+ return 0;
+}
+
+static int
+nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ if (perflvl == pm->cur)
+ return 0;
+
+ if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
+ ret = pm->voltage_set(dev, perflvl->voltage);
+ if (ret) {
+ NV_ERROR(dev, "voltage_set %d failed: %d\n",
+ perflvl->voltage, ret);
+ }
+ }
+
+ nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+ nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+ nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+ nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+
+ pm->cur = perflvl;
+ return 0;
+}
+
+static int
+nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level *perflvl = NULL;
+
+ /* safety precaution, for now */
+ if (nouveau_perflvl_wr != 7777)
+ return -EPERM;
+
+ if (!pm->clock_set)
+ return -EINVAL;
+
+ if (!strncmp(profile, "boot", 4))
+ perflvl = &pm->boot;
+ else {
+ int pl = simple_strtol(profile, NULL, 10);
+ int i;
+
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ if (pm->perflvl[i].id == pl) {
+ perflvl = &pm->perflvl[i];
+ break;
+ }
+ }
+
+ if (!perflvl)
+ return -EINVAL;
+ }
+
+ NV_INFO(dev, "setting performance level: %s\n", profile);
+ return nouveau_pm_perflvl_set(dev, perflvl);
+}
+
+static int
+nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ if (!pm->clock_get)
+ return -EINVAL;
+
+ memset(perflvl, 0, sizeof(*perflvl));
+
+ ret = pm->clock_get(dev, PLL_CORE);
+ if (ret > 0)
+ perflvl->core = ret;
+
+ ret = pm->clock_get(dev, PLL_MEMORY);
+ if (ret > 0)
+ perflvl->memory = ret;
+
+ ret = pm->clock_get(dev, PLL_SHADER);
+ if (ret > 0)
+ perflvl->shader = ret;
+
+ ret = pm->clock_get(dev, PLL_UNK05);
+ if (ret > 0)
+ perflvl->unk05 = ret;
+
+ if (pm->voltage.supported && pm->voltage_get) {
+ ret = pm->voltage_get(dev);
+ if (ret > 0)
+ perflvl->voltage = ret;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+{
+ char c[16], s[16], v[16], f[16];
+
+ c[0] = '\0';
+ if (perflvl->core)
+ snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
+
+ s[0] = '\0';
+ if (perflvl->shader)
+ snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+
+ v[0] = '\0';
+ if (perflvl->voltage)
+ snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+
+ f[0] = '\0';
+ if (perflvl->fanspeed)
+ snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+
+ snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000,
+ c, s, v, f);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl_info(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
+ char *ptr = buf;
+ int len = PAGE_SIZE;
+
+ snprintf(ptr, len, "%d: ", perflvl->id);
+ ptr += strlen(buf);
+ len -= strlen(buf);
+
+ nouveau_pm_perflvl_info(perflvl, ptr, len);
+ return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level cur;
+ int len = PAGE_SIZE, ret;
+ char *ptr = buf;
+
+ if (!pm->cur)
+ snprintf(ptr, len, "setting: boot\n");
+ else if (pm->cur == &pm->boot)
+ snprintf(ptr, len, "setting: boot\nc: ");
+ else
+ snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+ ptr += strlen(buf);
+ len -= strlen(buf);
+
+ ret = nouveau_pm_perflvl_get(dev, &cur);
+ if (ret == 0)
+ nouveau_pm_perflvl_info(&cur, ptr, len);
+ return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+ int ret;
+
+ ret = nouveau_pm_profile_set(dev, buf);
+ if (ret)
+ return ret;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
+ nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
+
+static int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *d = &dev->pdev->dev;
+ int ret, i;
+
+ ret = device_create_file(d, &dev_attr_performance_level);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ struct nouveau_pm_level *perflvl = &pm->perflvl[i];
+
+ perflvl->dev_attr.attr.name = perflvl->name;
+ perflvl->dev_attr.attr.mode = S_IRUGO;
+ perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
+ perflvl->dev_attr.store = NULL;
+ sysfs_attr_init(&perflvl->dev_attr.attr);
+
+ ret = device_create_file(d, &perflvl->dev_attr);
+ if (ret) {
+ NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n",
+ perflvl->id, i);
+ perflvl->dev_attr.attr.name = NULL;
+ nouveau_pm_fini(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *d = &dev->pdev->dev;
+ int i;
+
+ device_remove_file(d, &dev_attr_performance_level);
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ struct nouveau_pm_level *pl = &pm->perflvl[i];
+
+ if (!pl->dev_attr.attr.name)
+ break;
+
+ device_remove_file(d, &pl->dev_attr);
+ }
+}
+
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+ NULL, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ temp->down_clock = value / 1000;
+
+ nouveau_temp_safety_checks(dev);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+ nouveau_hwmon_set_max_temp,
+ 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return count;
+
+ temp->critical = value / 1000;
+
+ nouveau_temp_safety_checks(dev);
+
+ return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_critical_temp,
+ nouveau_hwmon_set_critical_temp,
+ 0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+ nouveau_hwmon_show_update_rate,
+ NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_update_rate.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+};
+
+static int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!pm->temp_get)
+ return -ENODEV;
+
+ hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
+ NV_ERROR(dev,
+ "Unable to register hwmon device: %d\n", ret);
+ return ret;
+ }
+ dev_set_drvdata(hwmon_dev, dev);
+ ret = sysfs_create_group(&hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (ret) {
+ NV_ERROR(dev,
+ "Unable to create hwmon sysfs file: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ return ret;
+ }
+
+ pm->hwmon = hwmon_dev;
+
+ return 0;
+}
+
+static void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ if (pm->hwmon) {
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+ hwmon_device_unregister(pm->hwmon);
+ }
+}
+
+int
+nouveau_pm_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ char info[256];
+ int ret, i;
+
+ nouveau_volt_init(dev);
+ nouveau_perf_init(dev);
+ nouveau_temp_init(dev);
+ nouveau_mem_timing_init(dev);
+
+ NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+ for (i = 0; i < pm->nr_perflvl; i++) {
+ nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+ NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+ }
+
+ /* determine current ("boot") performance level */
+ ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+ if (ret == 0) {
+ pm->cur = &pm->boot;
+
+ nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+ NV_INFO(dev, "c: %s", info);
+ }
+
+ /* switch performance levels now if requested */
+ if (nouveau_perflvl != NULL) {
+ ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+ if (ret) {
+ NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+ nouveau_perflvl, ret);
+ }
+ }
+
+ nouveau_sysfs_init(dev);
+ nouveau_hwmon_init(dev);
+
+ return 0;
+}
+
+void
+nouveau_pm_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ if (pm->cur != &pm->boot)
+ nouveau_pm_perflvl_set(dev, &pm->boot);
+
+ nouveau_mem_timing_fini(dev);
+ nouveau_temp_fini(dev);
+ nouveau_perf_fini(dev);
+ nouveau_volt_fini(dev);
+
+ nouveau_hwmon_fini(dev);
+ nouveau_sysfs_fini(dev);
+}
+
+void
+nouveau_pm_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_level *perflvl;
+
+ if (pm->cur == &pm->boot)
+ return;
+
+ perflvl = pm->cur;
+ pm->cur = &pm->boot;
+ nouveau_pm_perflvl_set(dev, perflvl);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
new file mode 100644
index 000000000000..4a9838ddacec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_PM_H__
+#define __NOUVEAU_PM_H__
+
+/* nouveau_pm.c */
+int nouveau_pm_init(struct drm_device *dev);
+void nouveau_pm_fini(struct drm_device *dev);
+void nouveau_pm_resume(struct drm_device *dev);
+
+/* nouveau_volt.c */
+void nouveau_volt_init(struct drm_device *);
+void nouveau_volt_fini(struct drm_device *);
+int nouveau_volt_vid_lookup(struct drm_device *, int voltage);
+int nouveau_volt_lvl_lookup(struct drm_device *, int vid);
+int nouveau_voltage_gpio_get(struct drm_device *);
+int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+
+/* nouveau_perf.c */
+void nouveau_perf_init(struct drm_device *);
+void nouveau_perf_fini(struct drm_device *);
+
+/* nouveau_mem.c */
+void nouveau_mem_timing_init(struct drm_device *);
+void nouveau_mem_timing_fini(struct drm_device *);
+
+/* nv04_pm.c */
+int nv04_pm_clock_get(struct drm_device *, u32 id);
+void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv04_pm_clock_set(struct drm_device *, void *);
+
+/* nv50_pm.c */
+int nv50_pm_clock_get(struct drm_device *, u32 id);
+void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nv50_pm_clock_set(struct drm_device *, void *);
+
+/* nva3_pm.c */
+int nva3_pm_clock_get(struct drm_device *, u32 id);
+void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+ u32 id, int khz);
+void nva3_pm_clock_set(struct drm_device *, void *);
+
+/* nouveau_temp.c */
+void nouveau_temp_init(struct drm_device *dev);
+void nouveau_temp_fini(struct drm_device *dev);
+void nouveau_temp_safety_checks(struct drm_device *dev);
+int nv40_temp_get(struct drm_device *dev);
+int nv84_temp_get(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
new file mode 100644
index 000000000000..7f16697cc96c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+
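+/*
+ * Fold a 32-bit handle down to a RAMHT slot offset: XOR ramht->bits-sized
+ * chunks of the handle together, mix in the channel id on pre-NV50 chips,
+ * then shift left by 3 since each hash entry is two 32-bit words.
+ */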
+static u32
+nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_ramht *ramht = chan->ramht;
+ u32 hash = 0;
+ int i;
+
+ NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
+
+ for (i = 32; i > 0; i -= ramht->bits) {
+ hash ^= (handle & ((1 << ramht->bits) - 1));
+ handle >>= ramht->bits;
+ }
+
+ if (dev_priv->card_type < NV_50)
+ hash ^= chan->id << (ramht->bits - 4);
+ hash <<= 3;
+
+ NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ return hash;
+}
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
+ struct nouveau_gpuobj *ramht, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type >= NV_50)
+ return true;
+ else if (dev_priv->card_type >= NV_40)
+ return chan->id ==
+ ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+ else
+ return chan->id ==
+ ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+}
+
+int
+nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
+ struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ unsigned long flags;
+ u32 ctx, co, ho;
+
+ if (nouveau_ramht_find(chan, handle))
+ return -EEXIST;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->channel = chan;
+ entry->gpuobj = NULL;
+ entry->handle = handle;
+ nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
+
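+ /* pack the context word: object instance address, owning channel
+ * (pre-NV50) and engine, in the layout expected by this card type */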
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+ (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (gpuobj->cinst >> 4) |
+ (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+ ctx = (gpuobj->cinst << 10) | 2;
+ } else {
+ ctx = (gpuobj->cinst >> 4) |
+ ((gpuobj->engine <<
+ NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+ }
+ }
+
+ spin_lock_irqsave(&chan->ramht->lock, flags);
+ list_add(&entry->head, &chan->ramht->entries);
+
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ NV_DEBUG(dev,
+ "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, ctx);
+ nv_wo32(ramht, co + 0, handle);
+ nv_wo32(ramht, co + 4, ctx);
+
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ instmem->flush(dev);
+ return 0;
+ }
+ NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+ chan->id, co, nv_ro32(ramht, co));
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ kfree(entry);
+ return -ENOMEM;
+}
+
+static void
+nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ struct nouveau_ramht_entry *entry, *tmp;
+ u32 co, ho;
+
+ list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
+ if (entry->channel != chan || entry->handle != handle)
+ continue;
+
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ list_del(&entry->head);
+ kfree(entry);
+ break;
+ }
+
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ nouveau_ramht_entry_same_channel(chan, ramht, co) &&
+ (handle == nv_ro32(ramht, co))) {
+ NV_DEBUG(dev,
+ "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, nv_ro32(ramht, co + 4));
+ nv_wo32(ramht, co + 0, 0x00000000);
+ nv_wo32(ramht, co + 4, 0x00000000);
+ instmem->flush(dev);
+ return;
+ }
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ chan->id, handle);
+}
+
+void
+nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan->ramht;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ nouveau_ramht_remove_locked(chan, handle);
+ spin_unlock_irqrestore(&ramht->lock, flags);
+}
+
+struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan->ramht;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ unsigned long flags;
+
+ if (unlikely(!chan->ramht))
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &chan->ramht->entries, head) {
+ if (entry->channel == chan && entry->handle == handle) {
+ gpuobj = entry->gpuobj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return gpuobj;
+}
+
+int
+nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_ramht **pramht)
+{
+ struct nouveau_ramht *ramht;
+
+ ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
+ if (!ramht)
+ return -ENOMEM;
+
+ ramht->dev = dev;
+ kref_init(&ramht->refcount);
+ ramht->bits = drm_order(gpuobj->size / 8);
+ INIT_LIST_HEAD(&ramht->entries);
+ spin_lock_init(&ramht->lock);
+ nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
+
+ *pramht = ramht;
+ return 0;
+}
+
+static void
+nouveau_ramht_del(struct kref *ref)
+{
+ struct nouveau_ramht *ramht =
+ container_of(ref, struct nouveau_ramht, refcount);
+
+ nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
+ kfree(ramht);
+}
+
+void
+nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
+ struct nouveau_channel *chan)
+{
+ struct nouveau_ramht_entry *entry, *tmp;
+ struct nouveau_ramht *ramht;
+ unsigned long flags;
+
+ if (ref)
+ kref_get(&ref->refcount);
+
+ ramht = *ptr;
+ if (ramht) {
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
+ if (entry->channel != chan)
+ continue;
+
+ nouveau_ramht_remove_locked(chan, entry->handle);
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ kref_put(&ramht->refcount, nouveau_ramht_del);
+ }
+ *ptr = ref;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
new file mode 100644
index 000000000000..b79cb5e1a8f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+struct nouveau_ramht_entry {
+ struct list_head head;
+ struct nouveau_channel *channel;
+ struct nouveau_gpuobj *gpuobj;
+ u32 handle;
+};
+
+struct nouveau_ramht {
+ struct drm_device *dev;
+ struct kref refcount;
+ spinlock_t lock;
+ struct nouveau_gpuobj *gpuobj;
+ struct list_head entries;
+ int bits;
+};
+
+extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
+ struct nouveau_ramht **);
+extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
+ struct nouveau_channel *unref_channel);
+
+extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
+ struct nouveau_gpuobj *);
+extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 21a6e453b975..1b42541ca9e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -551,6 +551,8 @@
#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
#define NV03_PFIFO_CACHE1_PULL0 0x00003240
#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
#define NV03_PFIFO_CACHE1_PULL1 0x00003250
#define NV04_PFIFO_CACHE1_PULL1 0x00003254
#define NV04_PFIFO_CACHE1_HASH 0x00003258
@@ -785,15 +787,12 @@
#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8)
#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
-
#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
#define NV50_PDISPLAY_CRTC_CLK 0x00614000
#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 6b9187d7f67d..288bacac7e5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
- NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+ NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+ pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
nvbe->pte_start = pte;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
uint32_t offset_h = upper_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, offset_l | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
- nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+ nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
- nv_wo32(dev, gpuobj, pte++, 0x00000000);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+ nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -230,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev)
}
ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ALLOW_NO_REFS |
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
@@ -239,9 +242,9 @@ nouveau_sgdma_init(struct drm_device *dev)
}
dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32);
+ alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
@@ -250,29 +253,34 @@ nouveau_sgdma_init(struct drm_device *dev)
pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -EFAULT;
}
if (dev_priv->card_type < NV_50) {
+ /* Special case: allocated from the global instmem heap, so
+ * cinst is normally invalid. We use this object on all channels
+ * though, so cinst needs to be valid; set it the same as pinst.
+ */
+ gpuobj->cinst = gpuobj->pinst;
+
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
- nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
- (1 << 12) /* PT present */ |
- (0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
- nv_wo32(dev, gpuobj, 1, aper_size - 1);
+ nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ nv_wo32(gpuobj, 4, aper_size - 1);
for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(dev, gpuobj, i,
- dev_priv->gart_info.sg_dummy_bus | 3);
+ nv_wo32(gpuobj, i * 4,
+ dev_priv->gart_info.sg_dummy_bus | 3);
}
} else {
for (i = 0; i < obj_size; i += 8) {
- nv_wo32(dev, gpuobj, (i+0)/4,
- dev_priv->gart_info.sg_dummy_bus | 0x21);
- nv_wo32(dev, gpuobj, (i+4)/4, 0);
+ nv_wo32(gpuobj, i + 0, 0x00000000);
+ nv_wo32(gpuobj, i + 4, 0x00000000);
}
}
dev_priv->engine.instmem.flush(dev);
@@ -298,7 +306,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_bus = 0;
}
- nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}
int
@@ -308,9 +316,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
int pte;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 989322be3728..ed7757f14083 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,6 +35,8 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
+#include "nouveau_pm.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -78,7 +80,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv04_fifo_channel_id;
engine->fifo.create_context = nv04_fifo_create_context;
@@ -95,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = NULL;
engine->gpio.set = NULL;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
@@ -130,7 +134,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
@@ -182,7 +188,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -199,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
@@ -234,7 +242,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
@@ -251,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
case 0x40:
case 0x60:
@@ -287,7 +299,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_flush = nv04_fifo_cache_flush;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
@@ -304,6 +315,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv10_gpio_get;
engine->gpio.set = nv10_gpio_set;
engine->gpio.irq_enable = NULL;
+ engine->pm.clock_get = nv04_pm_clock_get;
+ engine->pm.clock_pre = nv04_pm_clock_pre;
+ engine->pm.clock_set = nv04_pm_clock_set;
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->pm.temp_get = nv40_temp_get;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -358,6 +375,27 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ switch (dev_priv->chipset) {
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaf:
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
+ break;
+ default:
+ engine->pm.clock_get = nv50_pm_clock_get;
+ engine->pm.clock_pre = nv50_pm_clock_pre;
+ engine->pm.clock_set = nv50_pm_clock_set;
+ break;
+ }
+ engine->pm.voltage_get = nouveau_voltage_gpio_get;
+ engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ if (dev_priv->chipset >= 0x84)
+ engine->pm.temp_get = nv84_temp_get;
+ else
+ engine->pm.temp_get = nv40_temp_get;
break;
case 0xC0:
engine->instmem.init = nvc0_instmem_init;
@@ -437,16 +475,14 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
+ struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2,
- NvDmaFB, NvDmaTT);
+ (struct drm_file *)-2, NvDmaFB, NvDmaTT);
if (ret)
return ret;
- gpuobj = NULL;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
@@ -454,26 +490,25 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
- gpuobj, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
- gpuobj = NULL;
ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RW, &gpuobj, NULL);
if (ret)
goto out_err;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
- gpuobj, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
return 0;
+
out_err:
- nouveau_gpuobj_del(dev, &gpuobj);
nouveau_channel_free(dev_priv->channel);
dev_priv->channel = NULL;
return ret;
@@ -534,35 +569,28 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_display_early;
- ret = nouveau_mem_detect(dev);
+ nouveau_pm_init(dev);
+
+ ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_bios;
- ret = nouveau_gpuobj_early_init(dev);
+ ret = nouveau_gpuobj_init(dev);
if (ret)
- goto out_bios;
+ goto out_vram;
- /* Initialise instance memory, must happen before mem_init so we
- * know exactly how much VRAM we're able to use for "normal"
- * purposes.
- */
ret = engine->instmem.init(dev);
if (ret)
- goto out_gpuobj_early;
+ goto out_gpuobj;
- /* Setup the memory manager */
- ret = nouveau_mem_init(dev);
+ ret = nouveau_mem_gart_init(dev);
if (ret)
goto out_instmem;
- ret = nouveau_gpuobj_init(dev);
- if (ret)
- goto out_mem;
-
/* PMC */
ret = engine->mc.init(dev);
if (ret)
- goto out_gpuobj;
+ goto out_gart;
/* PGPIO */
ret = engine->gpio.init(dev);
@@ -611,9 +639,13 @@ nouveau_card_init(struct drm_device *dev)
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
if (!engine->graph.accel_blocked) {
- ret = nouveau_card_init_channel(dev);
+ ret = nouveau_fence_init(dev);
if (ret)
goto out_irq;
+
+ ret = nouveau_card_init_channel(dev);
+ if (ret)
+ goto out_fence;
}
ret = nouveau_backlight_init(dev);
@@ -624,6 +656,8 @@ nouveau_card_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
return 0;
+out_fence:
+ nouveau_fence_fini(dev);
out_irq:
drm_irq_uninstall(dev);
out_display:
@@ -642,16 +676,16 @@ out_gpio:
engine->gpio.takedown(dev);
out_mc:
engine->mc.takedown(dev);
-out_gpuobj:
- nouveau_gpuobj_takedown(dev);
-out_mem:
- nouveau_sgdma_takedown(dev);
- nouveau_mem_close(dev);
+out_gart:
+ nouveau_mem_gart_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
-out_gpuobj_early:
- nouveau_gpuobj_late_takedown(dev);
+out_gpuobj:
+ nouveau_gpuobj_takedown(dev);
+out_vram:
+ nouveau_mem_vram_fini(dev);
out_bios:
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
out_display_early:
engine->display.late_takedown(dev);
@@ -667,7 +701,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_backlight_exit(dev);
- if (dev_priv->channel) {
+ if (!engine->graph.accel_blocked) {
+ nouveau_fence_fini(dev);
nouveau_channel_free(dev_priv->channel);
dev_priv->channel = NULL;
}
@@ -686,15 +721,15 @@ static void nouveau_card_takedown(struct drm_device *dev)
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
- nouveau_sgdma_takedown(dev);
+ nouveau_mem_gart_fini(dev);
- nouveau_gpuobj_takedown(dev);
- nouveau_mem_close(dev);
engine->instmem.takedown(dev);
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_vram_fini(dev);
drm_irq_uninstall(dev);
- nouveau_gpuobj_late_takedown(dev);
+ nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1057,7 +1092,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
- if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
nv_rd32(dev, NV04_PGRAPH_STATUS));
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
new file mode 100644
index 000000000000..16bbbf1eff63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2010 PathScale inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+ int i, headerlen, recordlen, entries;
+
+ if (!temp) {
+ NV_DEBUG(dev, "temperature table pointer invalid\n");
+ return;
+ }
+
+ /* Set the default sensor's constants */
+ sensor->offset_constant = 0;
+ sensor->offset_mult = 1;
+ sensor->offset_div = 1;
+ sensor->slope_mult = 1;
+ sensor->slope_div = 1;
+
+ /* Set the default temperature thresholds */
+ temps->critical = 110;
+ temps->down_clock = 100;
+ temps->fan_boost = 90;
+
+ /* Set the known default values to set up the temperature sensor */
+ if (dev_priv->card_type >= NV_40) {
+ switch (dev_priv->chipset) {
+ case 0x43:
+ sensor->offset_mult = 32060;
+ sensor->offset_div = 1000;
+ sensor->slope_mult = 792;
+ sensor->slope_div = 1000;
+ break;
+
+ case 0x44:
+ case 0x47:
+ case 0x4a:
+ sensor->offset_mult = 27839;
+ sensor->offset_div = 1000;
+ sensor->slope_mult = 780;
+ sensor->slope_div = 1000;
+ break;
+
+ case 0x46:
+ sensor->offset_mult = -24775;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 467;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x49:
+ sensor->offset_mult = -25051;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 458;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x4b:
+ sensor->offset_mult = -24088;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 442;
+ sensor->slope_div = 10000;
+ break;
+
+ case 0x50:
+ sensor->offset_mult = -22749;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 431;
+ sensor->slope_div = 10000;
+ break;
+ }
+ }
+
+ headerlen = temp[1];
+ recordlen = temp[2];
+ entries = temp[3];
+ temp = temp + headerlen;
+
+ /* Read the entries from the table */
+ for (i = 0; i < entries; i++) {
+ u16 value = ROM16(temp[1]);
+
+ switch (temp[0]) {
+ case 0x01:
+ if ((value & 0x8f) == 0)
+ sensor->offset_constant = (value >> 9) & 0x7f;
+ break;
+
+ case 0x04:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->critical = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x07:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->down_clock = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x08:
+ if ((value & 0xf00f) == 0xa000) /* core */
+ temps->fan_boost = (value&0x0ff0) >> 4;
+ break;
+
+ case 0x10:
+ sensor->offset_mult = value;
+ break;
+
+ case 0x11:
+ sensor->offset_div = value;
+ break;
+
+ case 0x12:
+ sensor->slope_mult = value;
+ break;
+
+ case 0x13:
+ sensor->slope_div = value;
+ break;
+ }
+ temp += recordlen;
+ }
+
+ nouveau_temp_safety_checks(dev);
+}
+
+static int
+nv40_sensor_setup(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ u32 offset = sensor->offset_mult / sensor->offset_div;
+ u32 sensor_calibration;
+
+ /* set up the sensors */
+ sensor_calibration = 120 - offset - sensor->offset_constant;
+ sensor_calibration = sensor_calibration * sensor->slope_div /
+ sensor->slope_mult;
+
+ if (dev_priv->chipset >= 0x46)
+ sensor_calibration |= 0x80000000;
+ else
+ sensor_calibration |= 0x10000000;
+
+ nv_wr32(dev, 0x0015b0, sensor_calibration);
+
+ /* Wait for the sensor to update */
+ msleep(5);
+
+ /* read */
+ return nv_rd32(dev, 0x0015b4) & 0x1fff;
+}
+
+int
+nv40_temp_get(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+ int offset = sensor->offset_mult / sensor->offset_div;
+ int core_temp;
+
+ if (dev_priv->chipset >= 0x50) {
+ core_temp = nv_rd32(dev, 0x20008);
+ } else {
+ core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
+ /* Setup the sensor if the temperature is 0 */
+ if (core_temp == 0)
+ core_temp = nv40_sensor_setup(dev);
+ }
+
+ core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+ core_temp = core_temp + offset + sensor->offset_constant;
+
+ return core_temp;
+}
+
+int
+nv84_temp_get(struct drm_device *dev)
+{
+ return nv_rd32(dev, 0x20400);
+}
+
+void
+nouveau_temp_safety_checks(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+
+ if (temps->critical > 120)
+ temps->critical = 120;
+ else if (temps->critical < 80)
+ temps->critical = 80;
+
+ if (temps->down_clock > 110)
+ temps->down_clock = 110;
+ else if (temps->down_clock < 60)
+ temps->down_clock = 60;
+
+ if (temps->fan_boost > 100)
+ temps->fan_boost = 100;
+ else if (temps->fan_boost < 40)
+ temps->fan_boost = 40;
+}
+
+static bool
+probe_monitoring_device(struct nouveau_i2c_chan *i2c,
+ struct i2c_board_info *info)
+{
+ char modalias[16] = "i2c:";
+ struct i2c_client *client;
+
+ strlcat(modalias, info->type, sizeof(modalias));
+ request_module(modalias);
+
+ client = i2c_new_device(&i2c->adapter, info);
+ if (!client)
+ return false;
+
+ if (!client->driver || client->driver->detect(client, info)) {
+ i2c_unregister_device(client);
+ return false;
+ }
+
+ return true;
+}
+
+static void
+nouveau_temp_probe_i2c(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct i2c_board_info info[] = {
+ { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ { I2C_BOARD_INFO("w83781d", 0x2d) },
+ { I2C_BOARD_INFO("f75375", 0x2e) },
+ { I2C_BOARD_INFO("adt7473", 0x2e) },
+ { I2C_BOARD_INFO("lm99", 0x4c) },
+ { }
+ };
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf : 2);
+
+ nouveau_i2c_identify(dev, "monitoring device", info,
+ probe_monitoring_device, idx);
+}
+
+void
+nouveau_temp_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 *temp = NULL;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ temp = ROMPTR(bios, P.data[12]);
+ else if (P.version == 2)
+ temp = ROMPTR(bios, P.data[16]);
+ else
+ NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
+
+ nouveau_temp_vbios_parse(dev, temp);
+ }
+
+ nouveau_temp_probe_i2c(dev);
+}
+
+void
+nouveau_temp_fini(struct drm_device *dev)
+{
+
+}
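nv40_temp_get() above converts the raw 0x15b4 sensor reading using the per-chipset slope and offset constants parsed (or defaulted) in nouveau_temp_vbios_parse(). A small worked example of that arithmetic, assuming the NV43 defaults from the switch statement and a made-up raw reading; it is illustrative only and not part of the patch.

/* Worked example of the nv40_temp_get() conversion; not patch content. */
#include <stdio.h>

int main(void)
{
	int slope_mult = 792, slope_div = 1000;		/* NV43 defaults */
	int offset_mult = 32060, offset_div = 1000;
	int offset_constant = 0;
	int raw = 60;					/* hypothetical 0x15b4 value */

	/* core_temp = raw * slope + offset, all in integer arithmetic */
	int core_temp = raw * slope_mult / slope_div;
	core_temp = core_temp + offset_mult / offset_div + offset_constant;

	printf("core temp ~= %d C\n", core_temp);	/* 47 + 32 = 79 C here */
	return 0;
}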
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
new file mode 100644
index 000000000000..04fdc00a67d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
+static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
+
+int
+nouveau_voltage_gpio_get(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ u8 vid = 0;
+ int i;
+
+ for (i = 0; i < nr_vidtag; i++) {
+ if (!(volt->vid_mask & (1 << i)))
+ continue;
+
+ vid |= gpio->get(dev, vidtag[i]) << i;
+ }
+
+ return nouveau_volt_lvl_lookup(dev, vid);
+}
+
+int
+nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int vid, i;
+
+ vid = nouveau_volt_vid_lookup(dev, voltage);
+ if (vid < 0)
+ return vid;
+
+ for (i = 0; i < nr_vidtag; i++) {
+ if (!(volt->vid_mask & (1 << i)))
+ continue;
+
+ gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+ }
+
+ return 0;
+}
+
+int
+nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int i;
+
+ for (i = 0; i < volt->nr_level; i++) {
+ if (volt->level[i].voltage == voltage)
+ return volt->level[i].vid;
+ }
+
+ return -ENOENT;
+}
+
+int
+nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ int i;
+
+ for (i = 0; i < volt->nr_level; i++) {
+ if (volt->level[i].vid == vid)
+ return volt->level[i].voltage;
+ }
+
+ return -ENOENT;
+}
+
+void
+nouveau_volt_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_voltage *voltage = &pm->voltage;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 *volt = NULL, *entry;
+ int i, headerlen, recordlen, entries, vidmask, vidshift;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ volt = ROMPTR(bios, P.data[16]);
+ else
+ if (P.version == 2)
+ volt = ROMPTR(bios, P.data[12]);
+ else {
+ NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
+ }
+ } else {
+ if (bios->data[bios->offset + 6] < 0x27) {
+ NV_DEBUG(dev, "BMP version too old for voltage\n");
+ return;
+ }
+
+ volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+ }
+
+ if (!volt) {
+ NV_DEBUG(dev, "voltage table pointer invalid\n");
+ return;
+ }
+
+ switch (volt[0]) {
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ headerlen = 5;
+ recordlen = volt[1];
+ entries = volt[2];
+ vidshift = 0;
+ vidmask = volt[4];
+ break;
+ case 0x20:
+ headerlen = volt[1];
+ recordlen = volt[3];
+ entries = volt[2];
+ vidshift = 0; /* could be vidshift like 0x30? */
+ vidmask = volt[5];
+ break;
+ case 0x30:
+ headerlen = volt[1];
+ recordlen = volt[2];
+ entries = volt[3];
+ vidshift = hweight8(volt[5]);
+ vidmask = volt[4];
+ break;
+ default:
+ NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
+ return;
+ }
+
+ /* validate vid mask */
+ voltage->vid_mask = vidmask;
+ if (!voltage->vid_mask)
+ return;
+
+ i = 0;
+ while (vidmask) {
+ if (i > nr_vidtag) {
+ NV_DEBUG(dev, "vid bit %d unknown\n", i);
+ return;
+ }
+
+ if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+ NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
+ return;
+ }
+
+ vidmask >>= 1;
+ i++;
+ }
+
+ /* parse vbios entries into common format */
+ voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+ if (!voltage->level)
+ return;
+
+ entry = volt + headerlen;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ voltage->level[i].voltage = entry[0];
+ voltage->level[i].vid = entry[1] >> vidshift;
+ }
+ voltage->nr_level = entries;
+ voltage->supported = true;
+}
+
+void
+nouveau_volt_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+
+ kfree(volt->level);
+}
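The GPIO-based voltage path above assembles a VID from up to four GPIO lines, gated by the vid_mask taken from the vbios voltage table, and then resolves it through nouveau_volt_lvl_lookup(). A standalone sketch of that bit assembly follows; the GPIO states and mask below are invented for illustration and are not part of the patch.

/* Sketch of the VID-from-GPIO assembly in nouveau_voltage_gpio_get(); not patch content. */
#include <stdio.h>

int main(void)
{
	int vid_mask = 0x5;			/* hypothetical: VID bits 0 and 2 wired up */
	int gpio_state[4] = { 1, 0, 1, 0 };	/* pretend gpio->get() results per vidtag */
	int vid = 0, i;

	for (i = 0; i < 4; i++) {
		/* only bits present in the table's vid_mask are sampled */
		if (!(vid_mask & (1 << i)))
			continue;
		vid |= gpio_state[i] << i;
	}

	/* the resulting VID would be matched against the parsed voltage levels */
	printf("vid = 0x%x\n", vid);
	return 0;
}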
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 497df8765f28..c71abc2a34d5 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -33,6 +33,7 @@
#include "nouveau_fb.h"
#include "nouveau_hw.h"
#include "nvreg.h"
+#include "nouveau_fbcon.h"
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -109,7 +110,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
struct nouveau_pll_vals *pv = &regp->pllvals;
struct pll_lims pll_lim;
- if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+ if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim))
return;
/* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -718,6 +719,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -768,8 +770,9 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
}
static int
-nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *passed_fb,
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -780,13 +783,26 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
int arb_burst, arb_lwm;
int ret;
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
+ /* If atomic, we want to switch to the fb we were passed, so
+ * now we update pointers to do that. (We don't pin; just
+ * assume we're already pinned and update the base address.)
+ */
+ if (atomic) {
+ drm_fb = passed_fb;
+ fb = nouveau_framebuffer(passed_fb);
+ }
+ else {
+ /* If not atomic, we can go ahead and pin, and unpin the
+ * old fb we were passed.
+ */
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
- if (old_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(ofb->nvbo);
+ if (passed_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -826,7 +842,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30) {
+ if (dev_priv->card_type >= NV_20) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
@@ -834,6 +850,29 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+}
+
+static int
+nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+
+ if (state == ENTER_ATOMIC_MODE_SET)
+ nouveau_fbcon_save_disable_accel(dev);
+ else
+ nouveau_fbcon_restore_accel(dev);
+
+ return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
+}
+
static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
struct nouveau_bo *dst)
{
@@ -962,6 +1001,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_fixup = nv_crtc_mode_fixup,
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
+ .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.load_lut = nv_crtc_gamma_load,
};
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ea3627041ecf..ba6423f2ffcc 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
msleep(5);
sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ /* do it again just in case it's a residual current */
+ sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -343,22 +345,13 @@ static void nv04_dac_prepare(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int head = nouveau_crtc(encoder->crtc)->index;
- struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_disable(dev, head);
-
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
- crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
}
-
static void nv04_dac_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 0d3206a7046c..c936403b26e2 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
}
/* don't inadvertently turn it on when state written later */
crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+ crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
+ ~NV_CIO_CRE_LCD_ROUTE_MASK;
}
void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
@@ -253,26 +255,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(*cr_lcd & 0x44)) {
- *cr_lcd = 0x3;
-
- if (nv_two_heads(dev)) {
- if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
- *cr_lcd |= head ? 0x0 : 0x8;
- else {
- *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
- if (nv_encoder->dcb->type == OUTPUT_LVDS)
- *cr_lcd |= 0x30;
- if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
- /* avoid being connected to both crtcs */
- *cr_lcd_oth &= ~0x30;
- NVWriteVgaCrtc(dev, head ^ 1,
- NV_CIO_CRE_LCD__INDEX,
- *cr_lcd_oth);
- }
+ *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
+
+ if (nv_two_heads(dev)) {
+ if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+ *cr_lcd |= head ? 0x0 : 0x8;
+ else {
+ *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ *cr_lcd |= 0x30;
+ if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+ /* avoid being connected to both crtcs */
+ *cr_lcd_oth &= ~0x30;
+ NVWriteVgaCrtc(dev, head ^ 1,
+ NV_CIO_CRE_LCD__INDEX,
+ *cr_lcd_oth);
}
}
}
@@ -640,7 +637,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
- type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2);
+ type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2);
if (type < 0)
return;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 1eeac4fae73d..33e4c9388bc1 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -25,6 +25,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
void
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
- if (ret)
- return ret;
-
- return 0;
+ ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
int
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 06cedd99c26a..708293b7ddcd 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
-#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
@@ -38,10 +39,8 @@
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18
-#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
- NV04_RAMFC_##offset/4, (val))
-#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
- NV04_RAMFC_##offset/4)
+#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
+#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
void
nv04_fifo_disable(struct drm_device *dev)
@@ -72,37 +71,32 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
}
bool
-nv04_fifo_cache_flush(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- uint64_t start = ptimer->read(dev);
-
- do {
- if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
- return true;
-
- } while (ptimer->read(dev) - start < 100000000);
-
- NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
-
- return false;
-}
-
-bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
- uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+ int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
+
+ if (!enable) {
+ /* In some cases the PFIFO puller may be left in an
+ * inconsistent state if you try to stop it when it's
+ * busy translating handles. Sometimes you get a
+ * PFIFO_CACHE_ERROR, sometimes it just fails silently
+ * sending incorrect instance offsets to PGRAPH after
+ * it's started up again. To avoid the latter we
+ * invalidate the most recently calculated instance.
+ */
+ if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
+
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
- if (enable) {
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
- } else {
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
- return !!(pull & 1);
+ return pull & 1;
}
int
@@ -130,7 +124,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
- NULL, &chan->ramfc);
+ &chan->ramfc);
if (ret)
return ret;
@@ -139,7 +133,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -161,7 +155,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -264,10 +258,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 4408232d33f1..0b5ae297abde 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
/* returns the size of fifo context */
static int
@@ -17,102 +18,51 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
return 32;
}
-static void
-nv04_instmem_determine_amount(struct drm_device *dev)
+int nv04_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
+ struct nouveau_gpuobj *ramht = NULL;
+ u32 offset, length;
+ int ret;
- /* Figure out how much instance memory we need */
- if (dev_priv->card_type >= NV_40) {
- /* We'll want more instance memory than this on some NV4x cards.
- * There's a 16MB aperture to play with that maps onto the end
- * of vram. For now, only reserve a small piece until we know
- * more about what each chipset requires.
- */
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x47:
- case 0x49:
- case 0x4b:
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- break;
- default:
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- break;
- }
- } else {
- /*XXX: what *are* the limits on <NV40 cards?
- */
- dev_priv->ramin_rsvd_vram = (512 * 1024);
- }
- NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+ /* RAMIN always available */
+ dev_priv->ramin_available = true;
- /* Clear all of it, except the BIOS image that's in the first 64KiB */
- for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
- nv_wi32(dev, i, 0x00000000);
-}
+ /* Setup shared RAMHT */
+ ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ if (ret)
+ return ret;
-static void
-nv04_instmem_configure_fixed_tables(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret)
+ return ret;
- /* FIFO hash table (RAMHT)
- * use 4k hash table at RAMIN+0x10000
- * TODO: extend the hash table
- */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
- dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
- NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
- dev_priv->ramht_size);
-
- /* FIFO runout table (RAMRO) - 512k at 0x11200 */
- dev_priv->ramro_offset = 0x11200;
- dev_priv->ramro_size = 512;
- NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
- dev_priv->ramro_size);
-
- /* FIFO context table (RAMFC)
- * NV40 : Not sure exactly how to position RAMFC on some cards,
- * 0x30002 seems to position it at RAMIN+0x20000 on these
- * cards. RAMFC is 4kb (32 fifos, 128byte entries).
- * Others: Position RAMFC at RAMIN+0x11400
- */
- dev_priv->ramfc_size = engine->fifo.channels *
- nouveau_fifo_ctx_size(dev);
+ /* And RAMRO */
+ ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
+ if (ret)
+ return ret;
+
+ /* And RAMFC */
+ length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
switch (dev_priv->card_type) {
case NV_40:
- dev_priv->ramfc_offset = 0x20000;
+ offset = 0x20000;
break;
- case NV_30:
- case NV_20:
- case NV_10:
- case NV_04:
default:
- dev_priv->ramfc_offset = 0x11400;
+ offset = 0x11400;
break;
}
- NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
- dev_priv->ramfc_size);
-}
-int nv04_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t offset;
- int ret;
-
- nv04_instmem_determine_amount(dev);
- nv04_instmem_configure_fixed_tables(dev);
+ ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
+ if (ret)
+ return ret;
- /* Create a heap to manage RAMIN allocations, we don't allocate
- * the space that was reserved for RAMHT/FC/RO.
- */
- offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+ /* Only allow space after RAMFC to be used for object allocation */
+ offset += length;
/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
* on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
@@ -140,46 +90,34 @@ int nv04_instmem_init(struct drm_device *dev)
void
nv04_instmem_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
}
int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *sz)
{
- if (gpuobj->im_backing)
- return -EINVAL;
-
return 0;
}
void
nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- gpuobj->im_backing = NULL;
- }
}
int
nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- if (!gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- gpuobj->im_bound = 1;
return 0;
}
int
nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- gpuobj->im_bound = 0;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
new file mode 100644
index 000000000000..6a6eb697d38e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_pm.h"
+
+struct nv04_pm_state {
+ struct pll_lims pll;
+ struct nouveau_pll_vals calc;
+};
+
+int
+nv04_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ return nouveau_hw_get_clock(dev, id);
+}
+
+void *
+nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nv04_pm_state *state;
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
+ if (!ret) {
+ kfree(state);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return state;
+}
+
+void
+nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_pm_state *state = pre_state;
+ u32 reg = state->pll.reg;
+
+ /* thank the insane nouveau_hw_setpll() interface for this */
+ if (dev_priv->card_type >= NV_40)
+ reg += 4;
+
+ nouveau_hw_setpll(dev, reg, &state->calc);
+ kfree(state);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 0b5d012d7c28..3eb605ddfd03 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
- return nouveau_i2c_identify(dev, "TV encoder",
- nv04_tv_encoder_info, i2c_index);
+ return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
+ NULL, i2c_index);
}
@@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
state->tv_setup = 0;
- if (bind) {
- state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+ if (bind)
state->CRTC[NV_CIO_CRE_49] |= 0x10;
- } else {
+ else
state->CRTC[NV_CIO_CRE_49] &= ~0x10;
- }
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
state->CRTC[NV_CIO_CRE_LCD__INDEX]);
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7a4069cf5d0b..f1b03ad58fd5 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
-#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
int
@@ -48,7 +49,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
@@ -57,7 +58,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
*/
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -80,7 +81,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
if (dev_priv->chipset < 0x17) {
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
} else {
- nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+ nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
(1 << 16) /* 64 Bytes entry*/);
/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index b2f6a57c0cc5..8e68c9731159 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
chan = dev_priv->fifos[chid];
- if (chan)
+ if (chan && chan->pgraph_ctx)
nv10_graph_load_context(chan);
pgraph->fifo_access(dev, true);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 13cdc05b7c2d..28119fd19d03 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
}
-static const struct {
- int hdisplay;
- int vdisplay;
-} modes[] = {
- { 640, 400 },
- { 640, 480 },
- { 720, 480 },
- { 720, 576 },
- { 800, 600 },
- { 1024, 768 },
- { 1280, 720 },
- { 1280, 1024 },
- { 1920, 1080 }
-};
-
-static int nv17_tv_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode;
- struct drm_display_mode *output_mode;
+ struct drm_display_mode *mode, *tv_mode;
int n = 0;
- int i;
-
- if (tv_norm->kind != CTV_ENC_MODE) {
- struct drm_display_mode *tv_mode;
- for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
- mode = drm_mode_duplicate(encoder->dev, tv_mode);
+ for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ mode = drm_mode_duplicate(encoder->dev, tv_mode);
- mode->clock = tv_norm->tv_enc_mode.vrefresh *
- mode->htotal / 1000 *
- mode->vtotal / 1000;
+ mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ mode->htotal / 1000 *
+ mode->vtotal / 1000;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- mode->clock *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ mode->clock *= 2;
- if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
- mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
- mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+ mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
- n++;
- }
- return n;
+ drm_mode_probed_add(connector, mode);
+ n++;
}
- /* tv_norm->kind == CTV_ENC_MODE */
- output_mode = &tv_norm->ctv_enc_mode.mode;
+ return n;
+}
+
+static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
+ struct drm_display_mode *mode;
+ const struct {
+ int hdisplay;
+ int vdisplay;
+ } modes[] = {
+ { 640, 400 },
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 720 },
+ { 1280, 1024 },
+ { 1920, 1080 }
+ };
+ int i, n = 0;
+
for (i = 0; i < ARRAY_SIZE(modes); i++) {
if (modes[i].hdisplay > output_mode->hdisplay ||
modes[i].vdisplay > output_mode->vdisplay)
@@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
mode->type |= DRM_MODE_TYPE_PREFERRED;
+
} else {
mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
- modes[i].vdisplay, 60, false,
- output_mode->flags & DRM_MODE_FLAG_INTERLACE,
- false);
+ modes[i].vdisplay, 60, false,
+ (output_mode->flags &
+ DRM_MODE_FLAG_INTERLACE), false);
}
/* CVT modes are sometimes unsuitable... */
@@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
- mode->hdisplay) * 9 / 10) & ~7;
mode->hsync_end = mode->hsync_start + 8;
}
+
if (output_mode->vdisplay >= 1024) {
mode->vtotal = output_mode->vtotal;
mode->vsync_start = output_mode->vsync_start;
@@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
drm_mode_probed_add(connector, mode);
n++;
}
+
return n;
}
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE)
+ return nv17_tv_get_hd_modes(encoder, connector);
+ else
+ return nv17_tv_get_ld_modes(encoder, connector);
+}
+
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
@@ -408,15 +423,8 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
}
- /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
- * at LCD__INDEX which we don't alter
- */
- if (!(*cr_lcd & 0x44)) {
- if (tv_norm->kind == CTV_ENC_MODE)
- *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
- else
- *cr_lcd = 0;
- }
+ if (tv_norm->kind == CTV_ENC_MODE)
+ *cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index c00977cedabd..6bf03840f9eb 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
/* TV hardware access functions */
-static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
{
nv_wr32(dev, reg, val);
}
@@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
return nv_rd32(dev, reg);
}
-static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
+ uint8_t val)
{
nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
nv_write_ptv(dev, NV_PTV_TV_DATA, val);
@@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
return nv_read_ptv(dev, NV_PTV_TV_DATA);
}
-#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
-#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
-#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+#define nv_load_ptv(dev, state, reg) \
+ nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) \
+ state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) \
+ nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index d64683d97e0d..9d3893c50a41 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder)
struct filter_params *p = &fparams[k][j];
for (i = 0; i < 7; i++) {
- int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
- + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
- + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
- + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
-
- (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
+ int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
+ p->ki3*i*i*i)
+ + (p->kr + p->kir*i + p->ki2r*i*i +
+ p->ki3r*i*i*i) * rs[k]
+ + (p->kf + p->kif*i + p->ki2f*i*i +
+ p->ki3f*i*i*i) * flicker
+ + (p->krf + p->kirf*i + p->ki2rf*i*i +
+ p->ki3rf*i*i*i) * flicker * rs[k];
+
+ (*filters[k])[j][i] = (c + id5/2) >> 39
+ & (0x1 << 31 | 0x7f << 9);
}
}
}
@@ -349,7 +354,8 @@ static void tv_setup_filter(struct drm_encoder *encoder)
/* Hardware state saving/restoring */
-static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_save_filter(struct drm_device *dev, uint32_t base,
+ uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[
}
}
-static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_load_filter(struct drm_device *dev, uint32_t base,
+ uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -504,10 +511,10 @@ void nv17_tv_update_properties(struct drm_encoder *encoder)
break;
}
- regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
- tv_enc->saturation);
- regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
- tv_enc->saturation);
+ regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
+ 255, tv_enc->saturation);
+ regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
+ 255, tv_enc->saturation);
regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
nv_load_ptv(dev, regs, 204);
@@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
- struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+ struct drm_display_mode *output_mode =
+ &get_tv_norm(encoder)->ctv_enc_mode.mode;
int overscan, hmargin, vmargin, hratio, vratio;
/* The rescaler doesn't do the right thing for interlaced modes. */
@@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
- hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
- overscan);
- vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
- overscan);
+ hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
+ hmargin, overscan);
+ vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
+ vmargin, overscan);
- hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
- vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
+ hratio = crtc_mode->hdisplay * 0x800 /
+ (output_mode->hdisplay - 2*hmargin);
+ vratio = crtc_mode->vdisplay * 0x800 /
+ (output_mode->vdisplay - 2*vmargin) & ~3;
regs->fp_horiz_regs[FP_VALID_START] = hmargin;
regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 17f309b36c91..12ab9cd56eca 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
- nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+ nv_wo32(ctx, 0x281c, 0x3f800000);
+ nv_wo32(ctx, 0x2830, 0x3f800000);
+ nv_wo32(ctx, 0x285c, 0x40000000);
+ nv_wo32(ctx, 0x2860, 0x3f800000);
+ nv_wo32(ctx, 0x2864, 0x3f000000);
+ nv_wo32(ctx, 0x286c, 0x40000000);
+ nv_wo32(ctx, 0x2870, 0x3f800000);
+ nv_wo32(ctx, 0x2878, 0xbf800000);
+ nv_wo32(ctx, 0x2880, 0xbf800000);
+ nv_wo32(ctx, 0x34a4, 0x000fe000);
+ nv_wo32(ctx, 0x3530, 0x000003f8);
+ nv_wo32(ctx, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
- nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
- nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
- nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+ nv_wo32(ctx, 0x035c, 0xffff0000);
+ nv_wo32(ctx, 0x03c0, 0x0fff0000);
+ nv_wo32(ctx, 0x03c4, 0x0fff0000);
+ nv_wo32(ctx, 0x049c, 0x00000101);
+ nv_wo32(ctx, 0x04b0, 0x00000111);
+ nv_wo32(ctx, 0x04c8, 0x00000080);
+ nv_wo32(ctx, 0x04cc, 0xffff0000);
+ nv_wo32(ctx, 0x04d0, 0x00000001);
+ nv_wo32(ctx, 0x04e4, 0x44400000);
+ nv_wo32(ctx, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
- nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
- nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
- nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
- nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05e0, 0x4b7fffff);
+ nv_wo32(ctx, 0x0620, 0x00000080);
+ nv_wo32(ctx, 0x0624, 0x30201000);
+ nv_wo32(ctx, 0x0628, 0x70605040);
+ nv_wo32(ctx, 0x062c, 0xb0a09080);
+ nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
+ nv_wo32(ctx, 0x0664, 0x00000001);
+ nv_wo32(ctx, 0x066c, 0x00004000);
+ nv_wo32(ctx, 0x0678, 0x00000001);
+ nv_wo32(ctx, 0x0680, 0x00040000);
+ nv_wo32(ctx, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+ nv_wo32(ctx, 0x2704, 0x3f800000);
+ nv_wo32(ctx, 0x2718, 0x3f800000);
+ nv_wo32(ctx, 0x2744, 0x40000000);
+ nv_wo32(ctx, 0x2748, 0x3f800000);
+ nv_wo32(ctx, 0x274c, 0x3f000000);
+ nv_wo32(ctx, 0x2754, 0x40000000);
+ nv_wo32(ctx, 0x2758, 0x3f800000);
+ nv_wo32(ctx, 0x2760, 0xbf800000);
+ nv_wo32(ctx, 0x2768, 0xbf800000);
+ nv_wo32(ctx, 0x308c, 0x000fe000);
+ nv_wo32(ctx, 0x3108, 0x000003f8);
+ nv_wo32(ctx, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
- nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080000);
+ nv_wo32(ctx, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(dev, ctx, i/4, 0x000105b8);
+ nv_wo32(ctx, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(ctx, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
- nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
- nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
- nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
- nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
}
- nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
- nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
- nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
- nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
- nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+ nv_wo32(ctx, 0x269c, 0x3f800000);
+ nv_wo32(ctx, 0x26b0, 0x3f800000);
+ nv_wo32(ctx, 0x26dc, 0x40000000);
+ nv_wo32(ctx, 0x26e0, 0x3f800000);
+ nv_wo32(ctx, 0x26e4, 0x3f000000);
+ nv_wo32(ctx, 0x26ec, 0x40000000);
+ nv_wo32(ctx, 0x26f0, 0x3f800000);
+ nv_wo32(ctx, 0x26f8, 0xbf800000);
+ nv_wo32(ctx, 0x2700, 0xbf800000);
+ nv_wo32(ctx, 0x3024, 0x000fe000);
+ nv_wo32(ctx, 0x30a0, 0x000003f8);
+ nv_wo32(ctx, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(dev, ctx, i/4, 0x001c527c);
+ nv_wo32(ctx, i, 0x001c527c);
}
static void
@@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
- nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
- nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+ nv_wo32(ctx, 0x0410, 0x00000101);
+ nv_wo32(ctx, 0x0424, 0x00000111);
+ nv_wo32(ctx, 0x0428, 0x00000060);
+ nv_wo32(ctx, 0x0444, 0x00000080);
+ nv_wo32(ctx, 0x0448, 0xffff0000);
+ nv_wo32(ctx, 0x044c, 0x00000001);
+ nv_wo32(ctx, 0x0460, 0x44400000);
+ nv_wo32(ctx, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0550, 0x4b7fffff);
+ nv_wo32(ctx, 0x058c, 0x00000080);
+ nv_wo32(ctx, 0x0590, 0x30201000);
+ nv_wo32(ctx, 0x0594, 0x70605040);
+ nv_wo32(ctx, 0x0598, 0xb8a89888);
+ nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x085c, 0x00040000);
+ nv_wo32(ctx, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 1, 0x0436086c);
+ nv_wo32(ctx, i + 2, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
- nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
- nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x344c, 0x3f800000);
+ nv_wo32(ctx, 0x3808, 0x3f800000);
+ nv_wo32(ctx, 0x381c, 0x3f800000);
+ nv_wo32(ctx, 0x3848, 0x40000000);
+ nv_wo32(ctx, 0x384c, 0x3f800000);
+ nv_wo32(ctx, 0x3850, 0x3f000000);
+ nv_wo32(ctx, 0x3858, 0x40000000);
+ nv_wo32(ctx, 0x385c, 0x3f800000);
+ nv_wo32(ctx, 0x3864, 0xbf800000);
+ nv_wo32(ctx, 0x386c, 0xbf800000);
}
static void
@@ -254,57 +254,57 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
- nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
- nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
- nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+ nv_wo32(ctx, 0x040c, 0x01000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0544, 0x4b7fffff);
+ nv_wo32(ctx, 0x057c, 0x00000080);
+ nv_wo32(ctx, 0x0580, 0x30201000);
+ nv_wo32(ctx, 0x0584, 0x70605040);
+ nv_wo32(ctx, 0x0588, 0xb8a89888);
+ nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0850, 0x00040000);
+ nv_wo32(ctx, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 1, 0x0436086c);
+ nv_wo32(ctx, i + 2, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
- nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x2ae0, 0x3f800000);
+ nv_wo32(ctx, 0x2e9c, 0x3f800000);
+ nv_wo32(ctx, 0x2eb0, 0x3f800000);
+ nv_wo32(ctx, 0x2edc, 0x40000000);
+ nv_wo32(ctx, 0x2ee0, 0x3f800000);
+ nv_wo32(ctx, 0x2ee4, 0x3f000000);
+ nv_wo32(ctx, 0x2eec, 0x40000000);
+ nv_wo32(ctx, 0x2ef0, 0x3f800000);
+ nv_wo32(ctx, 0x2ef8, 0xbf800000);
+ nv_wo32(ctx, 0x2f00, 0xbf800000);
}
static void
@@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
int i;
- nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
- nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
- nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
- nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
- nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
- nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
- nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
- nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+ nv_wo32(ctx, 0x040c, 0x00000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0fff0000);
- nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(dev, ctx, i/4, 0x07ff0000);
- nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
- nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
- nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
- nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
- nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
- nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
- nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x054c, 0x4b7fffff);
+ nv_wo32(ctx, 0x0588, 0x00000080);
+ nv_wo32(ctx, 0x058c, 0x30201000);
+ nv_wo32(ctx, 0x0590, 0x70605040);
+ nv_wo32(ctx, 0x0594, 0xb8a89888);
+ nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00010588);
+ nv_wo32(ctx, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00030303);
+ nv_wo32(ctx, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ nv_wo32(ctx, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(dev, ctx, i/4, 0x01012000);
+ nv_wo32(ctx, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00080008);
- nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
- nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0860, 0x00040000);
+ nv_wo32(ctx, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(dev, ctx, i/4, 0x00040004);
+ nv_wo32(ctx, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
- nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
- nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(dev, ctx, i/4, 0x0000ffff);
- nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
- nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
- nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
- nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
- nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x3450, 0x3f800000);
+ nv_wo32(ctx, 0x380c, 0x3f800000);
+ nv_wo32(ctx, 0x3820, 0x3f800000);
+ nv_wo32(ctx, 0x384c, 0x40000000);
+ nv_wo32(ctx, 0x3850, 0x3f800000);
+ nv_wo32(ctx, 0x3854, 0x3f000000);
+ nv_wo32(ctx, 0x385c, 0x40000000);
+ nv_wo32(ctx, 0x3860, 0x3f800000);
+ nv_wo32(ctx, 0x3868, 0xbf800000);
+ nv_wo32(ctx, 0x3870, 0xbf800000);
}
int
@@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
- unsigned int idoffs = 0x28/4;
+ unsigned int idoffs = 0x28;
int ret;
switch (dev_priv->chipset) {
@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan)
BUG_ON(1);
}
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
- ctx_init(dev, chan->ramin_grctx->gpuobj);
+ ctx_init(dev, chan->ramin_grctx);
/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
- nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
- (chan->id << 24) | 0x1); /* CTX_USER */
+ nv_wo32(chan->ramin_grctx, idoffs,
+ (chan->id << 24) | 0x1); /* CTX_USER */
- nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id,
- chan->ramin_grctx->instance >> 4);
+ nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
return 0;
}
@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- if (chan->ramin_grctx)
- nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
-
- nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+ nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
}
int
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan)
if (!chan->ramin_grctx)
return -EINVAL;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev)
chan = pgraph->channel(dev);
if (!chan)
return 0;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev)
if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctx_table);
+ ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- pgraph->ctx_table->instance >> 4);
+ pgraph->ctx_table->pinst >> 4);
nv20_graph_rdi(dev);
@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
int
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev)
if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctx_table);
+ ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- pgraph->ctx_table->instance >> 4);
+ pgraph->ctx_table->pinst >> 4);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 2b67f1835c39..d337b8b28cdd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -27,8 +27,9 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_ramht.h"
-#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128
int
@@ -42,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
@@ -50,7 +51,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -58,7 +59,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+ nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
/* enable the fifo dma operation */
@@ -77,8 +78,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
- if (chan->ramfc)
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
@@ -241,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht_bits - 9) << 16) |
- (dev_priv->ramht_offset >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
switch (dev_priv->chipset) {
case 0x47:
@@ -271,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
nv_wr32(dev, 0x2230, 0);
nv_wr32(dev, NV40_PFIFO_RAMFC,
((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc_offset) >> 16) | (3 << 16));
+ dev_priv->ramfc->pinst) >> 16) | (3 << 16));
break;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index fd7d2b501316..7ee1b91569b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
struct nouveau_channel *chan = dev_priv->fifos[i];
if (chan && chan->ramin_grctx &&
- chan->ramin_grctx->instance == inst)
+ chan->ramin_grctx->pinst == inst)
return chan;
}
@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct nouveau_grctx ctx = {};
int ret;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
+ ctx.data = chan->ramin_grctx;
nv40_grctx_init(&ctx);
- nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
- chan->ramin_grctx->gpuobj->im_pramin->start);
+ nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
return 0;
}
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
- nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
static int
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)
if (!chan->ramin_grctx)
return -EINVAL;
- inst = chan->ramin_grctx->instance >> 4;
+ inst = chan->ramin_grctx->pinst >> 4;
ret = nv40_graph_transfer_context(dev, inst, 0);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 9b5c97469588..ce585093264e 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
- nv_wo32(dev, obj, offset, 0x3f800000);
+ nv_wo32(obj, offset * 4, 0x3f800000);
for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
for (i = 0; i < vs_nr_b0 * 6; i += 6)
- nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+ nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
for (i = 0; i < vs_nr_b1 * 4; i += 4)
- nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+ nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index bfd4ca2fe7ef..16380d52cd88 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
- PAGE_SHIFT) >> 8);
+ OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
if (dev_priv->chipset != 0x50) {
BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
@@ -266,15 +265,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pll_lims pll;
- uint32_t reg, reg1, reg2;
+ uint32_t reg1, reg2;
int ret, N1, M1, N2, M2, P;
- if (dev_priv->chipset < NV_C0)
- reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- else
- reg = 0x614140 + (head * 0x800);
-
- ret = get_pll_limits(dev, reg, &pll);
+ ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
if (ret)
return ret;
@@ -286,11 +280,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
pclk, ret, N1, M1, N2, M2, P);
- reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
- nv_wr32(dev, reg, 0x10000611);
- nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
- nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
+ reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
+ nv_wr32(dev, pll.reg + 0, 0x10000611);
+ nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else
if (dev_priv->chipset < NV_C0) {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
@@ -300,10 +294,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
pclk, ret, N1, N2, M1, P);
- reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
- nv_wr32(dev, reg, 0x50000610);
- nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
- nv_wr32(dev, reg + 8, N2);
+ reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
+ nv_wr32(dev, pll.reg + 0, 0x50000610);
+ nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, pll.reg + 8, N2);
} else {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
if (ret <= 0)
@@ -312,9 +306,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
pclk, ret, N1, N2, M1, P);
- nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
- nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
- nv_wr32(dev, reg + 0x10, N2 << 16);
+ nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
+ nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
+ nv_wr32(dev, pll.reg + 0x10, N2 << 16);
}
return 0;
@@ -338,7 +332,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nv50_cursor_fini(nv_crtc);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc->mode);
kfree(nv_crtc);
@@ -491,8 +487,9 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb, bool update)
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *passed_fb,
+ int x, int y, bool update, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
@@ -504,6 +501,28 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* If atomic, we want to switch to the fb we were passed, so
+ * now we update pointers to do that. (We don't pin; just
+ * assume we're already pinned and update the base address.)
+ */
+ if (atomic) {
+ drm_fb = passed_fb;
+ fb = nouveau_framebuffer(passed_fb);
+ }
+ else {
+ /* If not atomic, we can go ahead and pin, and unpin the
+ * old fb we were passed.
+ */
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (passed_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+ }
+
switch (drm_fb->depth) {
case 8:
format = NV50_EVO_CRTC_FB_DEPTH_8;
@@ -526,15 +545,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -685,14 +695,22 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -702,6 +720,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
.mode_fixup = nv50_crtc_mode_fixup,
.mode_set = nv50_crtc_mode_set,
.mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
.load_lut = nv50_crtc_lut_load,
};
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 03ad7ab14f09..1b9ce3021aa3 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 1bc085962945..875414b09ade 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,7 +79,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
/* wait for it to be done */
- if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 612fa6d6a0cb..55c9663ef2bf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -30,8 +30,22 @@
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
+static inline int
+nv50_sor_nr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ return 2;
+
+ return 4;
+}
+
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
@@ -42,6 +56,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan)
*pchan = NULL;
nouveau_gpuobj_channel_takedown(chan);
+ nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
if (chan->user)
@@ -65,23 +80,23 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &obj);
- return ret;
- }
-
- nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(dev, obj, 1, limit);
- nv_wo32(dev, obj, 2, offset);
- nv_wo32(dev, obj, 3, 0x00000000);
- nv_wo32(dev, obj, 4, 0x00000000);
+ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(obj, 4, limit);
+ nv_wo32(obj, 8, offset);
+ nv_wo32(obj, 12, 0x00000000);
+ nv_wo32(obj, 16, 0x00000000);
if (dev_priv->card_type < NV_C0)
- nv_wo32(dev, obj, 5, 0x00010000);
+ nv_wo32(obj, 20, 0x00010000);
else
- nv_wo32(dev, obj, 5, 0x00020000);
+ nv_wo32(obj, 20, 0x00020000);
dev_priv->engine.instmem.flush(dev);
+ ret = nouveau_ramht_insert(evo, name, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ if (ret) {
+ return ret;
+ }
+
return 0;
}
@@ -89,6 +104,7 @@ static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *chan;
int ret;
@@ -102,32 +118,35 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
chan->user_get = 4;
chan->user_put = 0;
- INIT_LIST_HEAD(&chan->ramht_refs);
-
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+ ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap,
- chan->ramin->gpuobj->im_pramin->start, 32768);
+ ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
- 0, &chan->ramht);
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
nv50_evo_channel_del(pchan);
return ret;
}
+ ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
if (dev_priv->chipset != 0x50) {
ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
0, 0xffffffff);
@@ -227,11 +246,11 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
/* SOR */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
- /* Something not yet in use, tv-out maybe. */
+ /* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
@@ -260,7 +279,7 @@ nv50_display_init(struct drm_device *dev)
if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
- if (!nv_wait(0x006194e8, 2, 0)) {
+ if (!nv_wait(dev, 0x006194e8, 2, 0)) {
NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
nv_rd32(dev, 0x6194e8));
@@ -291,7 +310,8 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
- if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ 0x40000000, 0x40000000)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
@@ -300,7 +320,7 @@ nv50_display_init(struct drm_device *dev)
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
@@ -310,7 +330,7 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
NV_ERROR(dev, "timeout: "
@@ -321,16 +341,16 @@ nv50_display_init(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
/* initialise fifo */
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
- ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+ ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
- if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+ if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
return -EBUSY;
@@ -370,7 +390,7 @@ nv50_display_init(struct drm_device *dev)
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
- if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+ if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
/* enable clock change interrupts. */
@@ -424,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev)
continue;
nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
"0x%08x\n", mask, mask);
NV_ERROR(dev, "0x610024 = 0x%08x\n",
@@ -434,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
- if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+ if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
NV_ERROR(dev, "0x610200 = 0x%08x\n",
nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
}
for (i = 0; i < 3; i++) {
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
@@ -710,7 +730,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
or = i;
}
- for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
@@ -841,7 +861,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
or = i;
}
- for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 ||
dev_priv->chipset == 0xa0)
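A minimal sketch of the nv_wait() change applied throughout nv50_display.c above, assuming only the signatures visible in this diff: the poll helper now takes the drm_device explicitly, while the register, mask and expected value stay the same.

    /* before: nv_wait(reg, mask, value) */
    if (!nv_wait(0x610200, 0x80000000, 0x00000000))
        NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");

    /* after: nv_wait(dev, reg, mask, value) */
    if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000))
        NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
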
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 32611bd30e6d..cd1988b15d2c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev)
case 0x50:
nv_wr32(dev, 0x100c90, 0x0707ff);
break;
+ case 0xa3:
case 0xa5:
case 0xa8:
nv_wr32(dev, 0x100c90, 0x0d0fff);
@@ -36,3 +37,42 @@ void
nv50_fb_takedown(struct drm_device *dev)
{
}
+
+void
+nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 trap[6], idx, chinst;
+ int i, ch;
+
+ idx = nv_rd32(dev, 0x100c90);
+ if (!(idx & 0x80000000))
+ return;
+ idx &= 0x00ffffff;
+
+ for (i = 0; i < 6; i++) {
+ nv_wr32(dev, 0x100c90, idx | i << 24);
+ trap[i] = nv_rd32(dev, 0x100c94);
+ }
+ nv_wr32(dev, 0x100c90, idx | 0x80000000);
+
+ if (!display)
+ return;
+
+ chinst = (trap[2] << 16) | trap[1];
+ for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (chinst == chan->ramin->vinst >> 12)
+ break;
+ }
+
+ NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
+ "channel %d (0x%08x)\n",
+ name, (trap[5] & 0x100 ? "read" : "write"),
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+ trap[0], ch, chinst);
+}
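The new nv50_fb_vm_trap() above latches the six trap words via 0x100c90/0x100c94, re-arms the trap, and, when display is set, walks the channel list to match the trapped instance address against each channel's ramin->vinst. A hypothetical call site, sketched only from the signature added here (the caller and the engine name are illustrative, not part of this patch):

    /* e.g. from an engine fault handler: decode, attribute and log the trap */
    nv50_fb_vm_trap(dev, 1, "PGRAPH");
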
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bf025c6fc6f..6dcf048eddbc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,6 +1,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
void
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+ ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
+ nouveau_gpuobj_ref(NULL, &eng2d);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index fb0281ae8f90..a46a961102f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -27,13 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj_ref *cur;
+ struct nouveau_gpuobj *cur;
int i, nr;
NV_DEBUG(dev, "\n");
@@ -43,12 +44,14 @@ nv50_fifo_playlist_update(struct drm_device *dev)
/* We never schedule channel 0 or 127 */
for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
- nv_wo32(dev, cur->gpuobj, nr++, i);
+ if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+ nv_wo32(cur, (nr * 4), i);
+ nr++;
+ }
}
dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x32f4, cur->instance >> 12);
+ nv_wr32(dev, 0x32f4, cur->vinst >> 12);
nv_wr32(dev, 0x32ec, nr);
nv_wr32(dev, 0x2500, 0x101);
}
@@ -63,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel)
NV_DEBUG(dev, "ch%d\n", channel);
if (dev_priv->chipset == 0x50)
- inst = chan->ramfc->instance >> 12;
+ inst = chan->ramfc->vinst >> 12;
else
- inst = chan->ramfc->instance >> 8;
+ inst = chan->ramfc->vinst >> 8;
nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
@@ -163,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev)
goto just_reset;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[0]);
+ ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[0]);
if (ret) {
NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
return ret;
}
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[1]);
+ ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[1]);
if (ret) {
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
return ret;
}
@@ -203,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev)
if (!pfifo->playlist[0])
return;
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
- nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
int
@@ -226,59 +229,54 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
if (dev_priv->chipset == 0x50) {
- uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
- uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
-
- ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
- 0x100, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ramfc,
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+ chan->ramin->vinst, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
&chan->ramfc);
if (ret)
return ret;
- ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
- ramin_voffset + 0x0400, 4096,
- 0, NULL, &chan->cache);
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
+ chan->ramin->vinst + 0x0400,
+ 4096, 0, &chan->cache);
if (ret)
return ret;
} else {
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
+ ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
if (ret)
return ret;
- ramfc = chan->ramfc->gpuobj;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
- 0, &chan->cache);
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
+ 0, &chan->cache);
if (ret)
return ret;
}
+ ramfc = chan->ramfc;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
- nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->instance >> 4));
- nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
- nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
- nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
- nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
- nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
- chan->dma.ib_base * 4);
- nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x60, 0x7fffffff);
+ nv_wo32(ramfc, 0x40, 0x00000000);
+ nv_wo32(ramfc, 0x7c, 0x30000001);
+ nv_wo32(ramfc, 0x78, 0x00000000);
+ nv_wo32(ramfc, 0x3c, 0x403f6078);
+ nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+ nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
- nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
- nv_wo32(dev, chan->ramin->gpuobj, 1,
- chan->ramfc->instance >> 8);
+ nv_wo32(chan->ramin, 0, chan->id);
+ nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
- nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
- nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+ nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
+ nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
}
dev_priv->engine.instmem.flush(dev);
@@ -293,12 +291,13 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
+ struct nouveau_gpuobj *ramfc = NULL;
NV_DEBUG(dev, "ch%d\n", chan->id);
/* This will ensure the channel is seen as disabled. */
- chan->ramfc = NULL;
+ nouveau_gpuobj_ref(chan->ramfc, &ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
nv50_fifo_channel_disable(dev, chan->id);
/* Dummy channel, also used on ch 127 */
@@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
nv50_fifo_channel_disable(dev, 127);
nv50_fifo_playlist_update(dev);
- nouveau_gpuobj_ref_del(dev, &ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
+ nouveau_gpuobj_ref(NULL, &ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->cache);
}
int
@@ -315,63 +314,63 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
- struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+ struct nouveau_gpuobj *ramfc = chan->ramfc;
+ struct nouveau_gpuobj *cache = chan->cache;
int ptr, cnt;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
- nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
- nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
- nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
- nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
- nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
- nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
- nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
- nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
- nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
- nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
- nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
- nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
- nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
- nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
- nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
- nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
- nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
- nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
- nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
- nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
- nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
- nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
- nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
- nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
- nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
- nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
- nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
- nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
- nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
- nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
- nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
- nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
-
- cnt = nv_ro32(dev, ramfc, 0x84/4);
+ nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
+ nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
+ nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
+ nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
+ nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
+ nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
+ nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
+ nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
+ nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
+ nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
+ nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
+ nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
+ nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
+ nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
+ nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
+ nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
+ nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
+ nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
+ nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
+ nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
+ nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
+ nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
+ nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
+ nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
+ nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
+ nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
+ nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
+ nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
+ nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
+ nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
+ nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
+ nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
+ nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
+
+ cnt = nv_ro32(ramfc, 0x84);
for (ptr = 0; ptr < cnt; ptr++) {
nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
- nv_ro32(dev, cache, (ptr * 2) + 0));
+ nv_ro32(cache, (ptr * 8) + 0));
nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
- nv_ro32(dev, cache, (ptr * 2) + 1));
+ nv_ro32(cache, (ptr * 8) + 4));
}
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
if (dev_priv->chipset != 0x50) {
- nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
- nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
- nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
- nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
- nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+ nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
+ nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
+ nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
+ nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
+ nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
}
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
@@ -399,62 +398,63 @@ nv50_fifo_unload_context(struct drm_device *dev)
return -EINVAL;
}
NV_DEBUG(dev, "ch%d\n", chan->id);
- ramfc = chan->ramfc->gpuobj;
- cache = chan->cache->gpuobj;
-
- nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
- nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
- nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
- nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
- nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
- nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
- nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
- nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
- nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
- nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
- nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
- nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
- nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
- nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
- nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
- nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
- nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
- nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
- nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
- nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
- nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
- nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
- nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
- nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
- nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
- nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
- nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
- nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
- nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
- nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
- nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
- nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
- nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+ ramfc = chan->ramfc;
+ cache = chan->cache;
+
+ nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
+ nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
+ nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
+ nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
+ nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
+ nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
+ nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
+ nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
+ nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
+ nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
+ nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
+ nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
+ nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
+ nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
+ nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
+ nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
+ nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
+ nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
+ nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
+ nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
+ nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
+ nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
+ nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
+ nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
+ nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
+ nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
+ nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
+ nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
+ nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
+ nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
+ nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
+ nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
+ nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
ptr = 0;
while (put != get) {
- nv_wo32(dev, cache, ptr++,
- nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
- nv_wo32(dev, cache, ptr++,
- nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+ nv_wo32(cache, ptr + 0,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+ nv_wo32(cache, ptr + 4,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
get = (get + 1) & 0x1ff;
+ ptr += 8;
}
/* guessing that all the 0x34xx regs aren't on NV50 */
if (dev_priv->chipset != 0x50) {
- nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
- nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
- nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
- nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
- nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
- nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+ nv_wo32(ramfc, 0x84, ptr >> 3);
+ nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
+ nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
+ nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
+ nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
+ nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
}
dev_priv->engine.instmem.flush(dev);
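The RAMFC save/restore above also shows the reworked instmem accessors: nv_ro32()/nv_wo32() now take the gpuobj and a byte offset rather than a drm_device, a ref->gpuobj and a word index. A minimal before/after sketch using only calls that appear in this diff:

    /* before: word index on the wrapped object */
    nv_wo32(dev, chan->ramfc->gpuobj, 0x48/4, nv_rd32(dev, 0x322c));

    /* after: byte offset on the gpuobj itself */
    nv_wo32(chan->ramfc, 0x48, nv_rd32(dev, 0x322c));
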
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 1413028e1580..cbf5ae2f67d4 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-
+#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
static void
@@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev)
/* Be sure we're not in the middle of a context switch or bad things
* will happen, such as unloading the wrong pgraph context.
*/
- if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
NV_ERROR(dev, "Ctxprog is still running\n");
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
@@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev)
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
struct nouveau_channel *chan = dev_priv->fifos[i];
- if (chan && chan->ramin && chan->ramin->instance == inst)
+ if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
}
@@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
- struct nouveau_gpuobj *obj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
- obj = chan->ramin_grctx->gpuobj;
hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
- nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
- pgraph->grctx_size - 1);
- nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
- nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
- nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
- nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+ nv_wo32(ramin, hdr + 0x00, 0x00190002);
+ nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
+ pgraph->grctx_size - 1);
+ nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+ nv_wo32(ramin, hdr + 0x0c, 0);
+ nv_wo32(ramin, hdr + 0x10, 0);
+ nv_wo32(ramin, hdr + 0x14, 0x00010000);
ctx.dev = chan->dev;
ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = obj;
+ ctx.data = chan->ramin_grctx;
nv50_grctx_init(&ctx);
- nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+ nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
return 0;
@@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramin || !chan->ramin->gpuobj)
+ if (!chan->ramin)
return;
for (i = hdr; i < hdr + 24; i += 4)
- nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+ nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
- nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
static int
@@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
int
nv50_graph_load_context(struct nouveau_channel *chan)
{
- uint32_t inst = chan->ramin->instance >> 12;
+ uint32_t inst = chan->ramin->vinst >> 12;
NV_DEBUG(chan->dev, "ch%d\n", chan->id);
return nv50_graph_do_load_context(chan->dev, inst);
@@ -327,15 +325,16 @@ static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
- struct nouveau_gpuobj_ref *ref = NULL;
+ struct nouveau_gpuobj *gpuobj;
- if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
return -ENOENT;
- if (nouveau_notifier_offset(ref->gpuobj, NULL))
+ if (nouveau_notifier_offset(gpuobj, NULL))
return -EINVAL;
- chan->nvsw.vblsem = ref->gpuobj;
+ chan->nvsw.vblsem = gpuobj;
chan->nvsw.vblsem_offset = ~0;
return 0;
}
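The vblsem handler above illustrates the RAMHT lookup rework: nouveau_ramht_find() returns the gpuobj for a handle directly (or NULL), replacing nouveau_gpuobj_ref_find() and its nouveau_gpuobj_ref out-parameter. A minimal sketch of the new pattern, assuming only the calls used in this diff ('handle' stands in for the method data word):

    struct nouveau_gpuobj *gpuobj = nouveau_ramht_find(chan, handle);
    if (!gpuobj)
        return -ENOENT;
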
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 42a8fb20c1e6..336aab2a24a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -103,6 +103,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
+
/*
* This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
* the GPU itself that does context-switching, but it needs a special
@@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
break;
default:
NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
@@ -268,6 +272,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
*/
static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
+static void
nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400840, 0xffe806a8);
}
gr_def(ctx, 0x400844, 0x00000002);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ if (IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x400894, 0x00001000);
gr_def(ctx, 0x4008e8, 0x00000003);
gr_def(ctx, 0x4008ec, 0x00001000);
@@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset >= 0xa0)
cp_ctx(ctx, 0x400b00, 0x1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ if (IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, 0x400b10, 0x1);
gr_def(ctx, 0x400b10, 0x0001629d);
cp_ctx(ctx, 0x400b20, 0x1);
gr_def(ctx, 0x400b20, 0x0001629d);
}
+ nv50_graph_construct_mmio_ddata(ctx);
+
/* 0C00: VFETCH */
cp_ctx(ctx, 0x400c08, 0x2);
gr_def(ctx, 0x400c08, 0x0000fe0c);
@@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
cp_ctx(ctx, 0x401008, 0x4);
gr_def(ctx, 0x401014, 0x00001000);
- } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, 0x401008, 0x5);
gr_def(ctx, 0x401018, 0x00001000);
} else {
@@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa3:
case 0xa5:
case 0xa8:
+ case 0xaf:
gr_def(ctx, 0x401c00, 0x142500df);
break;
}
+ /* 2000 */
+
/* 2400 */
cp_ctx(ctx, 0x402400, 0x1);
if (dev_priv->chipset == 0x50)
@@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x402408, 0x2);
gr_def(ctx, 0x402408, 0x00000600);
- /* 2800 */
+ /* 2800: CSCHED */
cp_ctx(ctx, 0x402800, 0x1);
if (dev_priv->chipset == 0x50)
gr_def(ctx, 0x402800, 0x00000006);
- /* 2C00 */
+ /* 2C00: ZCULL */
cp_ctx(ctx, 0x402c08, 0x6);
if (dev_priv->chipset != 0x50)
gr_def(ctx, 0x402c14, 0x01000000);
@@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x402ca0, 0x2);
if (dev_priv->chipset < 0xa0)
gr_def(ctx, 0x402ca0, 0x00000400);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x402ca0, 0x00000800);
else
gr_def(ctx, 0x402ca0, 0x00000400);
cp_ctx(ctx, 0x402cac, 0x4);
- /* 3000 */
+ /* 3000: ENG2D */
cp_ctx(ctx, 0x403004, 0x1);
gr_def(ctx, 0x403004, 0x00000001);
- /* 3404 */
+ /* 3400 */
if (dev_priv->chipset >= 0xa0) {
cp_ctx(ctx, 0x403404, 0x1);
gr_def(ctx, 0x403404, 0x00000001);
}
- /* 5000 */
+ /* 5000: CCACHE */
cp_ctx(ctx, 0x405000, 0x1);
switch (dev_priv->chipset) {
case 0x50:
@@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
gr_def(ctx, 0x405000, 0x000e0080);
break;
case 0x86:
@@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x405024, 0x1);
cp_ctx(ctx, 0x40502c, 0x1);
- /* 5400 or maybe 4800 */
- if (dev_priv->chipset == 0x50) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xea);
- } else if (dev_priv->chipset < 0x94) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xcb);
- } else if (dev_priv->chipset < 0xa0) {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xcc);
- } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- offset = 0x404800;
- cp_ctx(ctx, 0x404800, 0xda);
- } else {
- offset = 0x405400;
- cp_ctx(ctx, 0x405400, 0xd4);
- }
- gr_def(ctx, offset + 0x0c, 0x00000002);
- gr_def(ctx, offset + 0x10, 0x00000001);
- if (dev_priv->chipset >= 0x94)
- offset += 4;
- gr_def(ctx, offset + 0x1c, 0x00000001);
- gr_def(ctx, offset + 0x20, 0x00000100);
- gr_def(ctx, offset + 0x38, 0x00000002);
- gr_def(ctx, offset + 0x3c, 0x00000001);
- gr_def(ctx, offset + 0x40, 0x00000001);
- gr_def(ctx, offset + 0x50, 0x00000001);
- gr_def(ctx, offset + 0x54, 0x003fffff);
- gr_def(ctx, offset + 0x58, 0x00001fff);
- gr_def(ctx, offset + 0x60, 0x00000001);
- gr_def(ctx, offset + 0x64, 0x00000001);
- gr_def(ctx, offset + 0x6c, 0x00000001);
- gr_def(ctx, offset + 0x70, 0x00000001);
- gr_def(ctx, offset + 0x74, 0x00000001);
- gr_def(ctx, offset + 0x78, 0x00000004);
- gr_def(ctx, offset + 0x7c, 0x00000001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x80, 0x00000001);
- gr_def(ctx, offset + 0x84, 0x00000001);
- gr_def(ctx, offset + 0x88, 0x00000007);
- gr_def(ctx, offset + 0x8c, 0x00000001);
- gr_def(ctx, offset + 0x90, 0x00000007);
- gr_def(ctx, offset + 0x94, 0x00000001);
- gr_def(ctx, offset + 0x98, 0x00000001);
- gr_def(ctx, offset + 0x9c, 0x00000001);
- if (dev_priv->chipset == 0x50) {
- gr_def(ctx, offset + 0xb0, 0x00000001);
- gr_def(ctx, offset + 0xb4, 0x00000001);
- gr_def(ctx, offset + 0xbc, 0x00000001);
- gr_def(ctx, offset + 0xc0, 0x0000000a);
- gr_def(ctx, offset + 0xd0, 0x00000040);
- gr_def(ctx, offset + 0xd8, 0x00000002);
- gr_def(ctx, offset + 0xdc, 0x00000100);
- gr_def(ctx, offset + 0xe0, 0x00000001);
- gr_def(ctx, offset + 0xe4, 0x00000100);
- gr_def(ctx, offset + 0x100, 0x00000001);
- gr_def(ctx, offset + 0x124, 0x00000004);
- gr_def(ctx, offset + 0x13c, 0x00000001);
- gr_def(ctx, offset + 0x140, 0x00000100);
- gr_def(ctx, offset + 0x148, 0x00000001);
- gr_def(ctx, offset + 0x154, 0x00000100);
- gr_def(ctx, offset + 0x158, 0x00000001);
- gr_def(ctx, offset + 0x15c, 0x00000100);
- gr_def(ctx, offset + 0x164, 0x00000001);
- gr_def(ctx, offset + 0x170, 0x00000100);
- gr_def(ctx, offset + 0x174, 0x00000001);
- gr_def(ctx, offset + 0x17c, 0x00000001);
- gr_def(ctx, offset + 0x188, 0x00000002);
- gr_def(ctx, offset + 0x190, 0x00000001);
- gr_def(ctx, offset + 0x198, 0x00000001);
- gr_def(ctx, offset + 0x1ac, 0x00000003);
- offset += 0xd0;
- } else {
- gr_def(ctx, offset + 0xb0, 0x00000001);
- gr_def(ctx, offset + 0xb4, 0x00000100);
- gr_def(ctx, offset + 0xbc, 0x00000001);
- gr_def(ctx, offset + 0xc8, 0x00000100);
- gr_def(ctx, offset + 0xcc, 0x00000001);
- gr_def(ctx, offset + 0xd0, 0x00000100);
- gr_def(ctx, offset + 0xd8, 0x00000001);
- gr_def(ctx, offset + 0xe4, 0x00000100);
- }
- gr_def(ctx, offset + 0xf8, 0x00000004);
- gr_def(ctx, offset + 0xfc, 0x00000070);
- gr_def(ctx, offset + 0x100, 0x00000080);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x114, 0x0000000c);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x11c, 0x00000008);
- gr_def(ctx, offset + 0x120, 0x00000014);
- if (dev_priv->chipset == 0x50) {
- gr_def(ctx, offset + 0x124, 0x00000026);
- offset -= 0x18;
- } else {
- gr_def(ctx, offset + 0x128, 0x00000029);
- gr_def(ctx, offset + 0x12c, 0x00000027);
- gr_def(ctx, offset + 0x130, 0x00000026);
- gr_def(ctx, offset + 0x134, 0x00000008);
- gr_def(ctx, offset + 0x138, 0x00000004);
- gr_def(ctx, offset + 0x13c, 0x00000027);
- }
- gr_def(ctx, offset + 0x148, 0x00000001);
- gr_def(ctx, offset + 0x14c, 0x00000002);
- gr_def(ctx, offset + 0x150, 0x00000003);
- gr_def(ctx, offset + 0x154, 0x00000004);
- gr_def(ctx, offset + 0x158, 0x00000005);
- gr_def(ctx, offset + 0x15c, 0x00000006);
- gr_def(ctx, offset + 0x160, 0x00000007);
- gr_def(ctx, offset + 0x164, 0x00000001);
- gr_def(ctx, offset + 0x1a8, 0x000000cf);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x1d8, 0x00000080);
- gr_def(ctx, offset + 0x1dc, 0x00000004);
- gr_def(ctx, offset + 0x1e0, 0x00000004);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- else
- gr_def(ctx, offset + 0x1e4, 0x00000003);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- gr_def(ctx, offset + 0x1ec, 0x00000003);
- offset += 8;
- }
- gr_def(ctx, offset + 0x1e8, 0x00000001);
- if (dev_priv->chipset == 0x50)
- offset -= 4;
- gr_def(ctx, offset + 0x1f4, 0x00000012);
- gr_def(ctx, offset + 0x1f8, 0x00000010);
- gr_def(ctx, offset + 0x1fc, 0x0000000c);
- gr_def(ctx, offset + 0x200, 0x00000001);
- gr_def(ctx, offset + 0x210, 0x00000004);
- gr_def(ctx, offset + 0x214, 0x00000002);
- gr_def(ctx, offset + 0x218, 0x00000004);
- if (dev_priv->chipset >= 0xa0)
- offset += 4;
- gr_def(ctx, offset + 0x224, 0x003fffff);
- gr_def(ctx, offset + 0x228, 0x00001fff);
- if (dev_priv->chipset == 0x50)
- offset -= 0x20;
- else if (dev_priv->chipset >= 0xa0) {
- gr_def(ctx, offset + 0x250, 0x00000001);
- gr_def(ctx, offset + 0x254, 0x00000001);
- gr_def(ctx, offset + 0x258, 0x00000002);
- offset += 0x10;
- }
- gr_def(ctx, offset + 0x250, 0x00000004);
- gr_def(ctx, offset + 0x254, 0x00000014);
- gr_def(ctx, offset + 0x258, 0x00000001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- offset += 4;
- gr_def(ctx, offset + 0x264, 0x00000002);
- if (dev_priv->chipset >= 0xa0)
- offset += 8;
- gr_def(ctx, offset + 0x270, 0x00000001);
- gr_def(ctx, offset + 0x278, 0x00000002);
- gr_def(ctx, offset + 0x27c, 0x00001000);
- if (dev_priv->chipset == 0x50)
- offset -= 0xc;
- else {
- gr_def(ctx, offset + 0x280, 0x00000e00);
- gr_def(ctx, offset + 0x284, 0x00001000);
- gr_def(ctx, offset + 0x288, 0x00001e00);
- }
- gr_def(ctx, offset + 0x290, 0x00000001);
- gr_def(ctx, offset + 0x294, 0x00000001);
- gr_def(ctx, offset + 0x298, 0x00000001);
- gr_def(ctx, offset + 0x29c, 0x00000001);
- gr_def(ctx, offset + 0x2a0, 0x00000001);
- gr_def(ctx, offset + 0x2b0, 0x00000200);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- gr_def(ctx, offset + 0x2b4, 0x00000200);
- offset += 4;
- }
- if (dev_priv->chipset < 0xa0) {
- gr_def(ctx, offset + 0x2b8, 0x00000001);
- gr_def(ctx, offset + 0x2bc, 0x00000070);
- gr_def(ctx, offset + 0x2c0, 0x00000080);
- gr_def(ctx, offset + 0x2cc, 0x00000001);
- gr_def(ctx, offset + 0x2d0, 0x00000070);
- gr_def(ctx, offset + 0x2d4, 0x00000080);
- } else {
- gr_def(ctx, offset + 0x2b8, 0x00000001);
- gr_def(ctx, offset + 0x2bc, 0x000000f0);
- gr_def(ctx, offset + 0x2c0, 0x000000ff);
- gr_def(ctx, offset + 0x2cc, 0x00000001);
- gr_def(ctx, offset + 0x2d0, 0x000000f0);
- gr_def(ctx, offset + 0x2d4, 0x000000ff);
- gr_def(ctx, offset + 0x2dc, 0x00000009);
- offset += 4;
- }
- gr_def(ctx, offset + 0x2e4, 0x00000001);
- gr_def(ctx, offset + 0x2e8, 0x000000cf);
- gr_def(ctx, offset + 0x2f0, 0x00000001);
- gr_def(ctx, offset + 0x300, 0x000000cf);
- gr_def(ctx, offset + 0x308, 0x00000002);
- gr_def(ctx, offset + 0x310, 0x00000001);
- gr_def(ctx, offset + 0x318, 0x00000001);
- gr_def(ctx, offset + 0x320, 0x000000cf);
- gr_def(ctx, offset + 0x324, 0x000000cf);
- gr_def(ctx, offset + 0x328, 0x00000001);
-
/* 6000? */
if (dev_priv->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
@@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x406818, 0x00000f80);
else
gr_def(ctx, 0x406818, 0x00001f80);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ if (IS_NVA3F(dev_priv->chipset))
gr_def(ctx, 0x40681c, 0x00000030);
cp_ctx(ctx, 0x406830, 0x3);
}
@@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0)
cp_ctx(ctx, 0x407094 + (i<<8), 1);
- else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
cp_ctx(ctx, 0x407094 + (i<<8), 3);
else {
cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
case 0xa8:
case 0xaa:
case 0xac:
+ case 0xaf:
gr_def(ctx, offset + 0x1c, 0x300c0000);
break;
}
@@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, base + 0x304, 0x00007070);
else if (dev_priv->chipset < 0xa0)
gr_def(ctx, base + 0x304, 0x00027070);
- else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ else if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, base + 0x304, 0x01127070);
else
gr_def(ctx, base + 0x304, 0x05127070);
@@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
cp_ctx(ctx, base + 0x340, 9);
offset = base + 0x340;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
cp_ctx(ctx, base + 0x33c, 0xb);
offset = base + 0x344;
} else {
@@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x0, 0x000001f0);
gr_def(ctx, offset + 0x4, 0x00000001);
gr_def(ctx, offset + 0x8, 0x00000003);
- if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+ if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
gr_def(ctx, offset + 0xc, 0x00008000);
gr_def(ctx, offset + 0x14, 0x00039e00);
cp_ctx(ctx, offset + 0x1c, 2);
@@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset >= 0xa0) {
cp_ctx(ctx, base + 0x54c, 2);
- if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ if (!IS_NVA3F(dev_priv->chipset))
gr_def(ctx, base + 0x54c, 0x003fe006);
else
gr_def(ctx, base + 0x54c, 0x003fe007);
@@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
}
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ ctx->ctxvals_pos += num;
+}
+
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int base, num;
+ base = ctx->ctxvals_pos;
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK135C */
+
+ /* SRC_TIC state */
+ dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
+ dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
+ if (dev_priv->chipset >= 0x94)
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
+ dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
+ dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
+
+ /* turing state */
+ dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */
+ dd_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK370 */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */
+ dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */
+ dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */
+ dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
+ dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
+ dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
+ dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
+
+ /* compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
+
+ dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
+
+ dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */
+ dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */
+
+ dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */
+ dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */
+ dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */
+ dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */
+ dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */
+ dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */
+ dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */
+ }
+
+ /* m2mf state */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */
+ dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
+
+ /* more compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
+
+ dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */
+
+ dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */
+ dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */
+ dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */
+ dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */
+ }
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */
+ dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+ }
+ dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
+ dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
+ dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */
+ dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+ dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */
+ dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+ dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */
+ dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */
+ }
+ dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */
+ dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */
+ dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */
+ dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */
+ dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */
+ dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */
+ dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */
+ dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */
+ dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */
+ dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */
+ dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
+ dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 3, 0); /* 1, 1, 1 */
+ else
+ dd_emit(ctx, 2, 0); /* 1, 1 */
+ dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */
+ dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 3); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
+ }
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
+ dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+ dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+ dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+ dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+ dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+ dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 8, 0); /* 00000001 */
+ if (dev_priv->chipset >= 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
+ dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
+ dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
+ }
+ dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
+ dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
+ dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
+ dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
+ if (dev_priv->chipset != 0x50) {
+ dd_emit(ctx, 1, 0xe00); /* 7fff */
+ dd_emit(ctx, 1, 0x1000); /* 7fff */
+ dd_emit(ctx, 1, 0x1e00); /* 7fff */
+ }
+ dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */
+ dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */
+ dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+ dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+ dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0x200);
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ if (dev_priv->chipset < 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ } else {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */
+ }
+
+ /* eng2d state */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */
+ dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */
+
+ num = ctx->ctxvals_pos - base;
+ ctx->ctxvals_pos = base;
+ if (IS_NVA3F(dev_priv->chipset))
+ cp_ctx(ctx, 0x404800, num);
+ else
+ cp_ctx(ctx, 0x405400, num);
+}
+
/*
* xfer areas. These are a pain.
*
@@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
* without the help of ctxprog.
*/
-static inline void
+static void
xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
- nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
ctx->ctxvals_pos += num << 3;
}
/* Gene declarations... */
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
@@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- switch (dev_priv->chipset) {
- case 0x50:
- xf_emit(ctx, 0x99, 0);
- break;
- case 0x84:
- case 0x86:
- xf_emit(ctx, 0x384, 0);
- break;
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- xf_emit(ctx, 0x380, 0);
- break;
- }
- nv50_graph_construct_gene_m2mf (ctx);
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0x4c4, 0);
- break;
- case 0x92:
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0x984, 0);
- break;
- }
- nv50_graph_construct_gene_unk5(ctx);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0xa, 0);
- else
- xf_emit(ctx, 0xb, 0);
- nv50_graph_construct_gene_unk4(ctx);
- nv50_graph_construct_gene_unk3(ctx);
+ nv50_graph_construct_gene_dispatch(ctx);
+ nv50_graph_construct_gene_m2mf(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_zcull(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 0x1;
- nv50_graph_construct_gene_unk6(ctx);
- nv50_graph_construct_gene_unk7(ctx);
- nv50_graph_construct_gene_unk8(ctx);
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- xf_emit(ctx, 0xfb, 0);
- break;
- case 0x84:
- xf_emit(ctx, 0xd3, 0);
- break;
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0xab, 0);
- break;
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0x6b, 0);
- break;
- }
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
+ nv50_graph_construct_gene_vfetch(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ nv50_graph_construct_gene_ropm2(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 0x2;
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- xf_emit(ctx, 0xa80, 0);
- break;
- case 0x84:
- xf_emit(ctx, 0xa7e, 0);
- break;
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0xa7c, 0);
- break;
- case 0x86:
- case 0x98:
- xf_emit(ctx, 0xa7a, 0);
- break;
- }
- xf_emit(ctx, 1, 0x3fffff);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fff);
- xf_emit(ctx, 0xe, 0);
- nv50_graph_construct_gene_unk9(ctx);
- nv50_graph_construct_gene_unk2(ctx);
- nv50_graph_construct_gene_unk1(ctx);
- nv50_graph_construct_gene_unk10(ctx);
+ nv50_graph_construct_gene_ccache(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk10xx(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
} else {
/* Strand 0 */
ctx->ctxvals_pos = offset;
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x385, 0);
- else
- xf_emit(ctx, 0x384, 0);
+ nv50_graph_construct_gene_dispatch(ctx);
nv50_graph_construct_gene_m2mf(ctx);
- xf_emit(ctx, 0x950, 0);
- nv50_graph_construct_gene_unk10(ctx);
- xf_emit(ctx, 1, 0x0fac6881);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- }
- nv50_graph_construct_gene_unk8(ctx);
- if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0x189, 0);
- else if (dev_priv->chipset == 0xa3)
- xf_emit(ctx, 0xd5, 0);
- else if (dev_priv->chipset == 0xa5)
- xf_emit(ctx, 0x99, 0);
- else if (dev_priv->chipset == 0xaa)
- xf_emit(ctx, 0x65, 0);
- else
- xf_emit(ctx, 0x6d, 0);
- nv50_graph_construct_gene_unk9(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 1 */
ctx->ctxvals_pos = offset + 1;
- nv50_graph_construct_gene_unk1(ctx);
+ nv50_graph_construct_gene_unk10xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 2 */
ctx->ctxvals_pos = offset + 2;
- if (dev_priv->chipset == 0xa0) {
- nv50_graph_construct_gene_unk2(ctx);
- }
- xf_emit(ctx, 0x36, 0);
- nv50_graph_construct_gene_unk5(ctx);
+ if (dev_priv->chipset == 0xa0)
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 3 */
ctx->ctxvals_pos = offset + 3;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- nv50_graph_construct_gene_unk6(ctx);
+ nv50_graph_construct_gene_vfetch(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 4 */
ctx->ctxvals_pos = offset + 4;
- if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0xa80, 0);
- else if (dev_priv->chipset == 0xa3)
- xf_emit(ctx, 0xa7c, 0);
- else
- xf_emit(ctx, 0xa7a, 0);
- xf_emit(ctx, 1, 0x3fffff);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fff);
+ nv50_graph_construct_gene_ccache(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
/* Strand 5 */
ctx->ctxvals_pos = offset + 5;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 0x4e3bfdf);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
+ nv50_graph_construct_gene_ropm2(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ /* per-ROP context */
for (i = 0; i < 8; i++)
if (units & (1<<(i+16)))
nv50_graph_construct_gene_ropc(ctx);
@@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 6 */
ctx->ctxvals_pos = offset + 6;
- nv50_graph_construct_gene_unk3(ctx);
- xf_emit(ctx, 0xb, 0);
- nv50_graph_construct_gene_unk4(ctx);
- nv50_graph_construct_gene_unk7(ctx);
+ nv50_graph_construct_gene_zcull(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
if (units & (1 << 0))
nv50_graph_construct_xfer_tp(ctx);
if (units & (1 << 1))
@@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
if (units & (1 << 9))
nv50_graph_construct_xfer_tp(ctx);
} else {
- nv50_graph_construct_gene_unk2(ctx);
+ nv50_graph_construct_gene_unk14xx(ctx);
}
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
@@ -1290,9 +1324,70 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
*/
static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+ /* start of strand 0 */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 5, 0);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 6, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ /* the PGRAPH's internal FIFO */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 8*3, 0);
+ else
+ xf_emit(ctx, 0x100*3, 0);
+ /* and another bonus slot?!? */
+ xf_emit(ctx, 3, 0);
+ /* and YET ANOTHER bonus slot? */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 3, 0);
+ /* SEEK */
+ /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ if (dev_priv->chipset < 0x90)
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x1c, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1e, 0);
+ else
+ xf_emit(ctx, 0x22, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x15, 0);
+}
+
+static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
- /* m2mf state */
+ /* Strand 0, right after dispatch */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int smallm2mf = 0;
+ if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+ smallm2mf = 1;
+ /* SEEK */
xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
@@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */
+ else
+ xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
+ xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */
+ xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */
}
static void
-nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
- else
- xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 2, 0); /* RO */
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
switch (dev_priv->chipset) {
case 0x50:
- case 0x86:
- case 0x98:
- case 0xaa:
- case 0xac:
- xf_emit(ctx, 0x542, 0);
+ case 0x92:
+ case 0xa0:
+ xf_emit(ctx, 0x2b, 0);
break;
case 0x84:
- case 0x92:
+ xf_emit(ctx, 0x29, 0);
+ break;
case 0x94:
case 0x96:
- xf_emit(ctx, 0x942, 0);
- break;
- case 0xa0:
case 0xa3:
- xf_emit(ctx, 0x2042, 0);
+ xf_emit(ctx, 0x27, 0);
break;
+ case 0x86:
+ case 0x98:
case 0xa5:
case 0xa8:
- xf_emit(ctx, 0x842, 0);
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ xf_emit(ctx, 0x25, 0);
break;
}
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x27);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 3, 0);
+ /* CB bindings, 0x80 of them. first word is address >> 8, second is
+ * size >> 4 | valid << 24 */
+ xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */
+ xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */
+ xf_emit(ctx, 1, 0); /* 0 */
+ xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */
+ xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */
+ xf_emit(ctx, 8, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */
}
static void
-nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 0x10, 0x04000000);
- xf_emit(ctx, 0x24, 0);
- xf_emit(ctx, 2, 0x04e3bfdf);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ for (i = 0; i < 8; i++) {
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0xa0, 0); /* ffffffff */
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x120, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xaf:
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ break;
+ }
+ xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
+ xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
}
static void
-nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x804);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 5, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 2, 4); /* 7f, ff */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
}
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 3, 0);
- else
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x804);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0x7f);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 6, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
- else
- xf_emit(ctx, 1, 0x7ff);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 0x38, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x38, 0);
- xf_emit(ctx, 2, 0x88);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 0x16, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x3f800000);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 0x28, 0);
+ xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
- xf_emit(ctx, 0x25, 0);
- xf_emit(ctx, 1, 0x52);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x26);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ }
+ xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */
+ xf_emit(ctx, 1, 0); /* f */
+ xf_emit(ctx, 1, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 003fffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 0000000f */
}
static void
-nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
- xf_emit(ctx, 1, 0x3f);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x10, 0);
- else
- xf_emit(ctx, 0x11, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x1001);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 0x20, 0);
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 1, 0x10);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0);
- else
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 2, 0);
+ /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ /* SEEK */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
}
static void
-nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
{
- /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 0);
+	/* middle of strand 0 on pre-NVA0 [after 24xx], middle of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */
+ /* SEEK */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */
}
static void
-nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
- xf_emit(ctx, 2, 4);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x1c4d, 0);
+ int i;
+ /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 0x33, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+ }
else
- xf_emit(ctx, 0x1c4b, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x8100c12);
+ {
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ /* SEEK */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x80c14);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0x27);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x3c1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x16, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4); /* 000000ff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ for (i = 0; i < 10; i++) {
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff */
+ xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */
+ xf_emit(ctx, 0x10, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 000003ff */
}
static void
-nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 8, 0);
- else
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x20);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x11, 0);
+ int acnt = 0x10, rep, i;
+ /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
+ if (IS_NVA3F(dev_priv->chipset))
+ acnt = 0x20;
+ /* SEEK */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
+ xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */
+ xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, acnt/8, 0);	/* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */
+ xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */
+ xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 0x9, 0); /* RO */
else
- xf_emit(ctx, 0xe, 0);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 0xd, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 8);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x8, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
+ xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
+ xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
else
- xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
if (dev_priv->chipset == 0xa8)
- xf_emit(ctx, 1, 0x1e00);
- xf_emit(ctx, 0xc, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x125, 0);
- else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x126, 0);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0x124, 0);
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO or close */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 2, 0); /* ffffffff */
else
- xf_emit(ctx, 0x1f7, 0);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 0x10, 0); /* 0? */
+ xf_emit(ctx, 2, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ } else {
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
+ xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* RO */
+ xf_emit(ctx, 2, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */
+ xf_emit(ctx, 1, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, acnt, 0); /* f */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ }
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 2, 0); /* RO */
+ else
+ xf_emit(ctx, 5, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
+ /* SEEK */
+ if (dev_priv->chipset < 0xa0) {
+ xf_emit(ctx, 0x41, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0x11, 0); /* RO */
+ } else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x50, 0); /* RO */
else
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xa1, 0);
+ xf_emit(ctx, 0x58, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 1); /* 1 UNK0DEC */
+ /* SEEK */
+ xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
+ xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1d, 0); /* RO */
else
- xf_emit(ctx, 0x5a, 0);
- xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0x16, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ /* SEEK */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x834, 0);
- else if (dev_priv->chipset == 0xa0)
- xf_emit(ctx, 0x1873, 0);
- else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x8ba, 0);
+ xf_emit(ctx, 8, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xc, 0); /* RO */
+ else
+ xf_emit(ctx, 7, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xa, 0); /* RO */
+ if (dev_priv->chipset == 0xa0)
+ rep = 0xc;
+ else
+ rep = 4;
+ for (i = 0; i < rep; i++) {
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x20, 0); /* ffffffff */
+ xf_emit(ctx, 0x200, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 113/111 */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 7, 0); /* weird... */
else
- xf_emit(ctx, 0x833, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 5, 0); /* weird... */
}
static void
-nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0x100);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 8);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 0xcf);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x15);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x4444480);
- xf_emit(ctx, 0x37, 0);
+ /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
+ xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
+ if (dev_priv->chipset < 0xa0) {
+ /* this is useless on everything but the original NV50,
+ * guess they forgot to nuke it. Or just didn't bother. */
+ xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
+ xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
+ xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
+ xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
+ xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
+ xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
+ xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
+ xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
+ xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
+ /* SEEK */
+ xf_emit(ctx, 0x10, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x27, 0);
}
static void
-nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
- /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 2);
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
+ xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
+ xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x80, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x84:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x40, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa0:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0xf0, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa3:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa5:
+ case 0xaf:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x30, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xaa:
+ xf_emit(ctx, 0x12, 0);
+ break;
+ case 0xa8:
+ case 0xac:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ }
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff */
}
static void
-nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
- xf_emit(ctx, 1, 0x3f800000);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x1a);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x12, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 0xf, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 0x04000000);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 5);
- xf_emit(ctx, 1, 0x52);
- if (dev_priv->chipset == 0x50) {
- xf_emit(ctx, 0x13, 0);
- } else {
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x11, 0);
- else
- xf_emit(ctx, 0x10, 0);
+ xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
}
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x26, 0);
- xf_emit(ctx, 1, 0x8100c12);
- xf_emit(ctx, 1, 5);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 4, 0xffff);
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x1f, 0);
- else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xc, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 0x1c, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0);
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ xf_emit(ctx, 1, 0); /* 3ff */
}
+ /* XXX: the following block could belong either to unk1cxx, or
+ * to STRMOUT. Rather hard to tell. */
if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x26, 0);
+ xf_emit(ctx, 0x25, 0);
else
- xf_emit(ctx, 0x3c, 0);
- xf_emit(ctx, 1, 0x102);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 4, 4);
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x3b, 0);
+}
+
+static void
+nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
- xf_emit(ctx, 1, 0x7ff);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x102);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 4, 4);
- xf_emit(ctx, 0x2c, 0);
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
+ xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000? */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+}
+
+static void
+nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 7 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
static void
@@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
int magic2;
if (dev_priv->chipset == 0x50) {
magic2 = 0x00003e60;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
magic2 = 0x001ffe67;
} else {
magic2 = 0x00087e67;
}
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 4, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 7, 0);
- if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0x15);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
+	xf_emit(ctx, 1, 0);		/* f/7 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x15); /* 000000ff */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x400);
- xf_emit(ctx, 1, 0x300);
- xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0x400); /* fffffff */
+ xf_emit(ctx, 1, 0x300); /* ffff */
+ xf_emit(ctx, 1, 0x1001); /* 1fff */
if (dev_priv->chipset != 0xa0) {
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 0);
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
else
- xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 1, 0x15); /* ff */
}
- xf_emit(ctx, 3, 0);
}
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x13, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 0x10, 0);
- xf_emit(ctx, 0x10, 0x3f800000);
- xf_emit(ctx, 0x19, 0);
- xf_emit(ctx, 1, 0x10);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x3f);
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
if (dev_priv->chipset >= 0xa0) {
xf_emit(ctx, 2, 0);
xf_emit(ctx, 1, 0x1001);
xf_emit(ctx, 0xb, 0);
} else {
- xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
}
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x11);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ xf_emit(ctx, 1, 0); /* 000000ff */
+ }
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
} else {
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x1b, 0);
- else
- xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
+ xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
}
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x5b, 0);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */
+ xf_emit(ctx, 1, 0); /* 000000ff ROP */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA1 */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA4 */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
}
static void
-nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
int magic3;
- if (dev_priv->chipset == 0x50)
+ switch (dev_priv->chipset) {
+ case 0x50:
magic3 = 0x1000;
- else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
+ break;
+ case 0x86:
+ case 0x98:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
magic3 = 0x1e00;
- else
+ break;
+ default:
magic3 = 0;
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0x24, 0);
+ }
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1f, 0); /* ffffffff */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x14, 0);
+ xf_emit(ctx, 0x0f, 0); /* ffffffff */
else
- xf_emit(ctx, 0x15, 0);
- xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
+ xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x03020100);
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff */
else
- xf_emit(ctx, 1, 0x00608080);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 2, 4);
- xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
if (magic3)
- xf_emit(ctx, 1, magic3);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 0x24, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0x03020100);
- xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
if (magic3)
- xf_emit(ctx, 1, magic3);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
- xf_emit(ctx, 0x1024, 0);
+ xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0xa24, 0);
- else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0x214, 0);
+ xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x210, 0); /* ffffffff */
else
- xf_emit(ctx, 0x414, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 0x410, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
}
static void
-nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
int magic1, magic2;
if (dev_priv->chipset == 0x50) {
magic1 = 0x3ff;
magic2 = 0x00003e60;
- } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
magic1 = 0x7ff;
magic2 = 0x001ffe67;
} else {
magic1 = 0x7ff;
magic2 = 0x00087e67;
}
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xc, 0);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 0xb, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 1, 0);
- } else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
+ xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ } else {
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 3, 0xcf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x11);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
if(dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0); /* ff */
else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, magic1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 0x28, 0);
- xf_emit(ctx, 8, 8);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 8, 0x400);
- xf_emit(ctx, 8, 0x300);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x20);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x40);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 4, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x400);
- xf_emit(ctx, 1, 0x300);
- xf_emit(ctx, 1, 0x1001);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 4, 0);
- else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, 0xf);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 0x15, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- } else
- xf_emit(ctx, 0x17, 0);
+ xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
+ xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
+ xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
+ xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
+ xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
+ xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x0fac6881);
- xf_emit(ctx, 1, magic2);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 3, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 0);
- else if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0); /* 1 */
+ xf_emit(ctx, 2, 1); /* 1 */
+ xf_emit(ctx, 2, 0); /* 7, f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 1, 0); /* 7/f */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0); /* 1 */
+ else
+ xf_emit(ctx, 0x8, 0); /* 1 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 1 */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 7, 0); /* 7f */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 1, 0xf); /* f */
+ xf_emit(ctx, 7, 0); /* f */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ }
}
static void
-nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
else
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0x2a712488);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x4085c000);
- xf_emit(ctx, 1, 0x40);
- xf_emit(ctx, 1, 0x100);
- xf_emit(ctx, 1, 0x10100);
- xf_emit(ctx, 1, 0x02800000);
+ xf_emit(ctx, 2, 0); /* 3ff, 1 */
+ xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
+ xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
+ xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
+ xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
+ xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
+ xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
+ xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
+ } else if (!IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else {
+ xf_emit(ctx, 0x6, 0);
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
}
static void
-nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 2, 0x04e3bfdf);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 2, 1);
- else
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x00ffff00);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0x30201000);
- xf_emit(ctx, 1, 0x70605040);
- xf_emit(ctx, 1, 0xb8a89888);
- xf_emit(ctx, 1, 0xf8e8d8c8);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1a);
-}
-
-static void
-nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xb, 0);
- else
- xf_emit(ctx, 0xa, 0);
- xf_emit(ctx, 8, 1);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 1, 0xf);
- xf_emit(ctx, 7, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 1);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 6, 0);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 6, 0);
- } else {
- xf_emit(ctx, 0xb, 0);
- }
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
}
static void
@@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
if (dev_priv->chipset < 0xa0) {
- nv50_graph_construct_xfer_tp_x1(ctx);
- nv50_graph_construct_xfer_tp_x2(ctx);
- nv50_graph_construct_xfer_tp_x3(ctx);
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0xf, 0);
- else
- xf_emit(ctx, 0x12, 0);
- nv50_graph_construct_xfer_tp_x4(ctx);
+ nv50_graph_construct_xfer_unk84xx(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
} else {
- nv50_graph_construct_xfer_tp_x3(ctx);
- if (dev_priv->chipset < 0xaa)
- xf_emit(ctx, 0xc, 0);
- else
- xf_emit(ctx, 0xa, 0);
- nv50_graph_construct_xfer_tp_x2(ctx);
- nv50_graph_construct_xfer_tp_x5(ctx);
- nv50_graph_construct_xfer_tp_x4(ctx);
- nv50_graph_construct_xfer_tp_x1(ctx);
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
+ nv50_graph_construct_xfer_unk84xx(ctx);
}
}
static void
-nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i, mpcnt;
- if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
- mpcnt = 1;
- else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
- mpcnt = 2;
- else
- mpcnt = 3;
+ int i, mpcnt = 2;
+ switch (dev_priv->chipset) {
+ case 0x98:
+ case 0xaa:
+ mpcnt = 1;
+ break;
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa8:
+ case 0xac:
+ mpcnt = 2;
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xaf:
+ mpcnt = 3;
+ break;
+ }
for (i = 0; i < mpcnt; i++) {
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x80);
- xf_emit(ctx, 1, 0x80007004);
- xf_emit(ctx, 1, 0x04000400);
+ xf_emit(ctx, 1, 0); /* ff */
+ xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
+ xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
+ xf_emit(ctx, 1, 0x04000400); /* ffffffff */
if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0xc0);
- xf_emit(ctx, 1, 0x1000);
- xf_emit(ctx, 2, 0);
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
- xf_emit(ctx, 1, 0xe00);
- xf_emit(ctx, 1, 0x1e00);
+ xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
+ xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0xe00); /* 7fff */
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
}
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 0x1000);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 2);
- if (dev_priv->chipset >= 0xaa)
- xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ if (IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 0xc, 0); /* RO */
else
- xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 0xa, 0); /* RO */
}
- xf_emit(ctx, 1, 0x08100c12);
- xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
}
- xf_emit(ctx, 5, 0);
- xf_emit(ctx, 4, 0xffff);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 2, 0x10001);
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 0x1fe21);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 4, 0);
- xf_emit(ctx, 1, 0x08100c12);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 8, 0);
- xf_emit(ctx, 1, 0xfac6881);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
- xf_emit(ctx, 1, 3);
- xf_emit(ctx, 3, 0);
- xf_emit(ctx, 1, 4);
- xf_emit(ctx, 9, 0);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 2, 1);
- xf_emit(ctx, 1, 2);
- xf_emit(ctx, 3, 1);
- xf_emit(ctx, 1, 0);
- if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x10, 1);
- xf_emit(ctx, 8, 2);
- xf_emit(ctx, 0x18, 1);
- xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+	xf_emit(ctx, 1, 0x1fe21);	/* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
}
- xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ /* XXX: demagic this part some day */
if (dev_priv->chipset == 0x50)
xf_emit(ctx, 0x3a0, 0);
else if (dev_priv->chipset < 0x94)
@@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
xf_emit(ctx, 0x39f, 0);
else
xf_emit(ctx, 0x3a3, 0);
- xf_emit(ctx, 1, 0x11);
- xf_emit(ctx, 1, 0);
- xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 7 OPERATION */
+ xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
xf_emit(ctx, 0x2d, 0);
}
@@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
if (dev_priv->chipset < 0xa0) {
for (i = 0; i < 8; i++) {
ctx->ctxvals_pos = offset + i;
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
if (i == 0)
- xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << i))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
} else {
/* Strand 0: TPs 0, 1 */
ctx->ctxvals_pos = offset;
- xf_emit(ctx, 1, 0x08100c12);
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
if (units & (1 << 0))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 1))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 2, 3 */
+ /* Strand 1: TPs 2, 3 */
ctx->ctxvals_pos = offset + 1;
if (units & (1 << 2))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 3))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 4, 5, 6 */
+ /* Strand 2: TPs 4, 5, 6 */
ctx->ctxvals_pos = offset + 2;
if (units & (1 << 4))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 5))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 6))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
- /* Strand 0: TPs 7, 8, 9 */
+ /* Strand 3: TPs 7, 8, 9 */
ctx->ctxvals_pos = offset + 3;
if (units & (1 << 7))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 8))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if (units & (1 << 9))
- nv50_graph_construct_xfer_tp2(ctx);
+ nv50_graph_construct_xfer_mpc(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
size = (ctx->ctxvals_pos-offset)/8;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 91ef93cf1f35..a53fc974332b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -32,39 +32,87 @@
struct nv50_instmem_priv {
uint32_t save1700[5]; /* 0x1700->0x1710 */
- struct nouveau_gpuobj_ref *pramin_pt;
- struct nouveau_gpuobj_ref *pramin_bar;
- struct nouveau_gpuobj_ref *fb_bar;
+ struct nouveau_gpuobj *pramin_pt;
+ struct nouveau_gpuobj *pramin_bar;
+ struct nouveau_gpuobj *fb_bar;
};
-#define NV50_INSTMEM_PAGE_SHIFT 12
-#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
-#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+static void
+nv50_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan;
-/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
- */
-#define BAR0_WI32(g, o, v) do { \
- uint32_t offset; \
- if ((g)->im_backing) { \
- offset = (g)->im_backing_start; \
- } else { \
- offset = chan->ramin->gpuobj->im_backing_start; \
- offset += (g)->im_pramin->start; \
- } \
- offset += (o); \
- nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
-} while (0)
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
+
+static int
+nv50_channel_new(struct drm_device *dev, u32 size,
+ struct nouveau_channel **pchan)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+ u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + pgd,
+ chan->ramin->vinst + pgd,
+ 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + fc,
+ chan->ramin->vinst + fc, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ *pchan = chan;
+ return 0;
+}
int
nv50_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
- uint32_t save_nv001700;
- uint64_t v;
struct nv50_instmem_priv *priv;
+ struct nouveau_channel *chan;
int ret, i;
+ u32 tmp;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -75,212 +123,115 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
- /* Reserve the last MiB of VRAM, we should probably try to avoid
- * setting up the below tables over the top of the VBIOS image at
- * some point.
- */
- dev_priv->ramin_rsvd_vram = 1 << 20;
- c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- c_size = 128 << 10;
- c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
- c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
- c_base = c_vmpd + 0x4000;
- pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
-
- NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
- NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
- (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
- NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
- NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
-
- /* Determine VM layout, we need to do this first to make sure
- * we allocate enough memory for all the page tables.
- */
- dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
- dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
- dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = dev_priv->vram_size;
- if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
- dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
- dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
- dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
- dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
- NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
- dev_priv->vm_gart_base,
- dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
- NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
- dev_priv->vm_vram_base,
- dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
- c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
-
- /* Map BAR0 PRAMIN aperture over the memory we want to use */
- save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
-
- /* Create a fake channel, and use it as our "dummy" channels 0/127.
- * The main reason for creating a channel is so we can use the gpuobj
- * code. However, it's probably worth noting that NVIDIA also setup
- * their channels 0/127 with the same values they configure here.
- * So, there may be some other reason for doing this.
- *
- * Have to create the entire channel manually, as the real channel
- * creation code assumes we have PRAMIN access, and we don't until
- * we're done here.
- */
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
+ /* Global PRAMIN heap */
+ ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
return -ENOMEM;
- chan->id = 0;
- chan->dev = dev;
- chan->file_priv = (struct drm_file *)-2;
- dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
-
- INIT_LIST_HEAD(&chan->ramht_refs);
+ }
- /* Channel's PRAMIN object + heap */
- ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
- NULL, &chan->ramin);
+ /* we need a channel to plug into the hw to control the BARs */
+ ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
if (ret)
return ret;
+ chan = dev_priv->fifos[127] = dev_priv->fifos[0];
- if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
- return -ENOMEM;
-
- /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
- ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
- 0x4000, 0, NULL, &chan->ramfc);
+ /* allocate page table for PRAMIN BAR */
+ ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->pramin_pt);
if (ret)
return ret;
- for (i = 0; i < c_vmpd; i += 4)
- BAR0_WI32(chan->ramin->gpuobj, i, 0);
+ nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
+ nv_wo32(chan->vm_pd, 0x0004, 0);
- /* VM page directory */
- ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
- 0x4000, 0, &chan->vm_pd, NULL);
+ /* DMA object for PRAMIN BAR */
+ ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
if (ret)
return ret;
- for (i = 0; i < 0x4000; i += 8) {
- BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
- BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
- }
-
- /* PRAMIN page table, cheat and map into VM at 0x0000000000.
- * We map the entire fake channel into the start of the PRAMIN BAR
- */
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
- 0, &priv->pramin_pt);
+ nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
+ nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
+ nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
+ nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+
+ /* map channel into PRAMIN, gpuobj didn't do it for us */
+ ret = nv50_instmem_bind(dev, chan->ramin);
if (ret)
return ret;
- v = c_offset | 1;
- if (dev_priv->vram_sys_base) {
- v += dev_priv->vram_sys_base;
- v |= 0x30;
- }
+ /* poke regs... */
+ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
- i = 0;
- while (v < dev_priv->vram_sys_base + c_offset + c_size) {
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
- v += 0x1000;
- i += 8;
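+	/* sanity check: flip the first RAMIN word through the new BAR3
+	 * mapping and make sure the complement reads back before restoring it
+	 */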
+ tmp = nv_ri32(dev, 0);
+ nv_wi32(dev, 0, ~tmp);
+ if (nv_ri32(dev, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ return -EIO;
}
+ nv_wi32(dev, 0, tmp);
- while (i < pt_size) {
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
- i += 8;
- }
+ dev_priv->ramin_available = true;
- BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
- BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+ /* Determine VM layout */
+ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+ dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+ dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+ dev_priv->vm_vram_size = dev_priv->vram_size;
+ if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+ dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+ dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+ dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+ dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
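+	/* i.e. the first VM block holds the PRAMIN mapping, the second the
+	 * GART window, and VRAM follows from +1GiB up (one page table per
+	 * NV50_VM_BLOCK), capped at NV50_VM_MAX_VRAM
+	 */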
+
+ NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+ dev_priv->vm_gart_base,
+ dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+ NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+ dev_priv->vm_vram_base,
+ dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
/* VRAM page table(s), mapped into VM at +1GiB */
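+	/* each table holds NV50_VM_BLOCK/64KiB eight-byte entries (one
+	 * large-page PTE per 64KiB); their PDEs start at vm_pd + 0x10 since
+	 * the first two entries correspond to the PRAMIN and GART blocks
+	 */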
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
- NV50_VM_BLOCK/65536*8, 0, 0,
- &chan->vm_vram_pt[i]);
+ ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
+ 0, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->vm_vram_pt[i]);
if (ret) {
- NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
- ret);
+ NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
dev_priv->vm_vram_pt_nr = i;
return ret;
}
- dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
- for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
- v += 4)
- BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
-
- BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
- chan->vm_vram_pt[i]->instance | 0x61);
- BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+ nv_wo32(chan->vm_pd, 0x10 + (i*8),
+ chan->vm_vram_pt[i]->vinst | 0x61);
+ nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
}
- /* DMA object for PRAMIN BAR */
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
- &priv->pramin_bar);
- if (ret)
- return ret;
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
- BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
-
/* DMA object for FB BAR */
- ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
- &priv->fb_bar);
+ ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
if (ret)
return ret;
- BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
- pci_resource_len(dev->pdev, 1) - 1);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
- BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
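+	/* linear window based at 0x40000000 (the +1GiB offset where VRAM is
+	 * mapped in the channel VM), spanning the whole BAR1 aperture
+	 */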
+ nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
+ nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
+ pci_resource_len(dev->pdev, 1) - 1);
+ nv_wo32(priv->fb_bar, 0x08, 0x40000000);
+ nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
+ nv_wo32(priv->fb_bar, 0x10, 0x00000000);
+ nv_wo32(priv->fb_bar, 0x14, 0x00000000);
- /* Poke the relevant regs, and pray it works :) */
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
- nv_wr32(dev, NV50_PUNK_UNK1710, 0);
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
- NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
- NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
- NV50_PUNK_BAR3_CTXDMA_VALID);
+ dev_priv->engine.instmem.flush(dev);
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
- /* Assume that praying isn't enough, check that we can re-read the
- * entire fake channel back from the PRAMIN BAR */
- for (i = 0; i < c_size; i += 4) {
- if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
- NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
- i);
- return -EINVAL;
- }
- }
-
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
-
- /* Global PRAMIN heap */
- if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- }
-
- /*XXX: incorrect, but needed to make hash func "work" */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
@@ -297,29 +248,24 @@ nv50_instmem_takedown(struct drm_device *dev)
if (!priv)
return;
+ dev_priv->ramin_available = false;
+
/* Restore state from before init */
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
- nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
- nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
- nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+ nouveau_gpuobj_ref(NULL, &priv->fb_bar);
+ nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
+ nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
/* Destroy dummy channel */
if (chan) {
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
- dev_priv->vm_vram_pt[i] = NULL;
- }
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+ nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
dev_priv->vm_vram_pt_nr = 0;
- nouveau_gpuobj_del(dev, &chan->vm_pd);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->ramin);
- drm_mm_takedown(&chan->ramin_heap);
-
- dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
- kfree(chan);
+ nv50_channel_del(&dev_priv->fifos[0]);
+ dev_priv->fifos[127] = NULL;
}
dev_priv->engine.instmem.priv = NULL;
@@ -331,14 +277,14 @@ nv50_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
int i;
- ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+ ramin->im_backing_suspend = vmalloc(ramin->size);
if (!ramin->im_backing_suspend)
return -ENOMEM;
- for (i = 0; i < ramin->im_pramin->size; i += 4)
+ for (i = 0; i < ramin->size; i += 4)
ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
return 0;
}
@@ -349,23 +295,25 @@ nv50_instmem_resume(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ramin = chan->ramin;
int i;
- nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
- for (i = 0; i < ramin->im_pramin->size; i += 4)
- BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+ dev_priv->ramin_available = false;
+ dev_priv->ramin_base = ~0;
+ for (i = 0; i < ramin->size; i += 4)
+ nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
+ dev_priv->ramin_available = true;
vfree(ramin->im_backing_suspend);
ramin->im_backing_suspend = NULL;
/* Poke the relevant regs, and pray it works :) */
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
nv_wr32(dev, NV50_PUNK_UNK1710, 0);
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
for (i = 0; i < 8; i++)
@@ -381,7 +329,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
if (gpuobj->im_backing)
return -EINVAL;
- *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
+ *sz = ALIGN(*sz, 4096);
if (*sz == 0)
return -EINVAL;
@@ -399,9 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
return ret;
}
- gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
- gpuobj->im_backing_start <<= PAGE_SHIFT;
-
+ gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
return 0;
}
@@ -424,7 +370,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
uint32_t pte, pte_end;
uint64_t vram;
@@ -436,11 +382,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- vram = gpuobj->im_backing_start;
+ vram = gpuobj->vinst;
NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
vram |= 1;
if (dev_priv->vram_sys_base) {
@@ -449,9 +395,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
}
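+	/* each iteration writes one 8-byte PTE (valid/target flags in the low
+	 * word) mapping a 4KiB page of this object's PRAMIN range to its VRAM
+	 * backing
+	 */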
while (pte < pte_end) {
- nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
- nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
- vram += NV50_INSTMEM_PAGE_SIZE;
+ nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+ nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
+ vram += 0x1000;
+ pte += 2;
}
dev_priv->engine.instmem.flush(dev);
@@ -472,12 +419,17 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
+ /* can happen during late takedown */
+ if (unlikely(!dev_priv->ramin_available))
+ return 0;
+
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
- nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
+ nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
+ pte += 2;
}
dev_priv->engine.instmem.flush(dev);
@@ -489,7 +441,7 @@ void
nv50_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x00330c, 0x00000001);
- if (!nv_wait(0x00330c, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -497,7 +449,7 @@ void
nv84_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x070000, 0x00000001);
- if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -505,7 +457,7 @@ void
nv50_vm_flush(struct drm_device *dev, int engine)
{
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
new file mode 100644
index 000000000000..7dbb305d7e63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+struct nv50_pm_state {
+ struct nouveau_pm_level *perflvl;
+ struct pll_lims pll;
+ enum pll_types type;
+ int N, M, P;
+};
+
+int
+nv50_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ struct pll_lims pll;
+ int P, N, M, ret;
+ u32 reg0, reg1;
+
+ ret = get_pll_limits(dev, id, &pll);
+ if (ret)
+ return ret;
+
+ reg0 = nv_rd32(dev, pll.reg + 0);
+ reg1 = nv_rd32(dev, pll.reg + 4);
+ P = (reg0 & 0x00070000) >> 16;
+ N = (reg1 & 0x0000ff00) >> 8;
+ M = (reg1 & 0x000000ff);
+
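+	/* effective clock is refclk * N / M, post-divided by 2^P; e.g. a
+	 * 27000kHz reference with N=100, M=2, P=1 reads back as 675000kHz
+	 * (illustrative values only)
+	 */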
+ return ((pll.refclk * N / M) >> P);
+}
+
+void *
+nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nv50_pm_state *state;
+ int dummy, ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+ state->type = id;
+ state->perflvl = perflvl;
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret < 0) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
+ &dummy, &dummy, &state->P);
+ if (ret < 0) {
+ kfree(state);
+ return ERR_PTR(ret);
+ }
+
+ return state;
+}
+
+void
+nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct nv50_pm_state *state = pre_state;
+ struct nouveau_pm_level *perflvl = state->perflvl;
+ u32 reg = state->pll.reg, tmp;
+ struct bit_entry BIT_M;
+ u16 script;
+ int N = state->N;
+ int M = state->M;
+ int P = state->P;
+
+ if (state->type == PLL_MEMORY && perflvl->memscript &&
+ bit_table(dev, 'M', &BIT_M) == 0 &&
+ BIT_M.version == 1 && BIT_M.length >= 0x0b) {
+ script = ROM16(BIT_M.data[0x05]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x07]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+ script = ROM16(BIT_M.data[0x09]);
+ if (script)
+ nouveau_bios_run_init_table(dev, script, NULL);
+
+ nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
+ }
+
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x100210, 0);
+ nv_wr32(dev, 0x1002dc, 1);
+ }
+
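+	/* clear the 3-bit post-divider field (bits 18:16), program the new P
+	 * together with bit 31 (presumably the PLL enable), then load N and M
+	 * into the second PLL register
+	 */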
+ tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
+ tmp |= 0x80000000 | (P << 16);
+ nv_wr32(dev, reg + 0, tmp);
+ nv_wr32(dev, reg + 4, (N << 8) | M);
+
+ if (state->type == PLL_MEMORY) {
+ nv_wr32(dev, 0x1002dc, 0);
+ nv_wr32(dev, 0x100210, 0x80000000);
+ }
+
+ kfree(state);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index bcd4cf84a7e6..b4a5ecb199f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
/* wait for it to be done */
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+ if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
new file mode 100644
index 000000000000..dbbafed36406
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+/*XXX: boards using limits 0x40 need fixing, the register layout
+ *     is correct here, but there's some other funny magic
+ * that modifies things, so it's not likely we'll set/read
+ * the correct timings yet.. working on it...
+ */
+
+struct nva3_pm_state {
+ struct pll_lims pll;
+ int N, M, P;
+};
+
+int
+nva3_pm_clock_get(struct drm_device *dev, u32 id)
+{
+ struct pll_lims pll;
+ int P, N, M, ret;
+ u32 reg;
+
+ ret = get_pll_limits(dev, id, &pll);
+ if (ret)
+ return ret;
+
+ reg = nv_rd32(dev, pll.reg + 4);
+ P = (reg & 0x003f0000) >> 16;
+ N = (reg & 0x0000ff00) >> 8;
+ M = (reg & 0x000000ff);
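+	/* note: unlike the nv50 path, P acts as a straight divisor here
+	 * rather than a power-of-two shift
+	 */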
+ return pll.refclk * N / M / P;
+}
+
+void *
+nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ u32 id, int khz)
+{
+ struct nva3_pm_state *state;
+ int dummy, ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ ret = get_pll_limits(dev, id, &state->pll);
+ if (ret < 0) {
+ kfree(state);
+ return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ }
+
+ ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy,
+ &state->M, &state->P);
+ if (ret < 0) {
+ kfree(state);
+ return ERR_PTR(ret);
+ }
+
+ return state;
+}
+
+void
+nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+ struct nva3_pm_state *state = pre_state;
+ u32 reg = state->pll.reg;
+
+ nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M);
+ kfree(state);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index d64375871979..890c2b95fbc1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -43,12 +43,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable)
}
bool
-nvc0_fifo_cache_flush(struct drm_device *dev)
-{
- return true;
-}
-
-bool
nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
{
return false;
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 6b451f864783..13a0f78a9088 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
return ret;
}
- gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
- gpuobj->im_backing_start <<= PAGE_SHIFT;
+ gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
return 0;
}
@@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = gpuobj->im_pramin->start >> 12;
pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- vram = gpuobj->im_backing_start;
+ vram = gpuobj->vinst;
NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
while (pte < pte_end) {
nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
@@ -134,7 +133,7 @@ void
nvc0_instmem_flush(struct drm_device *dev)
{
nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
@@ -221,10 +220,6 @@ nvc0_instmem_init(struct drm_device *dev)
return -ENOMEM;
}
- /*XXX: incorrect, but needed to make hash func "work" */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index ad64673ace1f..881f8a585613 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -263,6 +263,7 @@
# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
# define NV_CIO_CRE_LCD__INDEX 0x33
# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
+# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b
# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index d42c76c23714..18c3c71e41b1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -56,8 +56,6 @@ static struct drm_driver driver = {
.irq_uninstall = r128_driver_irq_uninstall,
.irq_handler = r128_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index aebe00875041..6cae4f2028d2 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cd0290f946cf..df2b6f2b35f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
union atom_enable_ss {
- ENABLE_LVDS_SS_PARAMETERS legacy;
+ ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+ ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
};
-static void atombios_enable_ss(struct drm_crtc *crtc)
+static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+ int enable,
+ int pll_id,
+ struct radeon_atom_ss *ss)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
- struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
- uint16_t percentage = 0;
- uint8_t type = 0, step = 0, delay = 0, range = 0;
- /* XXX add ss support for DCE4 */
- if (ASIC_IS_DCE4(rdev))
- return;
+ memset(&args, 0, sizeof(args));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- /* only enable spread spectrum on LVDS */
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- dig = radeon_encoder->enc_priv;
- if (dig && dig->ss) {
- percentage = dig->ss->percentage;
- type = dig->ss->type;
- step = dig->ss->step;
- delay = dig->ss->delay;
- range = dig->ss->range;
- } else
- return;
- } else
- return;
+ if (ASIC_IS_DCE4(rdev)) {
+ args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v2.ucSpreadSpectrumType = ss->type;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_PPLL2:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
break;
+ case ATOM_DCPLL:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+ args.v2.usSpreadSpectrumAmount = 0;
+ args.v2.usSpreadSpectrumStep = 0;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
}
- }
-
- if (!radeon_encoder)
- return;
-
- memset(&args, 0, sizeof(args));
- if (ASIC_IS_AVIVO(rdev)) {
- args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.v1.ucSpreadSpectrumType = type;
- args.v1.ucSpreadSpectrumStep = step;
- args.v1.ucSpreadSpectrumDelay = delay;
- args.v1.ucSpreadSpectrumRange = range;
- args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v1.ucEnable = ATOM_ENABLE;
+ args.v2.ucEnable = enable;
+ } else if (ASIC_IS_DCE3(rdev)) {
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v1.ucSpreadSpectrumType = ss->type;
+ args.v1.ucSpreadSpectrumStep = ss->step;
+ args.v1.ucSpreadSpectrumDelay = ss->delay;
+ args.v1.ucSpreadSpectrumRange = ss->range;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucEnable = enable;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (enable == ATOM_DISABLE) {
+ atombios_disable_ss(crtc);
+ return;
+ }
+ args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss_2.ucSpreadSpectrumType = ss->type;
+ args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+ args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+ args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+ args.lvds_ss_2.ucEnable = enable;
} else {
- args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.legacy.ucSpreadSpectrumType = type;
- args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
- args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- args.legacy.ucEnable = ATOM_ENABLE;
+ if (enable == ATOM_DISABLE) {
+ atombios_disable_ss(crtc);
+ return;
+ }
+ args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss.ucSpreadSpectrumType = ss->type;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+ args.lvds_ss.ucEnable = enable;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -468,7 +479,9 @@ union adjust_pixel_clock {
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
- struct radeon_pll *pll)
+ struct radeon_pll *pll,
+ bool ss_enabled,
+ struct radeon_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -482,19 +495,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* reset the pll flags */
pll->flags = 0;
- /* select the PLL algo */
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll == 0)
- pll->algo = PLL_ALGO_LEGACY;
- else
- pll->algo = PLL_ALGO_NEW;
- } else {
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
- }
-
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
@@ -531,29 +531,22 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
+ /* use recommended ref_div for ss */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (ss_enabled) {
+ if (ss->refdiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = ss->refdiv;
+ }
+ }
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
- pll->algo = PLL_ALGO_LEGACY;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
- }
- /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
- * (on some boards at least) prefers the legacy algo. I'm not
- * sure whether this should handled generically or on a
- * case-by-case quirk basis. Both algos should work fine in the
- * majority of cases.
- */
- if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
- ((rdev->family == CHIP_RV515) ||
- (rdev->family == CHIP_RV620))) {
- /* allow the user to overrride just in case */
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
- }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -589,9 +582,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP eventually */
- /* args.v1.ucConfig |=
- ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
@@ -608,11 +601,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucDispPllConfig = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
@@ -632,17 +624,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
- /* want to enable SS on LVDS eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
+ if (ss_enabled)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
} else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
@@ -816,6 +808,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
+ struct radeon_atom_ss ss;
+ bool ss_enabled = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -842,25 +836,123 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
}
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector =
+ radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+ int dp_clock;
+
+ switch (encoder_mode) {
+ case ATOM_ENCODER_MODE_DP:
+ /* DP/eDP */
+ dp_clock = dig_connector->dp_clock / 10;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ dig->lcd_ss_id,
+ dp_clock);
+ else
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ dig->lcd_ss_id);
+ } else {
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DP,
+ dp_clock);
+ else {
+ if (dp_clock == 16200) {
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID2);
+ if (!ss_enabled)
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
+ } else
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
+ }
+ }
+ break;
+ case ATOM_ENCODER_MODE_LVDS:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ dig->lcd_ss_id,
+ mode->clock / 10);
+ else
+ ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ dig->lcd_ss_id);
+ break;
+ case ATOM_ENCODER_MODE_DVI:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_TMDS,
+ mode->clock / 10);
+ break;
+ case ATOM_ENCODER_MODE_HDMI:
+ if (ASIC_IS_DCE4(rdev))
+ ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_HDMI,
+ mode->clock / 10);
+ break;
+ default:
+ break;
+ }
+ }
+
/* adjust pixel clock as needed */
- adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div);
+ if (ss_enabled) {
+ /* calculate ss amount and step size */
+ if (ASIC_IS_DCE4(rdev)) {
+ u32 step_size;
+ u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
+ ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+ ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+ if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+ step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ else
+ step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ ss.step = step_size;
+ }
+
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+ }
}
-static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t fb_location;
@@ -868,28 +960,43 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
int r;
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+	} else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- /* Pin framebuffer & get tilling informations */
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
obj = radeon_fb->obj;
rbo = obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- return -EINVAL;
+
+ if (atomic)
+ fb_location = radeon_bo_gpu_offset(rbo);
+ else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
}
+
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
@@ -909,7 +1016,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
- crtc->fb->bits_per_pixel);
+ target_fb->bits_per_pixel);
return -EINVAL;
}
@@ -955,10 +1062,10 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
- WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -977,8 +1084,8 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -993,8 +1100,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -1002,33 +1110,48 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
+ struct drm_framebuffer *target_fb;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
int r;
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+	} else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- /* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
rbo = obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- return -EINVAL;
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ if (atomic)
+ fb_location = radeon_bo_gpu_offset(rbo);
+ else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
@@ -1052,7 +1175,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
break;
default:
DRM_ERROR("Unsupported screen depth %d\n",
- crtc->fb->bits_per_pixel);
+ target_fb->bits_per_pixel);
return -EINVAL;
}
@@ -1093,10 +1216,10 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
- WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
- WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+ WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1115,8 +1238,8 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -1138,11 +1261,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
else if (ASIC_IS_AVIVO(rdev))
- return avivo_crtc_set_base(crtc, x, y, old_fb);
+ return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
else
- return radeon_crtc_set_base(crtc, x, y, old_fb);
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+ else if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+ else
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
}
/* properly set additional regs when using atombios */
@@ -1230,12 +1368,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
- atombios_disable_ss(crtc);
/* always set DCPLL */
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_atom_ss ss;
+ bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DCPLL,
+ rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
atombios_crtc_set_dcpll(crtc);
+ if (ss_enabled)
+ atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
+ }
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_enable_ss(crtc);
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1311,6 +1456,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.mode_fixup = atombios_crtc_mode_fixup,
.mode_set = atombios_crtc_mode_set,
.mode_set_base = atombios_crtc_set_base,
+ .mode_set_base_atomic = atombios_crtc_set_base_atomic,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.load_lut = radeon_crtc_load_lut,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2f93d46ae69a..f12a5b3ec050 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -32,6 +32,7 @@
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -284,9 +285,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
}
}
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp = 0;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * the display controllers. The paritioning is done via one of four
+ * preset allocations specified in bits 2:0:
+ * first display controller
+ * 0 - first half of lb (3840 * 2)
+ * 1 - first 3/4 of lb (5760 * 2)
+ * 2 - whole lb (7680 * 2)
+ * 3 - first 1/4 of lb (1920 * 2)
+ * second display controller
+ * 4 - second half of lb (3840 * 2)
+ * 5 - second 3/4 of lb (5760 * 2)
+ * 6 - whole lb (7680 * 2)
+ * 7 - last 1/4 of lb (1920 * 2)
+ */
+ if (mode && other_mode) {
+ if (mode->hdisplay > other_mode->hdisplay) {
+ if (mode->hdisplay > 2560)
+ tmp = 1; /* 3/4 */
+ else
+ tmp = 0; /* 1/2 */
+ } else if (other_mode->hdisplay > mode->hdisplay) {
+ if (other_mode->hdisplay > 2560)
+ tmp = 3; /* 1/4 */
+ else
+ tmp = 0; /* 1/2 */
+ } else
+ tmp = 0; /* 1/2 */
+ } else if (mode)
+ tmp = 2; /* whole */
+ else if (other_mode)
+ tmp = 3; /* 1/4 */
+
+ /* second controller of the pair uses second half of the lb */
+ if (radeon_crtc->crtc_id % 2)
+ tmp += 4;
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ return 3840 * 2;
+ case 1:
+ case 5:
+ return 5760 * 2;
+ case 2:
+ case 6:
+ return 7680 * 2;
+ case 3:
+ case 7:
+ return 1920 * 2;
+ }
+}
+
+static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ }
+}
+
+struct evergreen_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the raw DRAM Bandwidth. */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
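+	/* e.g. two 32-bit channels at an effective 1GHz yclk give roughly
+	 * 8 bytes * 1000MHz * 0.7 = 5600 MB/s of raw DRAM bandwidth
+	 * (illustrative numbers only)
+	 */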
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, disp_clk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+ u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = evergreen_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
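+	/* if a destination line cannot be refilled within the active display
+	 * time, pad the returned watermark by the shortfall
+	 */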
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (evergreen_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &radeon_crtc->base.mode;
+ struct evergreen_wm_params wm;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 pipe_offset = radeon_crtc->crtc_id * 16;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (radeon_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+ /* set for low clocks */
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !evergreen_check_latency_hiding(&wm) ||
+ (rdev->disp_priority == 2)) {
+ DRM_INFO("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
+
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ radeon_update_display_priority(rdev);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
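+ /* the line buffer is shared between each pair of display heads, so
+ * adjust the split and program the watermarks per pair of crtcs
+ */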
+ for (i = 0; i < rdev->num_crtc; i += 2) {
+ mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+ mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
}
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
@@ -677,7 +1113,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
- int r;
+ int r, i;
uint32_t cp_me;
r = radeon_ring_lock(rdev, 7);
@@ -697,16 +1133,39 @@ static int evergreen_cp_start(struct radeon_device *rdev)
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, 4);
+ r = radeon_ring_lock(rdev, evergreen_default_size + 15);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- /* init some VGT regs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, 0xe);
- radeon_ring_write(rdev, 0x10);
+
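+ /* the default register state below is bracketed by preamble
+ * begin/end markers and later applied via the CLEAR_STATE packet
+ */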
+ /* setup clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < evergreen_default_size; i++)
+ radeon_ring_write(rdev, evergreen_default_state[i]);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+
radeon_ring_unlock_commit(rdev);
return 0;
@@ -731,7 +1190,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
- tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -745,8 +1204,19 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
- WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
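+ /* SCRATCH_UMSK gates scratch register writeback to memory; when no
+ * writeback buffer is available, set RB_NO_UPDATE so the CP does not
+ * write the read pointer to memory and the driver polls the register
+ * instead
+ */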
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
@@ -1584,6 +2054,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -1760,8 +2231,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
- /* XXX use writeback */
- wptr = RREG32(IH_RB_WPTR);
+ if (rdev->wb.enabled)
+ wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ else
+ wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happen start parsing interrupt
@@ -2000,6 +2473,7 @@ restart_ih:
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n");
@@ -2048,26 +2522,18 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
evergreen_gpu_init(rdev);
-#if 0
- if (!rdev->r600_blit.shader_obj) {
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
- }
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = evergreen_blit_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
+ evergreen_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
-#endif
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -2087,8 +2553,6 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
return 0;
}
@@ -2122,23 +2586,43 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
-#if 0
int r;
-#endif
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
evergreen_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
-#if 0
+
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
-#endif
+
+ return 0;
+}
+
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_pages, struct radeon_fence *fence)
+{
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -2246,8 +2730,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2269,10 +2753,10 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- /*r600_blit_fini(rdev);*/
+ evergreen_blit_fini(rdev);
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644
index 000000000000..ac3b6dde23db
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr)
+{
+ u32 cb_color_info;
+ int pitch, slice;
+
+ h = ALIGN(h, 8);
+ if (h < 8)
+ h = 8;
+
+ cb_color_info = ((format << 2) | (1 << 24));
+ pitch = (w / 8) - 1;
+ slice = ((w * h) / 64) - 1;
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, pitch);
+ radeon_ring_write(rdev, slice);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(rdev, (1 << 4));
+ radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr)
+{
+ u32 cp_coher_size;
+
+ if (size == 0xffffffff)
+ cp_coher_size = 0xffffffff;
+ else
+ cp_coher_size = ((size + 255) >> 8);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(rdev, sync_type);
+ radeon_ring_write(rdev, cp_coher_size);
+ radeon_ring_write(rdev, mc_addr >> 8);
+ radeon_ring_write(rdev, 10); /* poll interval */
+}
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+ u64 gpu_addr;
+
+ /* VS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 2);
+ radeon_ring_write(rdev, 0);
+
+ /* PS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, 1);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 2);
+
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 10 + 1 sync (5) = 15 */
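+/* Point the vertex fetch constant at the blit vertex buffer: three
+ * vertices of four floats (x, y, s, t), i.e. a 48 byte buffer with a
+ * 16 byte stride.
+ */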
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+ u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+ /* high addr, stride */
+ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+ /* xyzw swizzles */
+ sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0x580);
+ radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+ radeon_ring_write(rdev, 48 - 1); /* size */
+ radeon_ring_write(rdev, sq_vtx_constant_word2);
+ radeon_ring_write(rdev, sq_vtx_constant_word3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+ if (rdev->family == CHIP_CEDAR)
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+ else
+ cp_set_surface_sync(rdev,
+ PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+
+}
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr)
+{
+ u32 sq_tex_resource_word0, sq_tex_resource_word1;
+ u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+ if (h < 1)
+ h = 1;
+
+ sq_tex_resource_word0 = (1 << 0); /* 2D */
+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+ ((w - 1) << 18));
+ sq_tex_resource_word1 = ((h - 1) << 0);
+ /* xyzw swizzles */
+ sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
+
+ sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word0);
+ radeon_ring_write(rdev, sq_tex_resource_word1);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(rdev, sq_tex_resource_word4);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_tex_resource_word7);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2)
+{
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, DI_PT_RECTLIST);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(rdev, 1);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(rdev, 3);
+ radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+
+}
+
+/* emits 30 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+ int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+ int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+ int num_hs_threads, num_ls_threads;
+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+ int num_hs_stack_entries, num_ls_stack_entries;
+
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ default:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_REDWOOD:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_JUNIPER:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ }
+
+ if (rdev->family == CHIP_CEDAR)
+ sq_config = 0;
+ else
+ sq_config = VC_ENABLE;
+
+ sq_config |= (EXPORT_SRC_C |
+ CS_PRIO(0) |
+ LS_PRIO(0) |
+ HS_PRIO(0) |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+ NUM_VS_GPRS(num_vs_gprs) |
+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+ NUM_ES_GPRS(num_es_gprs));
+ sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+ NUM_LS_GPRS(num_ls_gprs));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+ NUM_VS_THREADS(num_vs_threads) |
+ NUM_GS_THREADS(num_gs_threads) |
+ NUM_ES_THREADS(num_es_threads));
+ sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+ NUM_LS_THREADS(num_ls_threads));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+ sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+ NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* disable dyn gprs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0);
+
+ /* SQ config */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, sq_config);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt);
+ radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+
+ /* CONTEXT_CONTROL */
+ radeon_ring_write(rdev, 0xc0012800);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* SET_SAMPLER */
+ radeon_ring_write(rdev, 0xc0036e00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000012);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+}
+
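+/*
+ * Convert a small unsigned integer (the masking below limits it to
+ * values under 2^14) into the bit pattern of the equivalent IEEE-754
+ * single precision float; the blit vertex buffer expects float
+ * coordinates.  For example i2f(16) returns 0x41800000, the encoding
+ * of 16.0f.
+ */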
+static inline uint32_t i2f(uint32_t input)
+{
+ u32 result, i, exponent, fraction;
+
+ if ((input & 0x3fff) == 0)
+ result = 0; /* 0 is a special case */
+ else {
+ exponent = 140; /* exponent biased by 127; */
+ fraction = (input & 0x3fff) << 10; /* cheat and only
+ handle numbers below 2^14 */
+ for (i = 0; i < 14; i++) {
+ if (fraction & 0x800000)
+ break;
+ else {
+ fraction = fraction << 1; /* keep
+ shifting left until top bit = 1 */
+ exponent = exponent - 1;
+ }
+ }
+ result = exponent << 23 | (fraction & 0x7fffff); /* mask
+ off top bit; assumed 1 */
+ }
+ return result;
+}
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+ u32 obj_size;
+ int r;
+ void *ptr;
+
+ /* pin copy shader into vram if already initialized */
+ if (rdev->r600_blit.shader_obj)
+ goto done;
+
+ mutex_init(&rdev->r600_blit.mutex);
+ rdev->r600_blit.state_offset = 0;
+ rdev->r600_blit.state_len = 0;
+ obj_size = 0;
+
+ rdev->r600_blit.vs_offset = obj_size;
+ obj_size += evergreen_vs_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.ps_offset = obj_size;
+ obj_size += evergreen_ps_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("evergreen failed to allocate shader\n");
+ return r;
+ }
+
+ DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+ obj_size,
+ rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+ if (r) {
+ DRM_ERROR("failed to map blit object %d\n", r);
+ return r;
+ }
+
+ memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
+ memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ return 0;
+}
+
+void evergreen_blit_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static int evergreen_vb_ib_get(struct radeon_device *rdev)
+{
+ int r;
+ r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ if (r) {
+ DRM_ERROR("failed to get IB for vertex buffer\n");
+ return r;
+ }
+
+ rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_used = 0;
+ return 0;
+}
+
+static void evergreen_vb_ib_put(struct radeon_device *rdev)
+{
+ radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+}
+
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+{
+ int r;
+ int ring_size, line_size;
+ int max_size;
+ /* loops of emits + fence emit possible */
+ int dwords_per_loop = 74, num_loops;
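+ /* 74 matches the per-pass emits in evergreen_kms_blit_copy:
+ * tex 10 + sync 5 + render target 17 + scissors 12 + vtx 15 +
+ * draw 10 + sync 5
+ */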
+
+ r = evergreen_vb_ib_get(rdev);
+ if (r)
+ return r;
+
+ /* 8 bpp vs 32 bpp for xfer unit */
+ if (size_bytes & 3)
+ line_size = 8192;
+ else
+ line_size = 8192 * 4;
+
+ max_size = 8192 * line_size;
+
+ /* major loops cover the max size transfer */
+ num_loops = ((size_bytes + max_size) / max_size);
+ /* minor loops cover the extra non aligned bits */
+ num_loops += ((size_bytes % line_size) ? 1 : 0);
+ /* calculate number of loops correctly */
+ ring_size = num_loops * dwords_per_loop;
+ /* set default + shaders */
+ ring_size += 46; /* shaders + def state */
+ ring_size += 10; /* fence emit for VB IB */
+ ring_size += 5; /* done copy */
+ ring_size += 10; /* fence emit for done copy */
+ r = radeon_ring_lock(rdev, ring_size);
+ if (r)
+ return r;
+
+ set_default_state(rdev); /* 30 */
+ set_shaders(rdev); /* 16 */
+ return 0;
+}
+
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+ int r;
+
+ if (rdev->r600_blit.vb_ib)
+ evergreen_vb_ib_put(rdev);
+
+ if (fence)
+ r = radeon_fence_emit(rdev, fence);
+
+ radeon_ring_unlock_commit(rdev);
+}
+
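+/*
+ * Each pass below draws a cur_size x h rectangle whose pixel shader
+ * copies from the source texture to the render target.  Byte-unaligned
+ * transfers take the 8bpp path (at most 8192 bytes per line); dword
+ * aligned transfers take the 32bpp path and move four times as much
+ * per line.
+ */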
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes)
+{
+ int max_bytes;
+ u64 vb_gpu_addr;
+ u32 *vb;
+
+ DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
+ size_bytes, rdev->r600_blit.vb_used);
+ vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+ max_bytes = 8192;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = src_gpu_addr & 255;
+ int dst_x = dst_gpu_addr & 255;
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x);
+ vb[1] = 0;
+ vb[2] = i2f(src_x);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f(dst_x + cur_size);
+ vb[9] = i2f(h);
+ vb[10] = i2f(src_x + cur_size);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8,
+ src_x + cur_size, h, src_x + cur_size,
+ src_gpu_addr);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8,
+ dst_x + cur_size, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+
+ /* 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ } else {
+ max_bytes = 8192 * 4;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = (src_gpu_addr & 255);
+ int dst_x = (dst_gpu_addr & 255);
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+ WARN_ON(1);
+ }
+
+ vb[0] = i2f(dst_x / 4);
+ vb[1] = 0;
+ vb[2] = i2f(src_x / 4);
+ vb[3] = 0;
+
+ vb[4] = i2f(dst_x / 4);
+ vb[5] = i2f(h);
+ vb[6] = i2f(src_x / 4);
+ vb[7] = i2f(h);
+
+ vb[8] = i2f((dst_x + cur_size) / 4);
+ vb[9] = i2f(h);
+ vb[10] = i2f((src_x + cur_size) / 4);
+ vb[11] = i2f(h);
+
+ /* src 10 */
+ set_tex_resource(rdev, FMT_8_8_8_8,
+ (src_x + cur_size) / 4,
+ h, (src_x + cur_size) / 4,
+ src_gpu_addr);
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst 17 */
+ set_render_target(rdev, COLOR_8_8_8_8,
+ (dst_x + cur_size) / 4, h,
+ dst_gpu_addr);
+
+ /* scissors 12 */
+ set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);
+
+ /* Vertex buffer setup 15 */
+ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+ set_vtx_resource(rdev, vb_gpu_addr);
+
+ /* draw 10 */
+ draw_auto(rdev);
+
+ /* 5 */
+ cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ /* 74 ring dwords per loop */
+ vb += 12;
+ rdev->r600_blit.vb_used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644
index 000000000000..ef1d28c07fbf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
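+
+/*
+ * Table format: each entry is a PM4 SET_CONTEXT_REG packet, i.e. a
+ * header of the form 0xc0NN6900 (type 3, opcode 0x69, NN register
+ * values) followed by the register offset and the NN values.
+ */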
+
+const u32 evergreen_default_state[] =
+{
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x00000010,
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0106900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc00d6900,
+ 0x00000202,
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x0000022a,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc00b6900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+const u32 evergreen_vs[] =
+{
+ 0x00000004,
+ 0x80800400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15200688,
+ 0x00000000,
+ 0x00000000,
+ 0x3c000000,
+ 0x67961001,
+ 0x00080000,
+ 0x00000000,
+ 0x1c000000,
+ 0x67961000,
+ 0x00000008,
+ 0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+ 0x00000003,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95200688,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644
index 000000000000..bb8d6c751595
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 9b7532dd30f7..113c70cc8b39 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -412,6 +412,19 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+/* display watermarks */
+#define DC_LB_MEMORY_SPLIT 0x6b0c
+#define PRIORITY_A_CNT 0x6b18
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x6b1c
+#define PIPE0_ARBITRATION_CONTROL3 0x0bf0
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define PIPE0_LATENCY_CONTROL 0x0bf4
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
@@ -645,6 +658,8 @@
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_RB_OFFSET 0x4B
#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
@@ -802,6 +817,11 @@
#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+#define PA_SC_SCREEN_SCISSOR_TL 0x28030
+#define PA_SC_GENERIC_SCISSOR_TL 0x28240
+#define PA_SC_WINDOW_SCISSOR_TL 0x28204
+#define VGT_PRIMITIVE_TYPE 0x8958
+
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e59422320bb6..0e8f28a68927 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-int r100_wb_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.wb_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
- return r;
- }
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
- return r;
- }
- }
- WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
- WREG32(R_00070C_CP_RB_RPTR_ADDR,
- S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
- WREG32(R_000770_SCRATCH_UMSK, 0xff);
- return 0;
-}
-
-void r100_wb_disable(struct radeon_device *rdev)
-{
- WREG32(R_000770_SCRATCH_UMSK, 0);
-}
-
-void r100_wb_fini(struct radeon_device *rdev)
-{
- int r;
-
- r100_wb_disable(rdev);
- if (rdev->wb.wb_obj) {
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- dev_err(rdev->dev, "(%d) can't finish WB\n", r);
- return;
- }
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- radeon_bo_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
- REG_SET(RADEON_MAX_FETCH, max_fetch) |
- RADEON_RB_NO_UPDATE);
+ REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
tmp |= RADEON_BUF_SWAP_32BIT;
#endif
- WREG32(RADEON_CP_RB_CNTL, tmp);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
- WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(R_00070C_CP_RB_RPTR_ADDR,
+ S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+ WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+ if (rdev->wb.enabled)
+ WREG32(R_000770_SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RADEON_RB_NO_UPDATE;
+ WREG32(R_000770_SCRATCH_UMSK, 0);
+ }
+
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
@@ -1052,6 +1003,7 @@ void r100_cp_disable(struct radeon_device *rdev)
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(R_000770_SCRATCH_UMSK, 0);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
@@ -2318,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
+ if (rdev->mc.aper_size > config_aper_size)
+ config_aper_size = rdev->mc.aper_size;
+
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
@@ -3225,6 +3180,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
+ if (track->textures[u].lookup_disable)
+ continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
@@ -3459,6 +3416,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture unit are disabled */
track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
@@ -3737,6 +3695,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3746,9 +3710,6 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -3782,7 +3743,7 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
@@ -3792,7 +3753,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
@@ -3905,7 +3866,7 @@ int r100_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index f47cdca1c004..af65600e6564 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -46,6 +46,7 @@ struct r100_cs_track_texture {
unsigned height_11;
bool use_pitch;
bool enabled;
+ bool lookup_disable;
bool roundup_w;
bool roundup_h;
unsigned compress_format;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 0266d72e0a4c..d2408c395619 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
+ if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ track->textures[i].lookup_disable = true;
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c827738ad7dd..34527e600fe9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 59f7bccc5be0..c387346f93a9 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_pipes_init(rdev);
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
- r = r100_wb_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
- }
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev)
{
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 1458dee902dd..3c8677f9e385 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7b65e4efe8af..33952a12f0a3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1608,8 +1608,11 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
- tiling_config |= GROUP_SIZE(0);
- rdev->config.r600.tiling_group_size = 256;
+ tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.r600.tiling_group_size = 512;
+ else
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1920,6 +1923,7 @@ void r600_cp_stop(struct radeon_device *rdev)
{
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ WREG32(SCRATCH_UMSK, 0);
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2152,7 +2156,7 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
- tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2166,8 +2170,19 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
- WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
@@ -2219,9 +2234,10 @@ void r600_scratch_init(struct radeon_device *rdev)
int i;
rdev->scratch.num_reg = 7;
+ rdev->scratch.reg_base = SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
- rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
@@ -2265,88 +2281,34 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
-void r600_wb_disable(struct radeon_device *rdev)
-{
- int r;
-
- WREG32(SCRATCH_UMSK, 0);
- if (rdev->wb.wb_obj) {
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0))
- return;
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- }
-}
-
-void r600_wb_fini(struct radeon_device *rdev)
-{
- r600_wb_disable(rdev);
- if (rdev->wb.wb_obj) {
- radeon_bo_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
-int r600_wb_enable(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
- return r;
- }
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- r600_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->wb.wb_obj);
- dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
- r600_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
- r600_wb_fini(rdev);
- return r;
- }
- }
- WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
- WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
- WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
- WREG32(SCRATCH_UMSK, 0xff);
- return 0;
-}
-
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
-
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
- /* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
- /* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, fence->seq);
- /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(rdev, RB_INT_STAT);
+ if (rdev->wb.use_event) {
+ u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+ (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(rdev, addr & 0xffffffff);
+ radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, 0);
+ } else {
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
+ }
}
int r600_copy_blit(struct radeon_device *rdev,
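
The EVENT_WRITE_EOP path in r600_fence_ring_emit() above lands the fence sequence in the shared writeback page instead of a scratch register. A minimal sketch of the address it targets, assuming only names already used in that hunk (the helper name itself is hypothetical):

	/* Sketch: GPU address of this fence's slot in the writeback page.
	 * It sits in the R600_WB_EVENT_OFFSET region, at the same relative
	 * offset the fence scratch register has from scratch.reg_base.
	 */
	u64 fence_eop_addr(struct radeon_device *rdev)
	{
		return rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
		       (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
	}
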
@@ -2428,19 +2390,12 @@ int r600_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- /* pin copy shader into vram */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2459,8 +2414,7 @@ int r600_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
+
return 0;
}
@@ -2519,7 +2473,7 @@ int r600_suspend(struct radeon_device *rdev)
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
if (rdev->r600_blit.shader_obj) {
@@ -2616,8 +2570,8 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2647,8 +2601,8 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
@@ -2983,10 +2937,13 @@ int r600_irq_init(struct radeon_device *rdev)
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
(rb_bufsz << 1));
- /* WPTR writeback, not yet */
- /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
- WREG32(IH_RB_WPTR_ADDR_LO, 0);
- WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ if (rdev->wb.enabled)
+ ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+ /* set the writeback address whether it's enabled or not */
+ WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
WREG32(IH_RB_CNTL, ih_rb_cntl);
@@ -3070,6 +3027,7 @@ int r600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3244,8 +3202,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
- /* XXX use writeback */
- wptr = RREG32(IH_RB_WPTR);
+ if (rdev->wb.enabled)
+ wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ else
+ wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happens, start parsing interrupt
@@ -3433,6 +3393,7 @@ restart_ih:
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
+ radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n");
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 3473c00781ff..8362974ef41a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
- /* don't reinitialize blit */
+ /* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
- return 0;
+ goto done;
+
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev)
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -554,7 +567,7 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
@@ -568,7 +581,7 @@ int r600_vb_ib_get(struct radeon_device *rdev)
return 0;
}
-void r600_vb_ib_put(struct radeon_device *rdev)
+static void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
@@ -650,8 +663,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
@@ -672,17 +685,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1);
-
-#if 0
- r600_vb_ib_put(rdev);
-
- r600_nomm_put_vb(dev);
- r600_nomm_get_vb(dev);
- if (!dev_priv->blit_vb)
- return;
- set_shaders(dev);
- vb = r600_nomm_get_vb_ptr(dev);
-#endif
}
vb[0] = i2f(dst_x);
@@ -744,8 +746,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
- src_gpu_addr = src_gpu_addr & ~255;
- dst_gpu_addr = dst_gpu_addr & ~255;
+ src_gpu_addr = src_gpu_addr & ~255ULL;
+ dst_gpu_addr = dst_gpu_addr & ~255ULL;
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
@@ -767,17 +769,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1);
}
-#if 0
- if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
- r600_nomm_put_vb(dev);
- r600_nomm_get_vb(dev);
- if (!rdev->blit_vb)
- return;
-
- set_shaders(dev);
- vb = r600_nomm_get_vb_ptr(dev);
- }
-#endif
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 250a3a918193..37cc2aa9f923 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -170,6 +170,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
struct r600_cs_track *track = p->track;
u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
volatile u32 *ib = p->ib->ptr;
+ unsigned array_mode;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
@@ -185,12 +186,12 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
/* pitch is the number of 8x8 tiles per row */
pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
- height = size / (pitch * 8 * bpe);
+ slice_tile_max *= 64;
+ height = slice_tile_max / (pitch * 8);
if (height > 8192)
height = 8192;
- if (height > 7)
- height &= ~0x7;
- switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+ array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+ switch (array_mode) {
case V_0280A0_ARRAY_LINEAR_GENERAL:
/* technically height & 0x7 */
break;
@@ -214,6 +215,9 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
__func__, __LINE__, pitch);
return -EINVAL;
}
+ /* avoid breaking userspace */
+ if (height > 7)
+ height &= ~0x7;
if (!IS_ALIGNED(height, 8)) {
dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
__func__, __LINE__, height);
@@ -222,13 +226,13 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
break;
case V_0280A0_ARRAY_2D_TILED_THIN1:
pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
+ (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
__func__, __LINE__, pitch);
return -EINVAL;
}
- if (!IS_ALIGNED((height / 8), track->nbanks)) {
+ if (!IS_ALIGNED((height / 8), track->npipes)) {
dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
__func__, __LINE__, height);
return -EINVAL;
@@ -243,8 +247,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
/* check offset */
tmp = height * pitch * 8 * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
- dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
- return -EINVAL;
+ if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+ /* the initial DDX does bad things with the CB size occasionally */
+ /* it rounds up height too far for slice tile max but the BO is smaller */
+ tmp = (height - 7) * 8 * bpe;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+ return -EINVAL;
+ }
}
if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
@@ -296,7 +310,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* Check depth buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
+ u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max;
if (track->db_bo == NULL) {
dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
@@ -340,11 +354,11 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
} else {
size = radeon_bo_size(track->db_bo);
pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
- height = size / (pitch * 8 * bpe);
- height &= ~0x7;
- if (!height)
- height = 8;
-
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / (pitch * 8);
+ if (height > 8192)
+ height = 8192;
switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
case V_028010_ARRAY_1D_TILED_THIN1:
pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
@@ -353,6 +367,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
__func__, __LINE__, pitch);
return -EINVAL;
}
+ /* don't break userspace */
+ height &= ~0x7;
if (!IS_ALIGNED(height, 8)) {
dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
__func__, __LINE__, height);
@@ -361,13 +377,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
break;
case V_028010_ARRAY_2D_TILED_THIN1:
pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
__func__, __LINE__, pitch);
return -EINVAL;
}
- if ((height / 8) & (track->nbanks - 1)) {
+ if (!IS_ALIGNED((height / 8), track->npipes)) {
dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
__func__, __LINE__, height);
return -EINVAL;
@@ -1138,7 +1154,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
break;
case V_038000_ARRAY_2D_TILED_THIN1:
pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
__func__, __LINE__, pitch);
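
The 2D-tiled pitch_align changes in this file divide the old expression by 8, putting the alignment in the same 8x8-tile units as pitch itself. As a purely illustrative worked example (values not taken from any specific ASIC): with group_size = 256, bpe = 4, nsamples = 1 and nbanks = 4, pitch_align = max(4, ((256 / 8) / (4 * 1)) * 4) / 8 = max(4, 32) / 8 = 4, so the IS_ALIGNED() check accepts pitches of 4, 8, 12, ... tiles.
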
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 858a1920c0d7..966a793e225b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -474,6 +474,7 @@
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
#define VM_CONTEXT0_CNTL 0x1410
@@ -775,7 +776,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
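
The EVENT_TYPE/EVENT_INDEX/DATA_SEL/INT_SEL helpers added above are the fields of the five-dword EVENT_WRITE_EOP payload. A minimal sketch of how the fence path earlier in this patch assembles such a packet (addr and seq stand in for the fence address and sequence number):

	/* Sketch: EOP event that flushes caches, writes 32-bit data and raises an IRQ. */
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(rdev, addr & 0xffffffff);             /* address, low 32 bits */
	radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) |  /* address, high bits */
				DATA_SEL(1) |                   /* send low 32-bit data */
				INT_SEL(2));                    /* irq when write is confirmed */
	radeon_ring_write(rdev, seq);                           /* data, low 32 bits */
	radeon_ring_write(rdev, 0);                             /* data, high 32 bits */
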
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9ff38c99a6ea..73f600d39ad4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -88,7 +88,6 @@ extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
-extern int radeon_new_pll;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
@@ -366,6 +365,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev);
*/
struct radeon_scratch {
unsigned num_reg;
+ uint32_t reg_base;
bool free[32];
uint32_t reg[32];
};
@@ -594,8 +594,15 @@ struct radeon_wb {
struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
+ bool enabled;
+ bool use_event;
};
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define R600_WB_IH_WPTR_OFFSET 2048
+#define R600_WB_EVENT_OFFSET 3072
+
/**
* struct radeon_pm - power management datas
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
@@ -1124,6 +1131,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
@@ -1341,6 +1354,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
@@ -1425,9 +1441,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_wb_fini(struct radeon_device *rdev);
-extern int r600_wb_enable(struct radeon_device *rdev);
-extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
@@ -1465,6 +1478,8 @@ extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
extern int evergreen_irq_set(struct radeon_device *rdev);
+extern int evergreen_blit_init(struct radeon_device *rdev);
+extern void evergreen_blit_fini(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 25e1dd197791..64fb89ecbf74 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
- .copy_blit = NULL,
- .copy_dma = NULL,
- .copy = NULL,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a5aff755f0d2..740988244143 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_vram_init_sizes(struct radeon_device *rdev);
-void r100_wb_disable(struct radeon_device *rdev);
-void r100_wb_fini(struct radeon_device *rdev);
-int r100_wb_init(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
void r100_restore_sanity(struct radeon_device *rdev);
@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
+int evergreen_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 8e43ddae70cc..04cac7ec9039 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
- if (!radeon_new_pll)
- p1pll->pll_out_min = 64800;
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
return false;
}
-static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
- radeon_encoder
- *encoder,
- int id)
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
{
- struct drm_device *dev = encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
- struct radeon_atom_ss *ss = NULL;
- int i;
-
- if (id > ATOM_MAX_SS_ENTRY)
- return NULL;
+ int i, num_indices;
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
- ss =
- kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
-
- if (!ss)
- return NULL;
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
- for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+ for (i = 0; i < num_indices; i++) {
if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
@@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
- break;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+union asic_ss_info {
+ struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ uint16_t data_offset, size;
+ union asic_ss_info *ss_info;
+ uint8_t frev, crev;
+ int i, num_indices;
+
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+
+ ss_info =
+ (union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+ return true;
+ }
+ }
+ break;
+ case 2:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ return true;
+ }
}
+ break;
+ case 3:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ return true;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+ break;
}
+
}
- return ss;
+ return false;
}
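
Both spread-spectrum lookups above now fill a caller-supplied struct and return a bool rather than kzalloc'ing a radeon_atom_ss. A minimal usage sketch, with ss_id standing in for whatever id the caller has on hand:

	struct radeon_atom_ss ss;

	/* Sketch: ss holds valid parameters only when the lookup returns true. */
	if (radeon_atombios_get_ppll_ss_info(rdev, &ss, ss_id))
		DRM_DEBUG("SS id %d: %d%%\n", ss_id, ss.percentage);
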
union lvds_info {
@@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
- lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+ lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY)
@@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
- lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
-
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll == 0)
- lvds->pll_algo = PLL_ALGO_LEGACY;
- else
- lvds->pll_algo = PLL_ALGO_NEW;
- } else {
- if (radeon_new_pll == 1)
- lvds->pll_algo = PLL_ALGO_NEW;
- else
- lvds->pll_algo = PLL_ALGO_LEGACY;
- }
+ lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
encoder->native_mode = lvds->native_mode;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ecc1a8fafbfd..4dac4b0a02ee 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
}
}
+ if (property == rdev->mode_info.underscan_hborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_hborder != val) {
+ radeon_encoder->underscan_hborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.underscan_vborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_vborder != val) {
+ radeon_encoder->underscan_vborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) {
@@ -635,6 +663,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
} else {
+
+ /* if we aren't forcing, don't do destructive polling */
+ if (!force)
+ return connector->status;
+
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
@@ -822,6 +855,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
+ if (!force) {
+ ret = connector->status;
+ goto out;
+ }
+
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -1153,10 +1191,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1181,10 +1226,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
@@ -1212,10 +1264,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
UNDERSCAN_AUTO);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3eef567b0421..017ac54920fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
}
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
- uint32_t gpu_addr)
+ uint64_t gpu_addr)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
}
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 256d204a6d24..8adfedfe547f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev)
} else {
rdev->scratch.num_reg = 7;
}
+ rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
- rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj) {
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ }
+ rdev->wb.enabled = false;
+}
+
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+ radeon_wb_disable(rdev);
+ if (rdev->wb.wb_obj) {
+ radeon_bo_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+int radeon_wb_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+
+ /* disable event_write fences */
+ rdev->wb.use_event = false;
+ /* disabled via module param */
+ if (radeon_no_wb == 1)
+ rdev->wb.enabled = false;
+ else {
+ /* often unreliable on AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ rdev->wb.enabled = false;
+ } else {
+ rdev->wb.enabled = true;
+ /* event_write fences are only available on r600+ */
+ if (rdev->family >= CHIP_R600)
+ rdev->wb.use_event = true;
+ }
+ }
+
+ dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+ return 0;
+}
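
radeon_wb_init() above allocates one GTT page that several consumers share at fixed byte offsets, matching the defines added to radeon.h in this patch: RADEON_WB_SCRATCH_OFFSET (0) shadows the fence scratch registers, RADEON_WB_CP_RPTR_OFFSET (1024) holds the CP read pointer, R600_WB_IH_WPTR_OFFSET (2048) the IH write pointer, and R600_WB_EVENT_OFFSET (3072) the EOP fence data on r600+. CPU-side reads go through the mapped array, for example:

	/* Sketch: read the IH write pointer from the writeback page (dword indexed). */
	u32 wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET / 4];
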
+
/**
* radeon_vram_location - try to find VRAM location
* @rdev: radeon device structure holding all necessary information
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b92d2f2fcbed..0383631da69c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -454,13 +454,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-static void radeon_compute_pll_legacy(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -513,7 +513,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -631,214 +631,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
-static bool
-calc_fb_div(struct radeon_pll *pll,
- uint32_t freq,
- uint32_t post_div,
- uint32_t ref_div,
- uint32_t *fb_div,
- uint32_t *fb_div_frac)
-{
- fixed20_12 feedback_divider, a, b;
- u32 vco_freq;
-
- vco_freq = freq * post_div;
- /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
- a.full = dfixed_const(pll->reference_freq);
- feedback_divider.full = dfixed_const(vco_freq);
- feedback_divider.full = dfixed_div(feedback_divider, a);
- a.full = dfixed_const(ref_div);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
-
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
- a.full = dfixed_const(10);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full += dfixed_const_half(0);
- feedback_divider.full = dfixed_floor(feedback_divider);
- feedback_divider.full = dfixed_div(feedback_divider, a);
-
- /* *fb_div = floor(feedback_divider); */
- a.full = dfixed_floor(feedback_divider);
- *fb_div = dfixed_trunc(a);
- /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
- a.full = dfixed_const(10);
- b.full = dfixed_mul(feedback_divider, a);
-
- feedback_divider.full = dfixed_floor(feedback_divider);
- feedback_divider.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full = b.full - feedback_divider.full;
- *fb_div_frac = dfixed_trunc(feedback_divider);
- } else {
- /* *fb_div = floor(feedback_divider + 0.5); */
- feedback_divider.full += dfixed_const_half(0);
- feedback_divider.full = dfixed_floor(feedback_divider);
-
- *fb_div = dfixed_trunc(feedback_divider);
- *fb_div_frac = 0;
- }
-
- if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
- return false;
- else
- return true;
-}
-
-static bool
-calc_fb_ref_div(struct radeon_pll *pll,
- uint32_t freq,
- uint32_t post_div,
- uint32_t *fb_div,
- uint32_t *fb_div_frac,
- uint32_t *ref_div)
-{
- fixed20_12 ffreq, max_error, error, pll_out, a;
- u32 vco;
- u32 pll_out_min, pll_out_max;
-
- if (pll->flags & RADEON_PLL_IS_LCD) {
- pll_out_min = pll->lcd_pll_out_min;
- pll_out_max = pll->lcd_pll_out_max;
- } else {
- pll_out_min = pll->pll_out_min;
- pll_out_max = pll->pll_out_max;
- }
-
- ffreq.full = dfixed_const(freq);
- /* max_error = ffreq * 0.0025; */
- a.full = dfixed_const(400);
- max_error.full = dfixed_div(ffreq, a);
-
- for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
- if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
- vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
- vco = vco / ((*ref_div) * 10);
-
- if ((vco < pll_out_min) || (vco > pll_out_max))
- continue;
-
- /* pll_out = vco / post_div; */
- a.full = dfixed_const(post_div);
- pll_out.full = dfixed_const(vco);
- pll_out.full = dfixed_div(pll_out, a);
-
- if (pll_out.full >= ffreq.full) {
- error.full = pll_out.full - ffreq.full;
- if (error.full <= max_error.full)
- return true;
- }
- }
- }
- return false;
-}
-
-static void radeon_compute_pll_new(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
-{
- u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
- u32 best_freq = 0, vco_frequency;
- u32 pll_out_min, pll_out_max;
-
- if (pll->flags & RADEON_PLL_IS_LCD) {
- pll_out_min = pll->lcd_pll_out_min;
- pll_out_max = pll->lcd_pll_out_max;
- } else {
- pll_out_min = pll->pll_out_min;
- pll_out_max = pll->pll_out_max;
- }
-
- /* freq = freq / 10; */
- do_div(freq, 10);
-
- if (pll->flags & RADEON_PLL_USE_POST_DIV) {
- post_div = pll->post_div;
- if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
- goto done;
-
- vco_frequency = freq * post_div;
- if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
- goto done;
-
- if (pll->flags & RADEON_PLL_USE_REF_DIV) {
- ref_div = pll->reference_div;
- if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
- goto done;
- if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
- goto done;
- }
- } else {
- for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
- if (pll->flags & RADEON_PLL_LEGACY) {
- if ((post_div == 5) ||
- (post_div == 7) ||
- (post_div == 9) ||
- (post_div == 10) ||
- (post_div == 11))
- continue;
- }
-
- if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
- continue;
-
- vco_frequency = freq * post_div;
- if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
- continue;
- if (pll->flags & RADEON_PLL_USE_REF_DIV) {
- ref_div = pll->reference_div;
- if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
- goto done;
- if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
- break;
- } else {
- if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
- break;
- }
- }
- }
-
- best_freq = pll->reference_freq * 10 * fb_div;
- best_freq += pll->reference_freq * fb_div_frac;
- best_freq = best_freq / (ref_div * post_div);
-
-done:
- if (best_freq == 0)
- DRM_ERROR("Couldn't find valid PLL dividers\n");
-
- *dot_clock_p = best_freq / 10;
- *fb_div_p = fb_div;
- *frac_fb_div_p = fb_div_frac;
- *ref_div_p = ref_div;
- *post_div_p = post_div;
-
- DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
-}
-
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
-{
- switch (pll->algo) {
- case PLL_ALGO_NEW:
- radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
- frac_fb_div_p, ref_div_p, post_div_p);
- break;
- case PLL_ALGO_LEGACY:
- default:
- radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
- frac_fb_div_p, ref_div_p, post_div_p);
- break;
- }
-}
-
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -1002,6 +794,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
radeon_underscan_enum_list[i].name);
}
+ rdev->mode_info.underscan_hborder_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
+ if (!rdev->mode_info.underscan_hborder_property)
+ return -ENOMEM;
+ rdev->mode_info.underscan_hborder_property->values[0] = 0;
+ rdev->mode_info.underscan_hborder_property->values[1] = 128;
+
+ rdev->mode_info.underscan_vborder_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
+ if (!rdev->mode_info.underscan_vborder_property)
+ return -ENOMEM;
+ rdev->mode_info.underscan_vborder_property->values[0] = 0;
+ rdev->mode_info.underscan_vborder_property->values[1] = 128;
+
return 0;
}
@@ -1159,8 +969,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
is_hdtv_mode(mode)))) {
- radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
- radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+ if (radeon_encoder->underscan_hborder != 0)
+ radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+ else
+ radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+ if (radeon_encoder->underscan_vborder != 0)
+ radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+ else
+ radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
src_v = crtc->mode.vdisplay;
dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
@@ -1195,3 +1011,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
return true;
}
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param rdev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should go.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * RADEON_SCANOUTPOS_VALID = Query successful.
+ * RADEON_SCANOUTPOS_INVBL = Inside vblank.
+ * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+{
+ u32 stat_crtc = 0, vbl = 0, position = 0;
+ int vbl_start, vbl_end, vtotal, ret = 0;
+ bool in_vbl = true;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 2) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 3) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 4) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 5) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ } else {
+ /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+ if (crtc == 0) {
+ /* Assume vbl_end == 0, get vbl_start from
+ * upper 16 bits.
+ */
+ vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ /* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+ position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= RADEON_SCANOUTPOS_VALID;
+ }
+ }
+
+ /* Decode into vertical and horizontal scanout position. */
+ *vpos = position & 0x1fff;
+ *hpos = (position >> 16) & 0x1fff;
+
+ /* Valid vblank area boundaries from gpu retrieved? */
+ if (vbl > 0) {
+ /* Yes: Decode. */
+ ret |= RADEON_SCANOUTPOS_ACCURATE;
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+ }
+ else {
+ /* No: Fake something reasonable which gives at least ok results. */
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+ vbl_end = 0;
+ }
+
+ /* Test scanout position against vblank region. */
+ if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+ in_vbl = false;
+
+ /* Check if inside vblank area and apply corrective offsets:
+ * vpos will then be >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+
+ /* Inside "upper part" of vblank area? Apply corrective offset if so: */
+ if (in_vbl && (*vpos >= vbl_start)) {
+ vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+ *vpos = *vpos - vtotal;
+ }
+
+ /* Correct for shifted end of vbl at vbl_end. */
+ *vpos = *vpos - vbl_end;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= RADEON_SCANOUTPOS_INVBL;
+
+ return ret;
+}
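
A minimal sketch of how a caller might consume radeon_get_crtc_scanoutpos() given the flag semantics documented above (the surrounding logic is hypothetical):

	int vpos, hpos, flags;

	flags = radeon_get_crtc_scanoutpos(rdev, 0, &vpos, &hpos);
	if (flags & RADEON_SCANOUTPOS_VALID) {
		if (flags & RADEON_SCANOUTPOS_INVBL)
			DRM_DEBUG("crtc 0: %d lines until active scanout\n", -vpos);
		else
			DRM_DEBUG("crtc 0: scanning out line %d\n", vpos);
	}
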
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29c1237c2e7b..88e4ea925900 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -47,9 +47,10 @@
* - 2.4.0 - add crtc id query
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support (no external ABI change), add eg dyn gpr regs
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 6
+#define KMS_DRIVER_MINOR 7
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -93,7 +94,6 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_new_pll = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
@@ -131,9 +131,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(new_pll, "Select new PLL code");
-module_param_named(new_pll, radeon_new_pll, int, 0444);
-
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
@@ -203,8 +200,6 @@ static struct drm_driver driver_old = {
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = {
@@ -291,8 +286,6 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 2c293e8304d6..ae58b6849a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
if (dig->linkb)
@@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
- if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 40b0c087b592..efa211898fe6 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -59,6 +59,8 @@ static struct fb_ops radeonfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index b1f9a81b5d1d..216392d0353b 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ if (rdev->wb.enabled) {
+ u32 scratch_index;
+ if (rdev->wb.use_event)
+ scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ else
+ scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+ seq = rdev->wb.wb[scratch_index/4];
+ } else
+ seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies;
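
The polling change above reads the fence sequence from the writeback page when it is enabled, picking the event or scratch region depending on rdev->wb.use_event; compactly, the selection amounts to:

	/* Sketch: choose the writeback region, then index it by this fence's scratch offset. */
	u32 base = rdev->wb.use_event ? R600_WB_EVENT_OFFSET : RADEON_WB_SCRATCH_OFFSET;
	u32 seq = rdev->wb.enabled ?
		rdev->wb.wb[(base + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base) / 4] :
		RREG32(rdev->fence_drv.scratch_reg);
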
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 305049afde15..ace2e6384d40 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -348,10 +348,25 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
uint64_t base;
@@ -364,14 +379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
DRM_DEBUG_KMS("\n");
/* no fb bound */
- if (!crtc->fb) {
+ if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
- radeon_fb = to_radeon_framebuffer(crtc->fb);
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ }
+ else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
format = 2;
break;
@@ -415,10 +437,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
crtc_offset_cntl = 0;
- pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
- crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
- ((crtc->fb->bits_per_pixel * 8) - 1)) /
- (crtc->fb->bits_per_pixel * 8));
+ pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
+ ((target_fb->bits_per_pixel * 8) - 1)) /
+ (target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
@@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
crtc_tile_x0_y0 = x | (y << 16);
base &= ~0x7ff;
} else {
- int byteshift = crtc->fb->bits_per_pixel >> 4;
+ int byteshift = target_fb->bits_per_pixel >> 4;
int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
crtc_offset_cntl |= (y % 16);
}
} else {
int offset = y * pitch_pixels + x;
- switch (crtc->fb->bits_per_pixel) {
+ switch (target_fb->bits_per_pixel) {
case 8:
offset *= 1;
break;
@@ -496,8 +518,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
- if (old_fb && old_fb != crtc->fb) {
- radeon_fb = to_radeon_framebuffer(old_fb);
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -717,10 +739,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
- if (radeon_new_pll == 1)
- pll->algo = PLL_ALGO_NEW;
- else
- pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
@@ -1040,6 +1058,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_fixup = radeon_crtc_mode_fixup,
.mode_set = radeon_crtc_mode_set,
.mode_set_base = radeon_crtc_set_base,
+ .mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 454c1dc7ea45..92457163d070 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -35,6 +35,7 @@
#include <drm_edid.h>
#include <drm_dp_helper.h>
#include <drm_fixed.h>
+#include <drm_crtc_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -149,12 +150,6 @@ struct radeon_tmds_pll {
#define RADEON_PLL_USE_POST_DIV (1 << 12)
#define RADEON_PLL_IS_LCD (1 << 13)
-/* pll algo */
-enum radeon_pll_algo {
- PLL_ALGO_LEGACY,
- PLL_ALGO_NEW
-};
-
struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
@@ -187,8 +182,6 @@ struct radeon_pll {
/* pll id */
uint32_t id;
- /* pll algo */
- enum radeon_pll_algo algo;
};
struct radeon_i2c_chan {
@@ -240,6 +233,8 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* underscan */
struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
@@ -335,22 +330,24 @@ struct radeon_encoder_ext_tmds {
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
- uint8_t step;
+ uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
+ /* asic_ss */
+ uint16_t rate;
+ uint16_t amount;
};
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
bool coherent_mode;
- int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
- /* atom lvds */
- uint32_t lvds_misc;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+ /* atom lvds/edp */
+ uint32_t lcd_misc;
uint16_t panel_pwr_delay;
- enum radeon_pll_algo pll_algo;
- struct radeon_atom_ss *ss;
+ uint32_t lcd_ss_id;
/* panel mode */
struct drm_display_mode native_mode;
};
@@ -369,6 +366,8 @@ struct radeon_encoder {
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
enum radeon_underscan_type underscan_type;
+ uint32_t underscan_hborder;
+ uint32_t underscan_vborder;
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
@@ -435,6 +434,11 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+/* radeon_get_crtc_scanoutpos() return flags */
+#define RADEON_SCANOUTPOS_VALID (1 << 0)
+#define RADEON_SCANOUTPOS_INVBL (1 << 1)
+#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
+
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
@@ -490,6 +494,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock);
+
extern void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq,
uint32_t *dot_clock_p,
@@ -513,6 +524,10 @@ extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -522,7 +537,13 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
-
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -531,6 +552,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b3b5306bb578..d7ab91416410 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.mem.start << PAGE_SHIFT,
bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rdev = rbo->rdev;
if (bo->mem.mem_type == TTM_PL_VRAM) {
size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) > rdev->mc.visible_vram_size) {
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
if (unlikely(r != 0))
return r;
- offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
/* this should not happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f87efec76236..8c9b2ef32c68 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
- u32 stat_crtc = 0, vbl = 0, position = 0;
+ int crtc, vpos, hpos, vbl_status;
bool in_vbl = true;
- if (ASIC_IS_DCE4(rdev)) {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 2)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 3)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 4)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 5)) {
- vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
- EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
- position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
- EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
- }
- } else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
- position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
- position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
- }
- if (position < vbl && position > 1)
- in_vbl = false;
- } else {
- if (rdev->pm.active_crtcs & (1 << 0)) {
- stat_crtc = RREG32(RADEON_CRTC_STATUS);
- if (!(stat_crtc & 1))
- in_vbl = false;
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- stat_crtc = RREG32(RADEON_CRTC2_STATUS);
- if (!(stat_crtc & 1))
+ /* Iterate over all active crtc's. All crtc's must be in vblank,
+ * otherwise return in_vbl == false.
+ */
+ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+ if (rdev->pm.active_crtcs & (1 << crtc)) {
+ vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
+ if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
+ !(vbl_status & RADEON_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
- if (position < vbl && position > 1)
- in_vbl = false;
-
return in_vbl;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index c332f46340d5..64928814de53 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -2836,6 +2836,7 @@
# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
+# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27)
# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 261e98a276db..6ea798ce8218 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
*/
void radeon_ring_free_size(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_R600)
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- else
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ if (rdev->wb.enabled)
+ rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+ else {
+ if (rdev->family >= CHIP_R600)
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ else
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ }
/* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a823d8fe54c2..fe95bb35317e 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- old_start = old_mem->mm_node->start << PAGE_SHIFT;
- new_start = new_mem->mm_node->start << PAGE_SHIFT;
+ old_start = old_mem->start << PAGE_SHIFT;
+ new_start = new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
}
r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
- if (tmp_mem.mm_node) {
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&glob->lru_lock);
- return r;
- }
+ ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
@@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
out_cleanup:
- if (tmp_mem.mm_node) {
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(tmp_mem.mm_node);
- spin_unlock(&glob->lru_lock);
- return r;
- }
+ ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
@@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
/* RADEON_IS_AGP is set only if AGP is active */
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
}
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
return -EINVAL;
@@ -699,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
int r;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+ gtt->offset = bo_mem->start << PAGE_SHIFT;
if (!gtt->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
}
@@ -798,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].show = &radeon_mm_dump_table;
radeon_mem_types_list[i].driver_features = 0;
if (i == 0)
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
else
- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
}
/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index f78fd592544d..ac40fd39d787 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -22,6 +22,10 @@ evergreen 0x9400
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
@@ -34,6 +38,10 @@ evergreen 0x9400
0x00008C24 SQ_STACK_RESOURCE_MGMT_2
0x00008C28 SQ_STACK_RESOURCE_MGMT_3
0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ae2b76b9a388..f683e51a2a06 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
r100_irq_disable(rdev);
rs400_gart_disable(rdev);
return 0;
@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 51d5f7b5ab21..b091a1f6fa4e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -796,6 +796,12 @@ static int rs600_startup(struct radeon_device *rdev)
r = rs600_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -805,9 +811,6 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -848,7 +851,7 @@ int rs600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
rs600_gart_disable(rdev);
return 0;
@@ -858,7 +861,7 @@ void rs600_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
@@ -932,7 +935,7 @@ int rs600_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 4dc2a87ea680..0137d3e3728d 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -616,6 +616,12 @@ static int rs690_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev);
if (r)
return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -625,9 +631,6 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -668,7 +671,7 @@ int rs690_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
rs400_gart_disable(rdev);
return 0;
@@ -678,7 +681,7 @@ void rs690_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
@@ -753,7 +756,7 @@ int rs690_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 4d6e86041a9f..5d569f41f4ae 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
- r = r100_wb_init(rdev);
- if (r)
- dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
- r100_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev);
@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
- r100_wb_fini(rdev);
+ radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9490da700749..245374e2b778 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -269,6 +269,7 @@ void r700_cp_stop(struct radeon_device *rdev)
{
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -643,10 +644,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
-
- gb_tiling_config |= GROUP_SIZE(0);
- rdev->config.rv770.tiling_group_size = 256;
-
+ gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+ rdev->config.rv770.tiling_group_size = 512;
+ else
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -1030,19 +1032,12 @@ static int rv770_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- /* pin copy shader into vram */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
- }
- }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1061,8 +1056,7 @@ static int rv770_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- /* write back buffer are not vital so don't worry about failure */
- r600_wb_enable(rdev);
+
return 0;
}
@@ -1108,7 +1102,7 @@ int rv770_suspend(struct radeon_device *rdev)
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_irq_suspend(rdev);
- r600_wb_disable(rdev);
+ radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
if (rdev->r600_blit.shader_obj) {
@@ -1203,8 +1197,8 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -1236,8 +1230,8 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
- r600_wb_fini(rdev);
r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2a2830f5a840..fa64d25d4248 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,8 +42,6 @@ static struct drm_driver driver = {
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
.reclaim_buffers = savage_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4bb10ef6676a..4caf5d01cfd3 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -67,13 +67,10 @@ static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
- .context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = sis_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 640567ef713d..b70fa91d761a 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -42,8 +42,6 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+ ttm_bo_manager.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..f999e36f30b4 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
+ struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem = agp_be->mem;
int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
- ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+ ret = agp_bind_memory(mem, node->start);
if (ret)
printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db809e034cc4..a1cb783c7131 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
- if (mem_type != TTM_PL_SYSTEM) {
- spin_lock(&bdev->glob->lru_lock);
- drm_mm_debug_table(&man->manager, TTM_PFX);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ if (mem_type != TTM_PL_SYSTEM)
+ (*man->func->debug)(man, TTM_PFX);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -169,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
-
if (interruptible) {
- int ret = 0;
-
- ret = wait_event_interruptible(bo->event_queue,
+ return wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
- if (unlikely(ret != 0))
- return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ return 0;
}
- return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
@@ -421,7 +413,7 @@ moved:
if (bo->mem.mm_node) {
spin_lock(&bo->lock);
- bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
spin_unlock(&bo->lock);
@@ -442,135 +434,144 @@ out_err:
}
/**
- * Call bo::reserved and with the lru lock held.
+ * Call bo::reserved.
* Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to put in driver specific hooks to release
+ * driver private resources.
+ * Will release the bo::reserved lock.
*/
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- struct ttm_bo_global *glob = bo->glob;
-
if (bo->ttm) {
-
- /**
- * Release the lru_lock, since we don't want to have
- * an atomic requirement on ttm_tt[unbind|destroy].
- */
-
- spin_unlock(&glob->lru_lock);
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
- spin_lock(&glob->lru_lock);
}
- if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
- bo->mem.mm_node = NULL;
- }
+ ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
- spin_unlock(&glob->lru_lock);
}
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- * up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver = bdev->driver;
+ struct ttm_bo_driver *driver;
+ void *sync_obj;
+ void *sync_obj_arg;
+ int put_count;
int ret;
spin_lock(&bo->lock);
-retry:
- (void) ttm_bo_wait(bo, false, false, !remove_all);
-
+ (void) ttm_bo_wait(bo, false, false, true);
if (!bo->sync_obj) {
- int put_count;
-
- spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
/**
- * Someone else has the object reserved. Bail and retry.
+ * Lock inversion between bo::reserve and bo::lock here,
+ * but that's OK, since we're only trylocking.
*/
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- spin_lock(&bo->lock);
- goto requeue;
- }
-
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread starting an accelerated
- * eviction.
- */
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(bo->sync_obj)) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
- spin_unlock(&glob->lru_lock);
- spin_lock(&bo->lock);
- if (remove_all)
- goto retry;
- else
- goto requeue;
- }
+ if (unlikely(ret == -EBUSY))
+ goto queue;
+ spin_unlock(&bo->lock);
put_count = ttm_bo_del_from_lru(bo);
- if (!list_empty(&bo->ddestroy)) {
- list_del_init(&bo->ddestroy);
- ++put_count;
- }
-
+ spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- return 0;
+ return;
+ } else {
+ spin_lock(&glob->lru_lock);
}
-requeue:
+queue:
+ sync_obj = bo->sync_obj;
+ sync_obj_arg = bo->sync_obj_arg;
+ driver = bdev->driver;
+
+ kref_get(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&glob->lru_lock);
+ spin_unlock(&bo->lock);
+
+ if (sync_obj)
+ driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * function ttm_bo_cleanup_refs
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible Any sleeps should occur interruptibly.
+ * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count;
+ int ret = 0;
+
+retry:
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ spin_unlock(&bo->lock);
+
+ if (unlikely(ret != 0))
+ return ret;
+
spin_lock(&glob->lru_lock);
- if (list_empty(&bo->ddestroy)) {
- void *sync_obj = bo->sync_obj;
- void *sync_obj_arg = bo->sync_obj_arg;
+ ret = ttm_bo_reserve_locked(bo, interruptible,
+ no_wait_reserve, false, 0);
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
+ return ret;
+ }
- if (sync_obj)
- driver->sync_obj_flush(sync_obj, sync_obj_arg);
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
- ret = 0;
+ /**
+ * We can re-check for sync object without taking
+ * the bo::lock since setting the sync object requires
+ * also bo::reserved. A busy object at this point may
+ * be caused by another thread recently starting an accelerated
+ * eviction.
+ */
- } else {
+ if (unlikely(bo->sync_obj)) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
- ret = -EBUSY;
+ goto retry;
}
- return ret;
+ put_count = ttm_bo_del_from_lru(bo);
+ list_del_init(&bo->ddestroy);
+ ++put_count;
+
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+ return 0;
}
/**
@@ -602,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
}
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, remove_all);
+ ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+ !remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
@@ -645,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
- ttm_bo_cleanup_refs(bo, false);
+ ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
write_lock(&bdev->vm_lock);
}
@@ -680,7 +682,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
@@ -726,12 +727,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
- spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
- spin_unlock(&glob->lru_lock);
+ ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
bo->evicted = true;
@@ -759,6 +755,18 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_cleanup_refs(bo, interruptible,
+ no_wait_reserve, no_wait_gpu);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ if (likely(ret == 0 || ret == -ERESTARTSYS))
+ return ret;
+
+ goto retry;
+ }
+
ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
@@ -792,41 +800,14 @@ retry:
return ret;
}
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
- struct ttm_mem_type_manager *man,
- struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- struct drm_mm_node **node)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
- struct ttm_bo_global *glob = bo->glob;
- unsigned long lpfn;
- int ret;
+ struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
- lpfn = placement->lpfn;
- if (!lpfn)
- lpfn = man->size;
- *node = NULL;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- *node = drm_mm_search_free_in_range(&man->manager,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(*node == NULL)) {
- spin_unlock(&glob->lru_lock);
- return 0;
- }
- *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&glob->lru_lock);
- } while (*node == NULL);
- return 0;
+ if (mem->mm_node)
+ (*man->func->put_node)(man, mem);
}
+EXPORT_SYMBOL(ttm_bo_mem_put);
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
@@ -843,14 +824,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct drm_mm_node *node;
int ret;
do {
- ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret != 0))
return ret;
- if (node)
+ if (mem->mm_node)
break;
spin_lock(&glob->lru_lock);
if (list_empty(&man->lru)) {
@@ -863,9 +843,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
if (unlikely(ret != 0))
return ret;
} while (1);
- if (node == NULL)
+ if (mem->mm_node == NULL)
return -ENOMEM;
- mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
}
@@ -939,7 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
bool type_ok = false;
bool has_erestartsys = false;
- struct drm_mm_node *node = NULL;
int i, ret;
mem->mm_node = NULL;
@@ -973,17 +951,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (man->has_type && man->use_type) {
type_found = true;
- ret = ttm_bo_man_get_node(bo, man, placement, mem,
- &node);
+ ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret))
return ret;
}
- if (node)
+ if (mem->mm_node)
break;
}
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
- mem->mm_node = node;
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
return 0;
@@ -1053,7 +1029,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_reserve,
bool no_wait_gpu)
{
- struct ttm_bo_global *glob = bo->glob;
int ret = 0;
struct ttm_mem_reg mem;
@@ -1081,11 +1056,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
goto out_unlock;
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
- if (ret && mem.mm_node) {
- spin_lock(&glob->lru_lock);
- drm_mm_put_block(mem.mm_node);
- spin_unlock(&glob->lru_lock);
- }
+ if (ret && mem.mm_node)
+ ttm_bo_mem_put(bo, &mem);
return ret;
}
@@ -1093,11 +1065,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
- struct drm_mm_node *node = mem->mm_node;
- if (node && placement->lpfn != 0 &&
- (node->start < placement->fpfn ||
- node->start + node->size > placement->lpfn))
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
return -1;
for (i = 0; i < placement->num_placement; i++) {
@@ -1341,7 +1312,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man;
int ret = -EINVAL;
@@ -1364,13 +1334,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
if (mem_type > 0) {
ttm_bo_force_list_clean(bdev, mem_type, false);
- spin_lock(&glob->lru_lock);
- if (drm_mm_clean(&man->manager))
- drm_mm_takedown(&man->manager);
- else
- ret = -EBUSY;
-
- spin_unlock(&glob->lru_lock);
+ ret = (*man->func->takedown)(man);
}
return ret;
@@ -1421,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
return ret;
+ man->bdev = bdev;
ret = 0;
if (type != TTM_PL_SYSTEM) {
@@ -1430,7 +1395,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, 0, p_size);
+
+ ret = (*man->func->init)(man, p_size);
if (ret)
return ret;
}
@@ -1824,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
struct ttm_buffer_object, swap);
kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ spin_unlock(&glob->lru_lock);
+ (void) ttm_bo_cleanup_refs(bo, false, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ continue;
+ }
+
/**
* Reserve buffer. Since we unlock while sleeping, we need
* to re-check that nobody removed us from the swap-list while
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..7410c190c891
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_bo_global *glob = man->bdev->glob;
+ struct drm_mm *mm = man->priv;
+ struct drm_mm_node *node = NULL;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ do {
+ ret = drm_mm_pre_get(mm);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&glob->lru_lock);
+ node = drm_mm_search_free_in_range(mm,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&glob->lru_lock);
+ } while (node == NULL);
+
+ mem->mm_node = node;
+ mem->start = node->start;
+ return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_bo_global *glob = man->bdev->glob;
+
+ if (mem->mm_node) {
+ spin_lock(&glob->lru_lock);
+ drm_mm_put_block(mem->mm_node);
+ spin_unlock(&glob->lru_lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct drm_mm *mm;
+ int ret;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
+ ret = drm_mm_init(mm, 0, p_size);
+ if (ret) {
+ kfree(mm);
+ return ret;
+ }
+
+ man->priv = mm;
+ return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct ttm_bo_global *glob = man->bdev->glob;
+ struct drm_mm *mm = man->priv;
+ int ret = 0;
+
+ spin_lock(&glob->lru_lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ kfree(mm);
+ man->priv = NULL;
+ } else
+ ret = -EBUSY;
+ spin_unlock(&glob->lru_lock);
+ return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct ttm_bo_global *glob = man->bdev->glob;
+ struct drm_mm *mm = man->priv;
+
+ spin_lock(&glob->lru_lock);
+ drm_mm_debug_table(mm, prefix);
+ spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+ ttm_bo_man_init,
+ ttm_bo_man_takedown,
+ ttm_bo_man_get_node,
+ ttm_bo_man_put_node,
+ ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3451a82adba7..3106d5bcce32 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- if (old_mem->mm_node) {
- spin_lock(&bo->glob->lru_lock);
- drm_mm_put_block(old_mem->mm_node);
- spin_unlock(&bo->glob->lru_lock);
- }
- old_mem->mm_node = NULL;
+ ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -170,7 +163,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, KM_USER0, prot);
+ dst = kmap_atomic_prot(d, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
@@ -183,7 +176,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
memcpy_fromio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
- kunmap_atomic(dst, KM_USER0);
+ kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst);
@@ -206,7 +199,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, KM_USER0, prot);
+ src = kmap_atomic_prot(s, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
@@ -219,7 +212,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
memcpy_toio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src);
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->mm_node->start <
- old_mem->mm_node->start + old_mem->mm_node->size)) {
+ (new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index b8984a5ae521..e1ff4e7a6eb0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -51,8 +51,6 @@ static struct drm_driver driver = {
.reclaim_buffers_locked = NULL,
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 4505e17df3f5..c9281a1b1d3b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
- vmwgfx_overlay.o vmwgfx_fence.o
+ vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114aee7c..80bc37b274e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+ TTM_PL_FLAG_CACHED;
+
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
+static uint32_t vram_gmr_placement_flags[] = {
+ TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+ VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 2,
+ .placement = vram_gmr_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &gmr_placement_flags
+};
+
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
struct vmw_ttm_backend {
struct ttm_backend backend;
+ struct page **pages;
+ unsigned long num_pages;
+ struct vmw_private *dev_priv;
+ int gmr_id;
};
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->pages = pages;
+ vmw_be->num_pages = num_pages;
+
return 0;
}
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
- return 0;
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->gmr_id = bo_mem->start;
+
+ return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+ vmw_be->num_pages, vmw_be->gmr_id);
}
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
static void vmw_ttm_clear(struct ttm_backend *backend)
{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ vmw_be->pages = NULL;
+ vmw_be->num_pages = 0;
}
static void vmw_ttm_destroy(struct ttm_backend *backend)
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
return NULL;
vmw_be->backend.func = &vmw_ttm_func;
+ vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
return &vmw_be->backend;
}
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
+ man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_WC;
+ man->available_caching = TTM_PL_FLAG_CACHED;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case VMW_PL_GMR:
+ /*
+ * "Guest Memory Regions" is an aperture like feature with
+ * one slot per bo. There is an upper limit of the number of
+ * slots as well as the bo size.
+ */
+ man->func = &vmw_gmrid_manager_func;
+ man->gpu_offset = 0;
+ man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_CACHED;
+ man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
-static void vmw_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- if (new_mem->mem_type != TTM_PL_SYSTEM)
- vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
- vmw_dmabuf_gmr_unbind(bo);
-}
-
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
- /* System memory */
+ case VMW_PL_GMR:
return 0;
case TTM_PL_VRAM:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->vram_start;
mem->bus.is_iomem = true;
break;
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
- .move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify,
+ .move_notify = NULL,
+ .swap_notify = NULL,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2ef93df9e8ae..10ca97ee0206 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
idr_init(&dev_priv->context_idr);
idr_init(&dev_priv->surface_idr);
idr_init(&dev_priv->stream_idr);
- ida_init(&dev_priv->gmr_ida);
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
atomic_set(&dev_priv->fence_queue_waiters, 0);
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->gmr_lru);
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err2;
}
+ dev_priv->has_gmr = true;
+ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+ dev_priv->max_gmr_ids) != 0) {
+ DRM_INFO("No GMR memory available. "
+ "Graphics memory resources are very limited.\n");
+ dev_priv->has_gmr = false;
+ }
+
dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
@@ -440,13 +446,14 @@ out_err4:
out_err3:
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
+ if (dev_priv->has_gmr)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- ida_destroy(&dev_priv->gmr_ida);
idr_destroy(&dev_priv->surface_idr);
idr_destroy(&dev_priv->context_idr);
idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
iounmap(dev_priv->mmio_virt);
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
dev_priv->mmio_size, DRM_MTRR_WC);
+ if (dev_priv->has_gmr)
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- ida_destroy(&dev_priv->gmr_ida);
idr_destroy(&dev_priv->surface_idr);
idr_destroy(&dev_priv->context_idr);
idr_destroy(&dev_priv->stream_idr);
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev)
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
+ INIT_LIST_HEAD(&vmaster->fb_surf);
+ mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev,
if (unlikely(vmaster == NULL))
return -ENOMEM;
- ttm_lock_init(&vmaster->lock);
+ vmw_master_init(vmaster);
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
master->driver_priv = vmaster;
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+ vmw_kms_idle_workqueues(vmaster);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* Buffer contents is moved to swappable memory.
*/
ttm_bo_swapout_all(&dev_priv->bdev);
+
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
ttm_suspend_unlock(&vmaster->lock);
+
break;
case PM_RESTORE_PREPARE:
break;
- case PM_POST_RESTORE:
- break;
default:
break;
}
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* These might not be needed with the virtual SVGA device.
*/
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ if (dev_priv->num_3d_resources != 0) {
+ DRM_INFO("Can't suspend or hibernate "
+ "while 3D resources are active.\n");
+ return -EBUSY;
+ }
+
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return pci_enable_device(pdev);
}
+static int vmw_pm_suspend(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct pm_message dummy;
+
+ dummy.event = 0;
+
+ return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+
+ return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ /**
+ * Release 3d reference held by fbdev and potentially
+ * stop fifo.
+ */
+ dev_priv->suspended = true;
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_dec(dev_priv);
+
+ if (dev_priv->num_3d_resources != 0) {
+
+ DRM_INFO("Can't suspend or hibernate "
+ "while 3D resources are active.\n");
+
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_inc(dev_priv);
+ dev_priv->suspended = false;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ /**
+ * Reclaim 3d reference held by fbdev and potentially
+ * start fifo.
+ */
+ if (dev_priv->enable_fb)
+ vmw_3d_resource_inc(dev_priv);
+
+ dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+ .prepare = vmw_pm_prepare,
+ .complete = vmw_pm_complete,
+ .suspend = vmw_pm_suspend,
+ .resume = vmw_pm_resume,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -798,8 +887,6 @@ static struct drm_driver driver = {
.irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.reclaim_buffers_locked = NULL,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.dma_quiescent = NULL, /*vmw_dma_quiescent, */
@@ -821,15 +908,16 @@ static struct drm_driver driver = {
.compat_ioctl = drm_compat_ioctl,
#endif
.llseek = noop_llseek,
- },
+ },
.pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .suspend = vmw_pci_suspend,
- .resume = vmw_pci_resume
- },
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+ },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -863,3 +951,7 @@ module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+ __stringify(VMWGFX_DRIVER_MINOR) "."
+ __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+ "0");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58de6393f611..e7a58d055041 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,9 +39,9 @@
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
-#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_DATE "20100927"
#define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -49,6 +49,9 @@
#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head validate_list;
- struct list_head gmr_lru;
- uint32_t gmr_id;
bool gmr_bound;
uint32_t cur_validate_node;
bool on_validate_list;
@@ -151,6 +152,8 @@ struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
+ struct mutex fb_surf_mutex;
+ struct list_head fb_surf;
};
struct vmw_vga_topology_state {
@@ -182,6 +185,7 @@ struct vmw_private {
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
+ bool has_gmr;
struct mutex hw_mutex;
/*
@@ -264,14 +268,6 @@ struct vmw_private {
struct mutex cmdbuf_mutex;
/**
- * GMR management. Protected by the lru spinlock.
- */
-
- struct ida gmr_ida;
- struct list_head gmr_lru;
-
-
- /**
* Operating mode.
*/
@@ -286,6 +282,7 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
+ bool suspended;
struct mutex release_mutex;
uint32_t num_3d_resources;
@@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
*/
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo);
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
@@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
/**
@@ -537,6 +537,12 @@ int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..51d9f9f1d7f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index];
bo = validate->bo;
- reloc->location->offset += bo->offset;
- reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ reloc->location->offset += bo->offset;
+ reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+ } else
+ reloc->location->gmrId = bo->mem.start;
}
vmw_free_relocations(sw_context);
}
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
int ret;
- if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
- return 0;
-
/**
- * Put BO in VRAM, only if there is space.
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
- if (unlikely(ret == -ERESTARTSYS))
- return ret;
-
- /**
- * Otherwise, set it up as GMR.
- */
-
- if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
- return 0;
-
- ret = vmw_gmr_bind(dev_priv, bo);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
+ DRM_INFO("Falling through to VRAM.\n");
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
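Reading the relocation patching above: a buffer validated into VRAM is addressed through the device's special framebuffer GMR, so the command stream needs the bo's byte offset within VRAM, while a buffer validated into the GMR domain occupies a whole GMR of its own, so the GMR id stored in bo->mem.start is all the device needs. A small sketch of the same logic, assuming only the gmrId/offset fields of SVGAGuestPtr that the hunk itself touches:

struct SVGAGuestPtr *loc = reloc->location;

if (bo->mem.mem_type == TTM_PL_VRAM) {
	loc->gmrId   = SVGA_GMR_FRAMEBUFFER;   /* VRAM addressed via the framebuffer GMR */
	loc->offset += bo->offset;             /* byte offset of the bo within VRAM      */
} else {
	loc->gmrId = bo->mem.start;            /* id handed out by the gmrid manager     */
}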
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 409e172f4abf..41d9a5b73c03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
+ info->fix.line_length,
+ var->yoffset + var->yres)) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
SVGAFifoCmdUpdate body;
} *cmd;
+ if (vmw_priv->suspended)
+ return;
+
spin_lock_irqsave(&par->dirty.lock, flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
@@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
goto err_unlock;
if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.mm_node->start < bo->num_pages)
+ bo->mem.start < bo->num_pages &&
+ bo->mem.start > 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
false, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 5f8908a5d7fd..de0c5948521d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
*/
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
- unsigned long num_pages)
+ unsigned long num_pages)
{
unsigned long prev_pfn = ~(0UL);
unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
}
int vmw_gmr_bind(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo)
+ struct page *pages[],
+ unsigned long num_pages,
+ int gmr_id)
{
- struct ttm_tt *ttm = bo->ttm;
- unsigned long descriptors;
- int ret;
- uint32_t id;
struct list_head desc_pages;
+ int ret;
- if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+ if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
return -EINVAL;
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
-
- descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
- if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+ if (vmw_gmr_count_descriptors(pages, num_pages) >
+ dev_priv->max_gmr_descriptors)
return -EINVAL;
INIT_LIST_HEAD(&desc_pages);
- ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
- ttm->num_pages);
- if (unlikely(ret != 0))
- return ret;
- ret = vmw_gmr_id_alloc(dev_priv, &id);
+ ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
if (unlikely(ret != 0))
- goto out_no_id;
+ return ret;
- vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+ vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
vmw_gmr_free_descriptors(&desc_pages);
- vmw_dmabuf_set_gmr(bo, id);
- return 0;
-out_no_id:
- vmw_gmr_free_descriptors(&desc_pages);
- return ret;
+ return 0;
}
+
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
mutex_lock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 000000000000..ac6e0d1bd629
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+ spinlock_t lock;
+ struct ida gmr_ida;
+ uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+ int ret;
+ int id;
+
+ mem->mm_node = NULL;
+
+ do {
+ if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&gman->lock);
+ ret = ida_get_new(&gman->gmr_ida, &id);
+
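+ /*
+ * An id past the configured limit means GMR space is exhausted:
+ * drop the id again and return 0 with mem->mm_node left NULL,
+ * which TTM interprets as "no space in this placement".
+ */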
+ if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+ ida_remove(&gman->gmr_ida, id);
+ spin_unlock(&gman->lock);
+ return 0;
+ }
+
+ spin_unlock(&gman->lock);
+
+ } while (ret == -EAGAIN);
+
+ if (likely(ret == 0)) {
+ mem->mm_node = gman;
+ mem->start = id;
+ }
+
+ return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&gman->lock);
+ ida_remove(&gman->gmr_ida, mem->start);
+ spin_unlock(&gman->lock);
+ mem->mm_node = NULL;
+ }
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct vmwgfx_gmrid_man *gman =
+ kzalloc(sizeof(*gman), GFP_KERNEL);
+
+ if (unlikely(gman == NULL))
+ return -ENOMEM;
+
+ spin_lock_init(&gman->lock);
+ ida_init(&gman->gmr_ida);
+ gman->max_gmr_ids = p_size;
+ man->priv = (void *) gman;
+ return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+ struct vmwgfx_gmrid_man *gman =
+ (struct vmwgfx_gmrid_man *)man->priv;
+
+ if (gman) {
+ ida_destroy(&gman->gmr_ida);
+ kfree(gman);
+ }
+ return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ printk(KERN_INFO "%s: No debug info available for the GMR "
+ "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+ vmw_gmrid_man_init,
+ vmw_gmrid_man_takedown,
+ vmw_gmrid_man_get_node,
+ vmw_gmrid_man_put_node,
+ vmw_gmrid_man_debug
+};
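The allocation loop in vmw_gmrid_man_get_node() is the standard ida_pre_get()/ida_get_new() idiom of this kernel generation (later kernels replace it with ida_simple_get()). A self-contained sketch of that idiom, with every name invented for illustration and <linux/idr.h> / <linux/spinlock.h> assumed to be included:

static struct ida example_ida;            /* ida_init(&example_ida) once at setup */
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(int max_ids, int *out_id)
{
	int id;
	int ret;

	do {
		/* Preload; ida_pre_get() returns 0 on allocation failure. */
		if (ida_pre_get(&example_ida, GFP_KERNEL) == 0)
			return -ENOMEM;

		spin_lock(&example_lock);
		ret = ida_get_new(&example_ida, &id);
		spin_unlock(&example_lock);
	} while (ret == -EAGAIN);         /* lost the preloaded node to a race: retry */

	if (ret)
		return ret;

	if (id >= max_ids) {              /* past the limit: give the id back */
		spin_lock(&example_lock);
		ida_remove(&example_ida, id);
		spin_unlock(&example_lock);
		return -ENOSPC;
	}

	*out_id = id;
	return 0;
}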
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 1c7a316454d8..570d57775a58 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_CAPS:
param->value = dev_priv->fifo.capabilities;
break;
+ case DRM_VMW_PARAM_MAX_FB_SIZE:
+ param->value = dev_priv->vram_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e882ba099f0c..87c6e6156d7d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
+ struct list_head head;
+ struct drm_master *master;
};
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster: Pointer identifying the master whose surfaces' dirty
+ * work queues are to be idled.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+ struct vmw_framebuffer_surface *entry;
+
+ mutex_lock(&vmaster->fb_surf_mutex);
+ list_for_each_entry(entry, &vmaster->fb_surf, head) {
+ if (cancel_delayed_work_sync(&entry->d_work))
+ (void) entry->d_work.work.func(&entry->d_work.work);
+
+ (void) cancel_delayed_work_sync(&entry->d_work);
+ }
+ mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
- struct vmw_framebuffer_surface *vfb =
+ struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_master *vmaster = vmw_master(vfbs->master);
+
- cancel_delayed_work_sync(&vfb->d_work);
+ mutex_lock(&vmaster->fb_surf_mutex);
+ list_del(&vfbs->head);
+ mutex_unlock(&vmaster->fb_surf_mutex);
+
+ cancel_delayed_work_sync(&vfbs->d_work);
+ drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
- vmw_surface_unreference(&vfb->surface);
+ vmw_surface_unreference(&vfbs->surface);
- kfree(framebuffer);
+ kfree(vfbs);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
SVGA3dCopyRect cr;
} *cmd;
+ /**
+ * Strictly we should take the ttm_lock in read mode before accessing
+ * the fifo, to make sure the fifo is present and up. However,
+ * instead we flush all workqueues under the ttm lock in exclusive mode
+ * before taking down the fifo.
+ */
mutex_lock(&vfbs->work_lock);
if (!vfbs->present_fs)
goto out_unlock;
@@ -392,17 +435,20 @@ out_unlock:
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct vmw_surface *surf = vfbs->surface;
struct drm_clip_rect norect;
SVGA3dCopyRect *cr;
int i, inc = 1;
+ int ret;
struct {
SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
SVGA3dCopyRect cr;
} *cmd;
+ if (unlikely(vfbs->master != file_priv->master))
+ return -EINVAL;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
if (!num_clips ||
!(dev_priv->fifo.capabilities &
SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
*/
vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
}
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
-int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct vmw_surface *surface,
- struct vmw_framebuffer **out,
- unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_surface *surface,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
+ enum SVGA3dSurfaceFormat format;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
+ /*
+ * Sanity checks.
+ */
+
+ if (unlikely(surface->mip_levels[0] != 1 ||
+ surface->num_sizes != 1 ||
+ surface->sizes[0].width < mode_cmd->width ||
+ surface->sizes[0].height < mode_cmd->height ||
+ surface->sizes[0].depth != 1)) {
+ DRM_ERROR("Incompatible surface dimensions "
+ "for requested mode.\n");
+ return -EINVAL;
+ }
+
+ switch (mode_cmd->depth) {
+ case 32:
+ format = SVGA3D_A8R8G8B8;
+ break;
+ case 24:
+ format = SVGA3D_X8R8G8B8;
+ break;
+ case 16:
+ format = SVGA3D_R5G6B5;
+ break;
+ case 15:
+ format = SVGA3D_A1R5G5B5;
+ break;
+ default:
+ DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ return -EINVAL;
+ }
+
+ if (unlikely(format != surface->format)) {
+ DRM_ERROR("Invalid surface format for requested mode.\n");
+ return -EINVAL;
+ }
+
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
/* XXX get the first 3 from the surface info */
- vfbs->base.base.bits_per_pixel = 32;
- vfbs->base.base.pitch = width * 32 / 4;
- vfbs->base.base.depth = 24;
- vfbs->base.base.width = width;
- vfbs->base.base.height = height;
+ vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+ vfbs->base.base.pitch = mode_cmd->pitch;
+ vfbs->base.base.depth = mode_cmd->depth;
+ vfbs->base.base.width = mode_cmd->width;
+ vfbs->base.base.height = mode_cmd->height;
vfbs->base.pin = &vmw_surface_dmabuf_pin;
vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
+ vfbs->master = drm_master_get(file_priv->master);
mutex_init(&vfbs->work_lock);
+
+ mutex_lock(&vmaster->fb_surf_mutex);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+ list_add_tail(&vfbs->head, &vmaster->fb_surf);
+ mutex_unlock(&vmaster->fb_surf_mutex);
+
*out = &vfbs->base;
return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
struct drm_clip_rect norect;
+ int ret;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
int i, increment = 1;
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
+ ttm_read_unlock(&vmaster->lock);
return -ENOMEM;
}
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
}
vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+ ttm_read_unlock(&vmaster->lock);
return 0;
}
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
-int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *dmabuf,
- struct vmw_framebuffer **out,
- unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd
+ *mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
+ unsigned int requested_size;
int ret;
+ requested_size = mode_cmd->height * mode_cmd->pitch;
+ if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+ DRM_ERROR("Screen buffer object size is too small "
+ "for requested mode.\n");
+ return -EINVAL;
+ }
+
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err3;
}
- /* XXX get the first 3 from the surface info */
- vfbd->base.base.bits_per_pixel = 32;
- vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
- vfbd->base.base.depth = 24;
- vfbd->base.base.width = width;
- vfbd->base.base.height = height;
+ vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+ vfbd->base.base.pitch = mode_cmd->pitch;
+ vfbd->base.base.depth = mode_cmd->depth;
+ vfbd->base.base.width = mode_cmd->width;
+ vfbd->base.base.height = mode_cmd->height;
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
vfbd->buffer = dmabuf;
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
+ u64 required_size;
int ret;
+ /**
+ * This code should be conditioned on Screen Objects not being used.
+ * If screen objects are used, we can allocate a GMR to hold the
+ * requested framebuffer.
+ */
+
+ required_size = mode_cmd->pitch * mode_cmd->height;
+ if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ DRM_ERROR("VRAM size is too small for requested mode.\n");
+ return NULL;
+ }
+
+ /**
+ * End conditioned code.
+ */
+
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd->handle, &surface);
if (ret)
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (!surface->scanout)
goto err_not_scanout;
- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
- mode_cmd->width, mode_cmd->height);
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+ &vfb, mode_cmd);
/* vmw_user_surface_lookup takes one ref so does new_fb */
vmw_surface_unreference(&surface);
@@ -751,7 +887,7 @@ try_dmabuf:
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd->width, mode_cmd->height);
+ mode_cmd);
/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
vmw_dmabuf_unreference(&bo);
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
vmw_priv->num_displays = vmw_read(vmw_priv,
SVGA_REG_NUM_GUEST_DISPLAYS);
+ if (vmw_priv->num_displays == 0)
+ vmw_priv->num_displays = 1;
+
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1136,13 @@ out_unlock:
return ret;
}
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+ uint32_t pitch,
+ uint32_t height)
+{
+ return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 11cb39e3accb..a01c47ddb5bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
{
struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
mode->hdisplay = ldu->pref_width;
mode->vdisplay = ldu->pref_height;
mode->vrefresh = drm_mode_vrefresh(mode);
- drm_mode_probed_add(connector, mode);
+ if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
- if (ldu->pref_mode) {
- list_del_init(&ldu->pref_mode->head);
- drm_mode_destroy(dev, ldu->pref_mode);
- }
+ if (ldu->pref_mode) {
+ list_del_init(&ldu->pref_mode->head);
+ drm_mode_destroy(dev, ldu->pref_mode);
+ }
- ldu->pref_mode = mode;
+ ldu->pref_mode = mode;
+ }
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
- if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
- vmw_ldu_connector_builtin[i].vdisplay > max_height)
+ bmode = &vmw_ldu_connector_builtin[i];
+ if (bmode->hdisplay > max_width ||
+ bmode->vdisplay > max_height)
+ continue;
+
+ if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+ bmode->vdisplay))
continue;
- mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+ mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
mode->vrefresh = drm_mode_vrefresh(mode);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8c40e9979db..36e129f0023f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
return bo_user_size + page_array_size;
}
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- struct vmw_private *dev_priv =
- container_of(bo->bdev, struct vmw_private, bdev);
-
- if (vmw_bo->gmr_bound) {
- vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
- spin_unlock(&glob->lru_lock);
- vmw_bo->gmr_bound = false;
- }
-}
-
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
- vmw_dmabuf_gmr_unbind(bo);
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->gmr_lru);
INIT_LIST_HEAD(&vmw_bo->validate_list);
- vmw_bo->gmr_id = 0;
- vmw_bo->gmr_bound = false;
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
struct ttm_bo_global *glob = bo->glob;
- vmw_dmabuf_gmr_unbind(bo);
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
vmw_bo->on_validate_list = false;
}
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo;
-
- if (bo->mem.mem_type == TTM_PL_VRAM)
- return SVGA_GMR_FRAMEBUFFER;
-
- vmw_bo = vmw_dma_buffer(bo);
-
- return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- vmw_bo->gmr_bound = true;
- vmw_bo->gmr_id = id;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
- struct ttm_bo_global *glob = dev_priv->bdev.glob;
- int id;
- int ret;
-
- do {
- if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- spin_lock(&glob->lru_lock);
- ret = ida_get_new(&dev_priv->gmr_ida, &id);
- spin_unlock(&glob->lru_lock);
- } while (ret == -EAGAIN);
-
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely(id >= dev_priv->max_gmr_ids)) {
- spin_lock(&glob->lru_lock);
- ida_remove(&dev_priv->gmr_ida, id);
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- *p_id = (uint32_t) id;
- return 0;
-}
-
/*
* Stream management
*/
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
new file mode 100644
index 000000000000..742c423567cf
--- /dev/null
+++ b/drivers/gpu/stub/Kconfig
@@ -0,0 +1,13 @@
+config STUB_POULSBO
+ tristate "Intel GMA500 Stub Driver"
+ depends on PCI
+ # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select ACPI_VIDEO if ACPI
+ help
+ Choose this option if you have a system that has Intel GMA500
+ (Poulsbo) integrated graphics. If M is selected, the module will
+ be called poulsbo. This is a stub driver for Poulsbo that
+ registers the ACPI video driver to provide the backlight control
+ sysfs entries, since there is no native Poulsbo driver that can
+ support the Intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
new file mode 100644
index 000000000000..cd940cc9d36d
--- /dev/null
+++ b/drivers/gpu/stub/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
new file mode 100644
index 000000000000..7edfd27b8dee
--- /dev/null
+++ b/drivers/gpu/stub/poulsbo.c
@@ -0,0 +1,64 @@
+/*
+ * Intel Poulsbo Stub driver
+ *
+ * Copyright (C) 2010 Novell <jlee@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#define DRIVER_NAME "poulsbo"
+
+enum {
+ CHIP_PSB_8108 = 0,
+ CHIP_PSB_8109 = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
+ {0, 0, 0}
+};
+
+static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return acpi_video_register();
+}
+
+static void poulsbo_remove(struct pci_dev *pdev)
+{
+ acpi_video_unregister();
+}
+
+static struct pci_driver poulsbo_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = poulsbo_probe,
+ .remove = poulsbo_remove,
+};
+
+static int __init poulsbo_init(void)
+{
+ return pci_register_driver(&poulsbo_driver);
+}
+
+static void __exit poulsbo_exit(void)
+{
+ pci_unregister_driver(&poulsbo_driver);
+}
+
+module_init(poulsbo_init);
+module_exit(poulsbo_exit);
+
+MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
+MODULE_DESCRIPTION("Poulsbo Stub Driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6539ac2907e9..fd455a2fdd12 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -95,9 +95,9 @@ config I2C_I801
ESB2
ICH8
ICH9
- Tolapai
+ EP80579 (Tolapai)
ICH10
- 3400/5 Series (PCH)
+ 5/3400 Series (PCH)
Cougar Point (PCH)
This driver can also be built as a module. If so, the module
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c60081169cc3..59d65981eed7 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -38,10 +38,10 @@
82801G (ICH7) 0x27da 32 hard yes yes yes
82801H (ICH8) 0x283e 32 hard yes yes yes
82801I (ICH9) 0x2930 32 hard yes yes yes
- Tolapai 0x5032 32 hard yes yes yes
+ EP80579 (Tolapai) 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
@@ -587,11 +587,11 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4cb4bb009950..53fab518b3da 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -560,7 +560,8 @@ static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
.driver_data = 1 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
- .driver_data = 2 }
+ .driver_data = 2 },
+ { 0, }
};
static struct {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cb3ccf3ed221..41665d2f9f93 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -74,7 +74,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
static unsigned int mwait_substates;
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
-static unsigned int lapic_timer_reliable_states;
+static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
@@ -94,7 +94,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
- .power_usage = 1000,
.target_residency = 6,
.enter = &intel_idle },
{ /* MWAIT C2 */
@@ -103,7 +102,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
- .power_usage = 500,
.target_residency = 80,
.enter = &intel_idle },
{ /* MWAIT C3 */
@@ -112,11 +110,46 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
- .power_usage = 350,
.target_residency = 800,
.enter = &intel_idle },
};
+static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "SNB-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "SNB-C3",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 160,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "SNB-C6",
+ .desc = "MWAIT 0x20",
+ .driver_data = (void *) 0x20,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 104,
+ .target_residency = 208,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "SNB-C7",
+ .desc = "MWAIT 0x30",
+ .driver_data = (void *) 0x30,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 109,
+ .target_residency = 300,
+ .enter = &intel_idle },
+};
+
static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
@@ -125,7 +158,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
- .power_usage = 1000,
.target_residency = 4,
.enter = &intel_idle },
{ /* MWAIT C2 */
@@ -134,7 +166,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
- .power_usage = 500,
.target_residency = 80,
.enter = &intel_idle },
{ /* MWAIT C3 */ },
@@ -144,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
- .power_usage = 250,
.target_residency = 400,
.enter = &intel_idle },
{ /* MWAIT C5 */ },
@@ -154,7 +184,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.driver_data = (void *) 0x52,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
- .power_usage = 150,
.target_residency = 560,
.enter = &intel_idle },
};
@@ -179,13 +208,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
local_irq_disable();
/*
- * If the state flag indicates that the TLB will be flushed or if this
- * is the deepest c-state supported, do a voluntary leave mm to avoid
- * costly and mostly unnecessary wakeups for flushing the user TLB's
- * associated with the active mm.
+ * Call leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLBs associated with the active mm.
*/
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
- (&dev->states[dev->state_count - 1] == state))
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
@@ -269,9 +295,14 @@ static int intel_idle_probe(void)
case 0x1C: /* 28 - Atom Processor */
case 0x26: /* 38 - Lincroft Atom Processor */
- lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
cpuidle_state_table = atom_cstates;
break;
+
+ case 0x2A: /* SNB */
+ case 0x2D: /* SNB Xeon */
+ cpuidle_state_table = snb_cstates;
+ break;
#ifdef FUTURE_USE
case 0x17: /* 23 - Core 2 Duo */
lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ae7c2880e624..91916a8d5de4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
struct ib_agent_port_private *entry;
list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->agent[0]->device == device &&
- entry->agent[0]->port_num == port_num)
+ if (entry->agent[1]->device == device &&
+ entry->agent[1]->port_num == port_num)
return entry;
}
return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
goto error1;
}
- /* Obtain send only MAD agent for SMI QP */
- port_priv->agent[0] = ib_register_mad_agent(device, port_num,
- IB_QPT_SMI, NULL, 0,
- &agent_send_handler,
- NULL, NULL);
- if (IS_ERR(port_priv->agent[0])) {
- ret = PTR_ERR(port_priv->agent[0]);
- goto error2;
+ if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+ /* Obtain send only MAD agent for SMI QP */
+ port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+ IB_QPT_SMI, NULL, 0,
+ &agent_send_handler,
+ NULL, NULL);
+ if (IS_ERR(port_priv->agent[0])) {
+ ret = PTR_ERR(port_priv->agent[0]);
+ goto error2;
+ }
}
/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
return 0;
error3:
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
error2:
kfree(port_priv);
error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
ib_unregister_mad_agent(port_priv->agent[1]);
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
+
kfree(port_priv);
return 0;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b930b8110a63..6884da24fde1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -59,6 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
+#define CMA_IBOE_PACKET_LIFETIME 18
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);
@@ -157,6 +158,7 @@ struct cma_multicast {
struct list_head list;
void *context;
struct sockaddr_storage addr;
+ struct kref mcref;
};
struct cma_work {
@@ -173,6 +175,12 @@ struct cma_ndev_work {
struct rdma_cm_event event;
};
+struct iboe_mcast_work {
+ struct work_struct work;
+ struct rdma_id_private *id;
+ struct cma_multicast *mc;
+};
+
union cma_ip_addr {
struct in6_addr ip6;
struct {
@@ -281,6 +289,8 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
atomic_inc(&cma_dev->refcount);
id_priv->cma_dev = cma_dev;
id_priv->id.device = cma_dev->device;
+ id_priv->id.route.addr.dev_addr.transport =
+ rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
}
@@ -290,6 +300,14 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
complete(&cma_dev->comp);
}
+static inline void release_mc(struct kref *kref)
+{
+ struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+ kfree(mc->multicast.ib);
+ kfree(mc);
+}
+
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
list_del(&id_priv->list);
@@ -323,22 +341,63 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
return ret;
}
+static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+{
+ int i;
+ int err;
+ struct ib_port_attr props;
+ union ib_gid tmp;
+
+ err = ib_query_port(device, port_num, &props);
+ if (err)
+ return 1;
+
+ for (i = 0; i < props.gid_tbl_len; ++i) {
+ err = ib_query_gid(device, port_num, i, &tmp);
+ if (err)
+ return 1;
+ if (!memcmp(&tmp, gid, sizeof tmp))
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
- union ib_gid gid;
+ union ib_gid gid, iboe_gid;
int ret = -ENODEV;
+ u8 port;
+ enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
+ IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
- rdma_addr_get_sgid(dev_addr, &gid);
+ iboe_addr_get_sgid(dev_addr, &iboe_gid);
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof gid);
list_for_each_entry(cma_dev, &dev_list, list) {
- ret = ib_find_cached_gid(cma_dev->device, &gid,
- &id_priv->id.port_num, NULL);
- if (!ret) {
- cma_attach_to_dev(id_priv, cma_dev);
- break;
+ for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
+ if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+ rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+ ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+ else
+ ret = find_gid_port(cma_dev->device, &gid, port);
+
+ if (!ret) {
+ id_priv->id.port_num = port;
+ goto out;
+ } else if (ret == 1)
+ break;
+ }
}
}
+
+out:
+ if (!ret)
+ cma_attach_to_dev(id_priv, cma_dev);
+
return ret;
}
@@ -556,10 +615,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int ret;
+ u16 pkey;
+
+ if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
+ IB_LINK_LAYER_INFINIBAND)
+ pkey = ib_addr_get_pkey(dev_addr);
+ else
+ pkey = 0xffff;
ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
- ib_addr_get_pkey(dev_addr),
- &qp_attr->pkey_index);
+ pkey, &qp_attr->pkey_index);
if (ret)
return ret;
@@ -737,8 +802,8 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
- switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
break;
@@ -816,8 +881,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
mc = container_of(id_priv->mc_list.next,
struct cma_multicast, list);
list_del(&mc->list);
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_put(&mc->mcref, release_mc);
+ break;
+ default:
+ break;
+ }
}
}
@@ -833,7 +907,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_lock(&lock);
if (id_priv->cma_dev) {
mutex_unlock(&lock);
- switch (rdma_node_get_transport(id->device->node_type)) {
+ switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1708,6 +1782,81 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
return 0;
}
+static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ struct rdma_addr *addr = &route->addr;
+ struct cma_work *work;
+ int ret;
+ struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
+ struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
+ struct net_device *ndev = NULL;
+ u16 vid;
+
+ if (src_addr->sin_family != dst_addr->sin_family)
+ return -EINVAL;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+
+ route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
+ if (!route->path_rec) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ route->num_paths = 1;
+
+ if (addr->dev_addr.bound_dev_if)
+ ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
+
+ vid = rdma_vlan_dev_vlan_id(ndev);
+
+ iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
+ iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+
+ route->path_rec->hop_limit = 1;
+ route->path_rec->reversible = 1;
+ route->path_rec->pkey = cpu_to_be16(0xffff);
+ route->path_rec->mtu_selector = IB_SA_EQ;
+ route->path_rec->sl = id_priv->tos >> 5;
+
+ route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+ route->path_rec->rate_selector = IB_SA_EQ;
+ route->path_rec->rate = iboe_get_rate(ndev);
+ dev_put(ndev);
+ route->path_rec->packet_life_time_selector = IB_SA_EQ;
+ route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ if (!route->path_rec->mtu) {
+ ret = -EINVAL;
+ goto err2;
+ }
+
+ work->old_state = CMA_ROUTE_QUERY;
+ work->new_state = CMA_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ work->event.status = 0;
+
+ queue_work(cma_wq, &work->work);
+
+ return 0;
+
+err2:
+ kfree(route->path_rec);
+ route->path_rec = NULL;
+err1:
+ kfree(work);
+ return ret;
+}
+
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
struct rdma_id_private *id_priv;
@@ -1720,7 +1869,16 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
atomic_inc(&id_priv->refcount);
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_resolve_ib_route(id_priv, timeout_ms);
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ret = cma_resolve_ib_route(id_priv, timeout_ms);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ ret = cma_resolve_iboe_route(id_priv);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -1773,7 +1931,7 @@ port_found:
goto out;
id_priv->id.route.addr.dev_addr.dev_type =
- (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+ (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
ARPHRD_INFINIBAND : ARPHRD_ETHER;
rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2758,6 +2916,102 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
return 0;
}
+static void iboe_mcast_work_handler(struct work_struct *work)
+{
+ struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+ struct cma_multicast *mc = mw->mc;
+ struct ib_sa_multicast *m = mc->multicast.ib;
+
+ mc->multicast.ib->context = mc;
+ cma_ib_mc_handler(0, m);
+ kref_put(&mc->mcref, release_mc);
+ kfree(mw);
+}
+
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+ if (cma_any_addr(addr)) {
+ memset(mgid, 0, sizeof *mgid);
+ } else if (addr->sa_family == AF_INET6) {
+ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ } else {
+ mgid->raw[0] = 0xff;
+ mgid->raw[1] = 0x0e;
+ mgid->raw[2] = 0;
+ mgid->raw[3] = 0;
+ mgid->raw[4] = 0;
+ mgid->raw[5] = 0;
+ mgid->raw[6] = 0;
+ mgid->raw[7] = 0;
+ mgid->raw[8] = 0;
+ mgid->raw[9] = 0;
+ mgid->raw[10] = 0xff;
+ mgid->raw[11] = 0xff;
+ *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
+ }
+}
+
+static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+{
+ struct iboe_mcast_work *work;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int err;
+ struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ struct net_device *ndev = NULL;
+
+ if (cma_zero_addr((struct sockaddr *)&mc->addr))
+ return -EINVAL;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+ if (!mc->multicast.ib) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+
+ mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+ if (id_priv->id.ps == RDMA_PS_UDP)
+ mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (!ndev) {
+ err = -ENODEV;
+ goto out2;
+ }
+ mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+ mc->multicast.ib->rec.hop_limit = 1;
+ mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+ dev_put(ndev);
+ if (!mc->multicast.ib->rec.mtu) {
+ err = -EINVAL;
+ goto out2;
+ }
+ iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+ work->id = id_priv;
+ work->mc = mc;
+ INIT_WORK(&work->work, iboe_mcast_work_handler);
+ kref_get(&mc->mcref);
+ queue_work(cma_wq, &work->work);
+
+ return 0;
+
+out2:
+ kfree(mc->multicast.ib);
+out1:
+ kfree(work);
+ return err;
+}
+
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
void *context)
{
@@ -2784,7 +3038,17 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_join_ib_multicast(id_priv, mc);
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ret = cma_join_ib_multicast(id_priv, mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_init(&mc->mcref);
+ ret = cma_iboe_join_multicast(id_priv, mc);
+ break;
+ default:
+ ret = -EINVAL;
+ }
break;
default:
ret = -ENOSYS;
@@ -2817,8 +3081,19 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
mc->multicast.ib->rec.mlid);
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
+ switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ib_sa_free_multicast(mc->multicast.ib);
+ kfree(mc);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ kref_put(&mc->mcref, release_mc);
+ break;
+ default:
+ break;
+ }
+ }
return;
}
}
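
The cma.c hunks above dispatch on the port link layer and, for Ethernet (IBoE) ports, synthesize the multicast GID directly from the IP address instead of querying the SA. As a hedged illustration of the IPv4 branch of cma_iboe_set_mgid(), the standalone sketch below builds the same 16-byte MGID layout in userspace; the helper name and the test group address are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Mirror of the IPv4 branch of cma_iboe_set_mgid(): ff0e::ffff:<v4 addr>.
 * Purely illustrative; the kernel operates on union ib_gid instead. */
static void ipv4_to_iboe_mgid(uint32_t addr_be, uint8_t mgid[16])
{
	memset(mgid, 0, 16);
	mgid[0] = 0xff;			/* multicast prefix */
	mgid[1] = 0x0e;			/* global scope */
	mgid[10] = 0xff;
	mgid[11] = 0xff;
	memcpy(&mgid[12], &addr_be, 4);	/* low 32 bits carry the IPv4 group */
}

int main(void)
{
	uint8_t mgid[16];
	uint32_t group = inet_addr("239.1.2.3");	/* network byte order */
	int i;

	ipv4_to_iboe_mgid(group, mgid);
	for (i = 0; i < 16; i++)
		printf("%02x%s", mgid[i], i == 15 ? "\n" : ":");
	return 0;
}
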
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index bfead5bc25f6..2a1e9ae134b4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -506,6 +506,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
@@ -565,6 +567,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return -EINVAL;
}
cm_id->device->iwcm->add_ref(qp);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f151dc..822cfdcd9f78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
struct ib_mad_private *recv;
struct ib_mad_list_head *mad_list;
+ if (!qp_info->qp)
+ return;
+
while (!list_empty(&qp_info->recv_queue.list)) {
mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
qp = port_priv->qp_info[i].qp;
+ if (!qp)
+ continue;
+
/*
* PKey index for QP1 is irrelevant but
* one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
}
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+ if (!port_priv->qp_info[i].qp)
+ continue;
+
ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
if (ret) {
printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
+ if (!qp_info->qp)
+ return;
+
ib_destroy_qp(qp_info->qp);
kfree(qp_info->snoop_table);
}
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
struct ib_mad_port_private *port_priv;
unsigned long flags;
char name[sizeof "ib_mad123"];
+ int has_smi;
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+ cq_size = mad_sendq_size + mad_recvq_size;
+ has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+ if (has_smi)
+ cq_size *= 2;
+
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
goto error5;
}
- ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
- if (ret)
- goto error6;
+ if (has_smi) {
+ ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+ if (ret)
+ goto error6;
+ }
ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
if (ret)
goto error7;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a519801dcfb7..68b4162fd9d2 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
int index;
dev = container_of(handler, struct mcast_device, event_handler);
+ if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return;
+
index = event->element.port_num - dev->start_port;
switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
struct mcast_device *dev;
struct mcast_port *port;
int i;
+ int count = 0;
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
}
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+ if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+ IB_LINK_LAYER_INFINIBAND)
+ continue;
port = &dev->port[i];
port->dev = dev;
port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
port->table = RB_ROOT;
init_completion(&port->comp);
atomic_set(&port->refcount, 1);
+ ++count;
+ }
+
+ if (!count) {
+ kfree(dev);
+ return;
}
dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
flush_workqueue(mcast_wq);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
- port = &dev->port[i];
- deref_port(port);
- wait_for_completion(&port->comp);
+ if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+ IB_LINK_LAYER_INFINIBAND) {
+ port = &dev->port[i];
+ deref_port(port);
+ wait_for_completion(&port->comp);
+ }
}
kfree(dev);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..91a660310b7c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];
+ if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+ return;
+
spin_lock_irqsave(&port->ah_lock, flags);
if (port->sm_ah)
kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -493,6 +496,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
{
int ret;
u16 gid_index;
+ int force_grh;
memset(ah_attr, 0, sizeof *ah_attr);
ah_attr->dlid = be16_to_cpu(rec->dlid);
@@ -502,7 +506,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->port_num = port_num;
ah_attr->static_rate = rec->rate;
- if (rec->hop_limit > 1) {
+ force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+
+ if (rec->hop_limit > 1 || force_grh) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.dgid = rec->dgid;
@@ -1007,7 +1013,7 @@ static void ib_sa_add_one(struct ib_device *device)
e = device->phys_port_cnt;
}
- sa_dev = kmalloc(sizeof *sa_dev +
+ sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
GFP_KERNEL);
if (!sa_dev)
@@ -1017,9 +1023,12 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->end_port = e;
for (i = 0; i <= e - s; ++i) {
+ spin_lock_init(&sa_dev->port[i].ah_lock);
+ if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+ continue;
+
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
- spin_lock_init(&sa_dev->port[i].ah_lock);
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1054,15 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;
for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i].update_task);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ update_sm_ah(&sa_dev->port[i].update_task);
return;
err:
while (--i >= 0)
- ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
kfree(sa_dev);
@@ -1071,9 +1082,12 @@ static void ib_sa_remove_one(struct ib_device *device)
flush_scheduled_work();
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
- ib_unregister_mad_agent(sa_dev->port[i].agent);
- if (sa_dev->port[i].sm_ah)
- kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (sa_dev->port[i].sm_ah)
+ kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ }
+
}
kfree(sa_dev);
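
The sa_query.c change forces a GRH on IBoE ports: Ethernet link layers have no LIDs, so the address handle must always carry global routing information. A minimal sketch of that decision, assuming only the two link-layer values used above:

#include <stdio.h>

enum link_layer { LL_INFINIBAND, LL_ETHERNET };

/* Hedged sketch of the GRH decision added to ib_init_ah_from_path():
 * a GRH is needed either for off-subnet destinations (hop_limit > 1)
 * or unconditionally on Ethernet (IBoE) ports, which have no LIDs. */
static int needs_grh(enum link_layer ll, unsigned int hop_limit)
{
	return hop_limit > 1 || ll == LL_ETHERNET;
}

int main(void)
{
	printf("IB,  hop_limit 0: %d\n", needs_grh(LL_INFINIBAND, 0));
	printf("IB,  hop_limit 2: %d\n", needs_grh(LL_INFINIBAND, 2));
	printf("Eth, hop_limit 0: %d\n", needs_grh(LL_ETHERNET, 0));
	return 0;
}
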
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3627300e2a10..9ab5df72df7b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -222,6 +222,19 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
}
}
+static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
+ char *buf)
+{
+ switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ return sprintf(buf, "%s\n", "InfiniBand");
+ case IB_LINK_LAYER_ETHERNET:
+ return sprintf(buf, "%s\n", "Ethernet");
+ default:
+ return sprintf(buf, "%s\n", "Unknown");
+ }
+}
+
static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
@@ -230,6 +243,7 @@ static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);
+static PORT_ATTR_RO(link_layer);
static struct attribute *port_default_attrs[] = {
&port_attr_state.attr,
@@ -240,6 +254,7 @@ static struct attribute *port_default_attrs[] = {
&port_attr_cap_mask.attr,
&port_attr_rate.attr,
&port_attr_phys_state.attr,
+ &port_attr_link_layer.attr,
NULL
};
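
The new link_layer port attribute is visible from userspace through the usual sysfs path. A small hedged example reading it; the device name "mlx4_0" and port number are placeholders for whatever HCA is present locally.

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/infiniband/mlx4_0/ports/1/link_layer", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("port 1 link layer: %s", buf);	/* "InfiniBand" or "Ethernet" */
	fclose(f);
	return 0;
}
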
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ac7edc24165c..ca12acf38379 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -40,6 +40,7 @@
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/sysctl.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -50,8 +51,24 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
-enum {
- UCMA_MAX_BACKLOG = 128
+static unsigned int max_backlog = 1024;
+
+static struct ctl_table_header *ucma_ctl_table_hdr;
+static ctl_table ucma_ctl_table[] = {
+ {
+ .procname = "max_backlog",
+ .data = &max_backlog,
+ .maxlen = sizeof max_backlog,
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static struct ctl_path ucma_ctl_path[] = {
+ { .procname = "net" },
+ { .procname = "rdma_ucm" },
+ { }
};
struct ucma_file {
@@ -583,6 +600,42 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
}
}
+static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
+ struct rdma_route *route)
+{
+ struct rdma_dev_addr *dev_addr;
+ struct net_device *dev;
+ u16 vid = 0;
+
+ resp->num_paths = route->num_paths;
+ switch (route->num_paths) {
+ case 0:
+ dev_addr = &route->addr.dev_addr;
+ dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (dev) {
+ vid = rdma_vlan_dev_vlan_id(dev);
+ dev_put(dev);
+ }
+
+ iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
+ dev_addr->dst_dev_addr, vid);
+ iboe_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
+ resp->ib_route[0].pkey = cpu_to_be16(0xffff);
+ break;
+ case 2:
+ ib_copy_path_rec_to_user(&resp->ib_route[1],
+ &route->path_rec[1]);
+ /* fall through */
+ case 1:
+ ib_copy_path_rec_to_user(&resp->ib_route[0],
+ &route->path_rec[0]);
+ break;
+ default:
+ break;
+ }
+}
+
static ssize_t ucma_query_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -617,12 +670,17 @@ static ssize_t ucma_query_route(struct ucma_file *file,
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
- switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
- case RDMA_TRANSPORT_IB:
- ucma_copy_ib_route(&resp, &ctx->cm_id->route);
- break;
- default:
- break;
+ if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
+ switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+ break;
+ case IB_LINK_LAYER_ETHERNET:
+ ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+ break;
+ default:
+ break;
+ }
}
out:
@@ -686,8 +744,8 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
- cmd.backlog : UCMA_MAX_BACKLOG;
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+ cmd.backlog : max_backlog;
ret = rdma_listen(ctx->cm_id, ctx->backlog);
ucma_put_ctx(ctx);
return ret;
@@ -1279,16 +1337,26 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
- goto err;
+ goto err1;
+ }
+
+ ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+ if (!ucma_ctl_table_hdr) {
+ printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ ret = -ENOMEM;
+ goto err2;
}
return 0;
-err:
+err2:
+ device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+err1:
misc_deregister(&ucma_misc);
return ret;
}
static void __exit ucma_cleanup(void)
{
+ unregister_sysctl_table(ucma_ctl_table_hdr);
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
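
The ucma.c change replaces the fixed UCMA_MAX_BACKLOG of 128 with a max_backlog sysctl (default 1024) registered under net/rdma_ucm. A hedged userspace check of the new tunable, assuming the standard procfs view of sysctl:

#include <stdio.h>

int main(void)
{
	int limit;
	FILE *f = fopen("/proc/sys/net/rdma_ucm/max_backlog", "r");

	if (!f) {
		perror("fopen");	/* rdma_ucm not loaded, or older kernel */
		return 1;
	}
	if (fscanf(f, "%d", &limit) == 1)
		printf("rdma_ucm max_backlog = %d\n", limit);
	fclose(f);
	return 0;
}
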
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 650b501eb142..bb7e19280821 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -33,6 +33,7 @@
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/if_ether.h>
#include <rdma/ib_pack.h>
@@ -80,6 +81,40 @@ static const struct ib_field lrh_table[] = {
.size_bits = 16 }
};
+static const struct ib_field eth_table[] = {
+ { STRUCT_FIELD(eth, dmac_h),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { STRUCT_FIELD(eth, dmac_l),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(eth, smac_h),
+ .offset_words = 1,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { STRUCT_FIELD(eth, smac_l),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { STRUCT_FIELD(eth, type),
+ .offset_words = 3,
+ .offset_bits = 0,
+ .size_bits = 16 }
+};
+
+static const struct ib_field vlan_table[] = {
+ { STRUCT_FIELD(vlan, tag),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { STRUCT_FIELD(vlan, type),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 }
+};
+
static const struct ib_field grh_table[] = {
{ STRUCT_FIELD(grh, ip_version),
.offset_words = 0,
@@ -180,38 +215,43 @@ static const struct ib_field deth_table[] = {
/**
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
+ * @lrh_present: specify if LRH is present
+ * @eth_present: specify if Eth header is present
+ * @vlan_present: specify if a VLAN header is present
* @grh_present:GRH flag (if non-zero, GRH will be included)
- * @immediate_present: specify if immediate data should be used
+ * @immediate_present: specify if immediate data is present
* @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
*/
void ib_ud_header_init(int payload_bytes,
+ int lrh_present,
+ int eth_present,
+ int vlan_present,
int grh_present,
int immediate_present,
struct ib_ud_header *header)
{
- u16 packet_length;
-
memset(header, 0, sizeof *header);
- header->lrh.link_version = 0;
- header->lrh.link_next_header =
- grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
- packet_length = (IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES +
- payload_bytes +
- 4 + /* ICRC */
- 3) / 4; /* round up */
-
- header->grh_present = grh_present;
+ if (lrh_present) {
+ u16 packet_length;
+
+ header->lrh.link_version = 0;
+ header->lrh.link_next_header =
+ grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+ packet_length = (IB_LRH_BYTES +
+ IB_BTH_BYTES +
+ IB_DETH_BYTES +
+ (grh_present ? IB_GRH_BYTES : 0) +
+ payload_bytes +
+ 4 + /* ICRC */
+ 3) / 4; /* round up */
+ header->lrh.packet_length = cpu_to_be16(packet_length);
+ }
+
+ if (vlan_present)
+ header->eth.type = cpu_to_be16(ETH_P_8021Q);
+
if (grh_present) {
- packet_length += IB_GRH_BYTES / 4;
header->grh.ip_version = 6;
header->grh.payload_length =
cpu_to_be16((IB_BTH_BYTES +
@@ -222,19 +262,52 @@ void ib_ud_header_init(int payload_bytes,
header->grh.next_header = 0x1b;
}
- header->lrh.packet_length = cpu_to_be16(packet_length);
-
- header->immediate_present = immediate_present;
if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
else
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
header->bth.pad_count = (4 - payload_bytes) & 3;
header->bth.transport_header_version = 0;
+
+ header->lrh_present = lrh_present;
+ header->eth_present = eth_present;
+ header->vlan_present = vlan_present;
+ header->grh_present = grh_present;
+ header->immediate_present = immediate_present;
}
EXPORT_SYMBOL(ib_ud_header_init);
/**
+ * ib_lrh_header_pack - Pack LRH header struct into wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_pack() packs the LRH header structure @lrh into
+ * wire format in the buffer @buf.
+ */
+int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
+{
+ ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
+ return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_pack);
+
+/**
+ * ib_lrh_header_unpack - Unpack LRH structure from wire format
+ * @buf:Buffer to unpack from
+ * @lrh:LRH header struct to fill in
+ *
+ * ib_lrh_header_unpack() unpacks the LRH header structure from
+ * wire format (in buf) into @lrh.
+ */
+int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
+{
+ ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
+ return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_unpack);
+
+/**
* ib_ud_header_pack - Pack UD header struct into wire format
* @header:UD header struct
* @buf:Buffer to pack into
@@ -247,10 +320,21 @@ int ib_ud_header_pack(struct ib_ud_header *header,
{
int len = 0;
- ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
- &header->lrh, buf);
- len += IB_LRH_BYTES;
-
+ if (header->lrh_present) {
+ ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+ &header->lrh, buf + len);
+ len += IB_LRH_BYTES;
+ }
+ if (header->eth_present) {
+ ib_pack(eth_table, ARRAY_SIZE(eth_table),
+ &header->eth, buf + len);
+ len += IB_ETH_BYTES;
+ }
+ if (header->vlan_present) {
+ ib_pack(vlan_table, ARRAY_SIZE(vlan_table),
+ &header->vlan, buf + len);
+ len += IB_VLAN_BYTES;
+ }
if (header->grh_present) {
ib_pack(grh_table, ARRAY_SIZE(grh_table),
&header->grh, buf + len);
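
ib_ud_header_init() now takes explicit lrh/eth/vlan/grh flags and folds the GRH length into the LRH packet_length up front instead of patching it in afterwards. The standalone sketch below reproduces that arithmetic; the byte constants match the usual IBA header sizes from ib_pack.h (LRH 8, GRH 40, BTH 12, DETH 8) but are restated here as assumptions.

#include <stdio.h>
#include <stdint.h>

#define LRH_BYTES	8
#define GRH_BYTES	40
#define BTH_BYTES	12
#define DETH_BYTES	8
#define ICRC_BYTES	4

/* Sketch of the packet_length computation in ib_ud_header_init():
 * total header + payload + ICRC, rounded up to 4-byte words. */
static uint16_t lrh_packet_length(int payload_bytes, int grh_present)
{
	int bytes = LRH_BYTES + BTH_BYTES + DETH_BYTES +
		    (grh_present ? GRH_BYTES : 0) +
		    payload_bytes + ICRC_BYTES;

	return (uint16_t)((bytes + 3) / 4);	/* in 4-byte words */
}

int main(void)
{
	printf("no GRH, 0-byte payload:  %u words\n", lrh_packet_length(0, 0));
	printf("GRH, 256-byte payload:   %u words\n", lrh_packet_length(256, 1));
	return 0;
}
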
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5fa856909511..cd1996d0ad08 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1022,7 +1022,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->ib_dev = device;
port->port_num = port_num;
- init_MUTEX(&port->sm_sem);
+ sema_init(&port->sm_sem, 1);
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fcfbeb24a23..b342248aec05 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -460,6 +460,8 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.active_width = attr.active_width;
resp.active_speed = attr.active_speed;
resp.phys_state = attr.phys_state;
+ resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
+ cmd.port_num);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e0fa22238715..af7a8b08b2e9 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -94,6 +94,22 @@ rdma_node_get_transport(enum rdma_node_type node_type)
}
EXPORT_SYMBOL(rdma_node_get_transport);
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+{
+ if (device->get_link_layer)
+ return device->get_link_layer(device, port_num);
+
+ switch (rdma_node_get_transport(device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case RDMA_TRANSPORT_IWARP:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+EXPORT_SYMBOL(rdma_port_get_link_layer);
+
/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
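
rdma_port_get_link_layer() falls back to a transport-based guess when a driver supplies no get_link_layer() callback. A tiny sketch of that fallback mapping, with local enum names standing in for the rdma/ib_verbs.h definitions:

#include <stdio.h>

enum transport  { TRANSPORT_IB, TRANSPORT_IWARP, TRANSPORT_OTHER };
enum link_layer { LL_UNSPECIFIED, LL_INFINIBAND, LL_ETHERNET };

static enum link_layer default_link_layer(enum transport t)
{
	switch (t) {
	case TRANSPORT_IB:
		return LL_INFINIBAND;	/* classic IB HCAs */
	case TRANSPORT_IWARP:
		return LL_ETHERNET;	/* iWARP RNICs are Ethernet devices */
	default:
		return LL_UNSPECIFIED;
	}
}

int main(void)
{
	printf("IB    -> %d\n", default_link_layer(TRANSPORT_IB));
	printf("iWARP -> %d\n", default_link_layer(TRANSPORT_IWARP));
	return 0;
}
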
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild
index 06964c4af849..950dfabcd89d 100644
--- a/drivers/infiniband/hw/amso1100/Kbuild
+++ b/drivers/infiniband/hw/amso1100/Kbuild
@@ -1,6 +1,4 @@
-ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 3b5095470cb3..0ebe4e806b86 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -62,8 +62,8 @@ void c2_rnic_interrupt(struct c2_dev *c2dev)
static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
{
if (c2dev->qptr_array[mq_index] == NULL) {
- pr_debug(KERN_INFO "handle_mq: stray activity for mq_index=%d\n",
- mq_index);
+ pr_debug("handle_mq: stray activity for mq_index=%d\n",
+ mq_index);
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 7e7b5a66f042..621619c794e5 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,10 +1,8 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
-ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 005b7b52bc1e..09dda0b8740e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -160,6 +160,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
+ size += 1; /* one extra page for storing cq-in-err state */
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index e5ddb63e7d23..4bb997aa39d0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -728,6 +728,22 @@ struct t3_cq {
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
CQE_GENBIT(*cqe))
+struct t3_cq_status_page {
+ u32 cq_err;
+};
+
+static inline int cxio_cq_in_error(struct t3_cq *cq)
+{
+ return ((struct t3_cq_status_page *)
+ &cq->queue[1 << cq->size_log2])->cq_err;
+}
+
+static inline void cxio_set_cq_in_error(struct t3_cq *cq)
+{
+ ((struct t3_cq_status_page *)
+ &cq->queue[1 << cq->size_log2])->cq_err = 1;
+}
+
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
wq->queue->wq_in_err.err |= 1;
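
The cxio_wr.h hunk reserves space just past the CQ ring and overlays a small status structure on it, so both kernel and user mappings can see a "CQ in error" flag without a separate page mapping. A standalone sketch of that overlay idea, with an illustrative entry type much smaller than the real t3_cqe:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct cqe { uint64_t data[4]; };		/* illustrative CQ entry */
struct cq_status_page { uint32_t cq_err; };	/* overlaid past the ring */

int main(void)
{
	int size_log2 = 6;				/* 64-entry CQ */
	size_t n = (size_t)1 << size_log2;
	struct cqe *queue = calloc(n + 1, sizeof(*queue));	/* +1 spare entry */
	struct cq_status_page *st;

	if (!queue)
		return 1;
	st = (struct cq_status_page *)&queue[n];	/* queue[1 << size_log2] */
	st->cq_err = 1;					/* mark CQ in error */
	printf("cq_err = %u\n", st->cq_err);
	free(queue);
	return 0;
}
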
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 13c88871dc3b..d02dcc6e5963 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1093,8 +1093,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
PDBG("%s ep %p credits %u\n", __func__, ep, credits);
if (credits == 0) {
- PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
+ PDBG("%s 0 credit ack ep %p state %u\n",
+ __func__, ep, state_read(&ep->com));
return CPL_RET_BUF_DONE;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 6afc89e7572c..71e0d845da3d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -76,6 +76,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
atomic_inc(&qhp->refcnt);
spin_unlock(&rnicp->lock);
+ if (qhp->attr.state == IWCH_QP_STATE_RTS) {
+ attrs.next_state = IWCH_QP_STATE_TERMINATE;
+ iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (send_term)
+ iwch_post_terminate(qhp, rsp_msg);
+ }
+
event.event = ib_event;
event.device = chp->ibcq.device;
if (ib_event == IB_EVENT_CQ_ERR)
@@ -86,13 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
@@ -179,7 +181,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
case TPT_ERR_BOUND:
case TPT_ERR_INVALIDATE_SHARED_MR:
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index fca0b4b747e4..2e2741307af4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -154,6 +154,8 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
struct iwch_create_cq_resp uresp;
struct iwch_create_cq_req ureq;
struct iwch_ucontext *ucontext = NULL;
+ static int warned;
+ size_t resplen;
PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
rhp = to_iwch_dev(ibdev);
@@ -217,15 +219,26 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
uresp.key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
+ mm->key = uresp.key;
+ mm->addr = virt_to_phys(chp->cq.queue);
+ if (udata->outlen < sizeof uresp) {
+ if (!warned++)
+ printk(KERN_WARNING MOD "Warning - "
+ "downlevel libcxgb3 (non-fatal).\n");
+ mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
+ sizeof(struct t3_cqe));
+ resplen = sizeof(struct iwch_create_cq_resp_v0);
+ } else {
+ mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
+ sizeof(struct t3_cqe));
+ uresp.memsize = mm->len;
+ resplen = sizeof uresp;
+ }
+ if (ib_copy_to_udata(udata, &uresp, resplen)) {
kfree(mm);
iwch_destroy_cq(&chp->ibcq);
return ERR_PTR(-EFAULT);
}
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof (struct t3_cqe));
insert_mmap(ucontext, mm);
}
PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
@@ -1414,6 +1427,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
dev->ibdev.get_protocol_stats = iwch_get_mib;
+ dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index c64d27bf2c15..0993137181d7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -802,14 +802,12 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
/*
* Assumes qhp lock is held.
*/
-static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
+ struct iwch_cq *schp, unsigned long *flag)
{
- struct iwch_cq *rchp, *schp;
int count;
int flushed;
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
@@ -847,10 +845,23 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
- if (qhp->ibqp.uobject)
+ struct iwch_cq *rchp, *schp;
+
+ rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+ schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+ if (qhp->ibqp.uobject) {
cxio_set_wq_in_error(&qhp->wq);
- else
- __flush_qp(qhp, flag);
+ cxio_set_cq_in_error(&rchp->cq);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ if (schp != rchp) {
+ cxio_set_cq_in_error(&schp->cq);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ }
+ return;
+ }
+ __flush_qp(qhp, rchp, schp, flag);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/cxgb3/iwch_user.h
index cb7086f558c1..a277c31fcaf7 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_user.h
@@ -45,10 +45,18 @@ struct iwch_create_cq_req {
__u64 user_rptr_addr;
};
+struct iwch_create_cq_resp_v0 {
+ __u64 key;
+ __u32 cqid;
+ __u32 size_log2;
+};
+
struct iwch_create_cq_resp {
__u64 key;
__u32 cqid;
__u32 size_log2;
+ __u32 memsize;
+ __u32 reserved;
};
struct iwch_create_qp_resp {
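
The response struct grows by two u32s, and iwch_create_cq() stays compatible with older userspace by checking udata->outlen and replying with the v0 layout when the library is downlevel. A hedged sketch of that negotiation, with the field names restated only for illustration:

#include <stdio.h>
#include <stdint.h>

struct resp_v0 { uint64_t key; uint32_t cqid; uint32_t size_log2; };
struct resp_v1 { uint64_t key; uint32_t cqid; uint32_t size_log2;
		 uint32_t memsize; uint32_t reserved; };

/* Pick how many bytes to copy back based on how much the caller can
 * accept -- the same idea as the udata->outlen check in iwch_create_cq(). */
static size_t pick_resplen(size_t outlen)
{
	if (outlen < sizeof(struct resp_v1))
		return sizeof(struct resp_v0);	/* downlevel library */
	return sizeof(struct resp_v1);
}

int main(void)
{
	printf("old library (16-byte buffer): copy %zu bytes\n",
	       pick_resplen(sizeof(struct resp_v0)));
	printf("new library (24-byte buffer): copy %zu bytes\n",
	       pick_resplen(sizeof(struct resp_v1)));
	return 0;
}
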
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index e31a499f0172..cd20b1342aec 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb4
+ccflags-y := -Idrivers/net/cxgb4
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 32d352a88d50..0dc62b1438be 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -117,9 +117,9 @@ static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-static int snd_win = 32 * 1024;
+static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;
@@ -172,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
@@ -187,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
@@ -219,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
- unsigned long flags;
enum c4iw_ep_state state;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return state;
}
@@ -235,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->mutex);
PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
__state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->mutex);
return;
}
@@ -251,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
+ mutex_init(&epc->mutex);
+ c4iw_init_wr_wait(&epc->wr_wait);
}
PDBG("%s alloc ep %p\n", __func__, epc);
return epc;
@@ -1131,7 +1128,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
unsigned int tid = GET_TID(rpl);
struct tid_info *t = dev->rdev.lldi.tids;
@@ -1139,7 +1135,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(!ep);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
__state_set(&ep->com, DEAD);
@@ -1150,7 +1146,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
__func__, ep, ep->com.state);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
@@ -1213,9 +1209,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1249,9 +1245,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
PDBG("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
+ ep->com.wr_wait.ret = status2errno(rpl->status);
+ ep->com.wr_wait.done = 1;
+ wake_up(&ep->com.wr_wait.wait);
return 0;
}
@@ -1478,7 +1474,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_peer_close *hdr = cplhdr(skb);
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
- unsigned long flags;
int disconnect = 1;
int release = 0;
int closing = 0;
@@ -1489,7 +1484,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
__state_set(&ep->com, CLOSING);
@@ -1507,17 +1502,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
* in rdma connection migration (see c4iw_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
+ wake_up(&ep->com.wr_wait.wait);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -1550,7 +1545,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
default:
BUG_ON(1);
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (closing) {
attrs.next_state = C4IW_QP_STATE_CLOSING;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1581,7 +1576,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
- unsigned long flags;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
@@ -1591,9 +1585,17 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep->hwtid);
return 0;
}
- spin_lock_irqsave(&ep->com.lock, flags);
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ ep->com.wr_wait.done = 1;
+ ep->com.wr_wait.ret = -ECONNRESET;
+ wake_up(&ep->com.wr_wait.wait);
+
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CONNECTING:
break;
@@ -1605,23 +1607,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
connect_reply_upcall(ep, -ECONNRESET);
break;
case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
break;
case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see c4iw_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
- wake_up(&ep->com.waitq);
break;
case MORIBUND:
case CLOSING:
@@ -1644,7 +1631,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case DEAD:
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
return 0;
default:
BUG_ON(1);
@@ -1655,7 +1642,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
__state_set(&ep->com, DEAD);
release = 1;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
if (!rpl_skb) {
@@ -1681,7 +1668,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
struct cpl_close_con_rpl *rpl = cplhdr(skb);
- unsigned long flags;
int release = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
@@ -1692,7 +1678,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CLOSING:
__state_set(&ep->com, MORIBUND);
@@ -1717,7 +1703,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(1);
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
return 0;
@@ -1725,23 +1711,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *ep;
- struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct cpl_rdma_terminate *rpl = cplhdr(skb);
struct tid_info *t = dev->rdev.lldi.tids;
- unsigned int tid = GET_TID(term);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
ep = lookup_tid(t, tid);
+ BUG_ON(!ep);
- if (state_read(&ep->com) != FPDU_MODE)
- return 0;
+ if (ep->com.qp) {
+ printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
+ ep->com.qp->wq.sq.qid);
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ } else
+ printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb_pull(skb, sizeof *term);
- PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
return 0;
}
@@ -1762,8 +1749,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
if (credits == 0) {
- PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
return 0;
}
@@ -2042,6 +2029,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
state_set(&ep->com, LISTEN);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.local_addr.sin_addr.s_addr,
ep->com.local_addr.sin_port,
@@ -2050,15 +2038,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail3;
/* wait for pass_open_rpl */
- wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
- if (ep->com.rpl_done)
- err = ep->com.rpl_err;
- else {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(ep->com.dev->rdev.lldi.pdev));
- ep->com.dev->rdev.flags = T4_FATAL_ERROR;
- err = -EIO;
- }
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
if (!err) {
cm_id->provider_data = ep;
goto out;
@@ -2082,20 +2063,12 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
+ c4iw_init_wr_wait(&ep->com.wr_wait);
err = listen_stop(ep);
if (err)
goto done;
- wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
- if (ep->com.rpl_done)
- err = ep->com.rpl_err;
- else {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(ep->com.dev->rdev.lldi.pdev));
- ep->com.dev->rdev.flags = T4_FATAL_ERROR;
- err = -EIO;
- }
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+ __func__);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
cm_id->rem_ref(cm_id);
@@ -2106,12 +2079,11 @@ done:
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
int ret = 0;
- unsigned long flags;
int close = 0;
int fatal = 0;
struct c4iw_rdev *rdev;
- spin_lock_irqsave(&ep->com.lock, flags);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
states[ep->com.state], abrupt);
@@ -2158,7 +2130,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- spin_unlock_irqrestore(&ep->com.lock, flags);
+ mutex_unlock(&ep->com.mutex);
if (close) {
if (abrupt)
ret = abort_connection(ep, NULL, gfp);
@@ -2172,6 +2144,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
+static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ return 0;
+}
+
/*
* These are the real handlers that are called from a
* work queue.
@@ -2190,7 +2169,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_ABORT_REQ_RSS] = peer_abort,
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
- [CPL_FW4_ACK] = fw4_ack
+ [CPL_FW4_ACK] = fw4_ack,
+ [CPL_FW6_MSG] = async_event
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2198,7 +2178,7 @@ static void process_timeout(struct c4iw_ep *ep)
struct c4iw_qp_attributes attrs;
int abort = 1;
- spin_lock_irq(&ep->com.lock);
+ mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
switch (ep->com.state) {
@@ -2225,7 +2205,7 @@ static void process_timeout(struct c4iw_ep *ep)
WARN_ON(1);
abort = 0;
}
- spin_unlock_irq(&ep->com.lock);
+ mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
c4iw_put_ep(&ep->com);
@@ -2309,6 +2289,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
"for tid %u\n", rpl->status, GET_TID(rpl));
}
+ kfree_skb(skb);
return 0;
}
@@ -2323,20 +2304,25 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
switch (rpl->type) {
case 1:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
- wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
if (wr_waitp) {
- wr_waitp->ret = ret;
+ if (ret)
+ wr_waitp->ret = -ret;
+ else
+ wr_waitp->ret = 0;
wr_waitp->done = 1;
wake_up(&wr_waitp->wait);
}
+ kfree_skb(skb);
break;
case 2:
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ sched(dev, skb);
break;
default:
printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
rpl->type);
+ kfree_skb(skb);
break;
}
return 0;
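
The cm.c rework drops the per-endpoint rpl_done/rpl_err waitqueue in favour of the shared c4iw_wr_wait helper and replaces the spinlock with a mutex, since the handlers may sleep. The following is a userspace analogue of that completion-style wait using pthreads; it is only a sketch of the pattern, and omits the timeout and fatal-device handling that c4iw_wait_for_reply() adds.

#include <pthread.h>
#include <stdio.h>

struct wr_wait {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
	int		done;
	int		ret;
};

static void wr_wait_init(struct wr_wait *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->done = 0;
	w->ret = 0;
}

static int wr_wait_for_reply(struct wr_wait *w)
{
	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return w->ret;
}

static void wr_wait_complete(struct wr_wait *w, int ret)
{
	pthread_mutex_lock(&w->lock);
	w->ret = ret;
	w->done = 1;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

static void *replier(void *arg)
{
	wr_wait_complete(arg, 0);	/* pretend the firmware answered */
	return NULL;
}

int main(void)
{
	struct wr_wait w;
	pthread_t t;

	wr_wait_init(&w);
	pthread_create(&t, NULL, replier, &w);
	printf("reply status %d\n", wr_wait_for_reply(&w));
	pthread_join(&t, NULL);
	return 0;
}
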
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b3daf39eed4a..8d8f8add6fcd 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_NRES(1) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -64,14 +64,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
c4iw_init_wr_wait(&wr_wait);
ret = c4iw_ofld_send(rdev, skb);
if (!ret) {
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
}
kfree(cq->sw_queue);
@@ -132,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_NRES(1) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -157,14 +150,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
if (ret)
goto err4;
PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
if (ret)
goto err4;
@@ -476,6 +462,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
goto proc_cqe;
}
+ if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
/*
* RECV completion.
*/
@@ -696,6 +687,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
case T4_ERR_MSN_RANGE:
case T4_ERR_IRD_OVERFLOW:
case T4_ERR_OPCODE:
+ case T4_ERR_INTERNAL_ERR:
wc->status = IB_WC_FATAL_ERR;
break;
case T4_ERR_SWFLUSH:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9bbf491d5d9e..54fbc1118abe 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -49,29 +49,33 @@ static DEFINE_MUTEX(dev_mutex);
static struct dentry *c4iw_debugfs_root;
-struct debugfs_qp_data {
+struct c4iw_debugfs_data {
struct c4iw_dev *devp;
char *buf;
int bufsize;
int pos;
};
-static int count_qps(int id, void *p, void *data)
+static int count_idrs(int id, void *p, void *data)
{
- struct c4iw_qp *qp = p;
int *countp = data;
- if (id != qp->wq.sq.qid)
- return 0;
-
*countp = *countp + 1;
return 0;
}
-static int dump_qps(int id, void *p, void *data)
+static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct c4iw_debugfs_data *d = file->private_data;
+
+ return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
+}
+
+static int dump_qp(int id, void *p, void *data)
{
struct c4iw_qp *qp = p;
- struct debugfs_qp_data *qpd = data;
+ struct c4iw_debugfs_data *qpd = data;
int space;
int cc;
@@ -101,7 +105,7 @@ static int dump_qps(int id, void *p, void *data)
static int qp_release(struct inode *inode, struct file *file)
{
- struct debugfs_qp_data *qpd = file->private_data;
+ struct c4iw_debugfs_data *qpd = file->private_data;
if (!qpd) {
printk(KERN_INFO "%s null qpd?\n", __func__);
return 0;
@@ -113,7 +117,7 @@ static int qp_release(struct inode *inode, struct file *file)
static int qp_open(struct inode *inode, struct file *file)
{
- struct debugfs_qp_data *qpd;
+ struct c4iw_debugfs_data *qpd;
int ret = 0;
int count = 1;
@@ -126,7 +130,7 @@ static int qp_open(struct inode *inode, struct file *file)
qpd->pos = 0;
spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+ idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
spin_unlock_irq(&qpd->devp->lock);
qpd->bufsize = count * 128;
@@ -137,7 +141,7 @@ static int qp_open(struct inode *inode, struct file *file)
}
spin_lock_irq(&qpd->devp->lock);
- idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+ idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
spin_unlock_irq(&qpd->devp->lock);
qpd->buf[qpd->pos++] = 0;
@@ -149,43 +153,86 @@ out:
return ret;
}
-static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static const struct file_operations qp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qp_open,
+ .release = qp_release,
+ .read = debugfs_read,
+ .llseek = default_llseek,
+};
+
+static int dump_stag(int id, void *p, void *data)
{
- struct debugfs_qp_data *qpd = file->private_data;
- loff_t pos = *ppos;
- loff_t avail = qpd->pos;
+ struct c4iw_debugfs_data *stagd = data;
+ int space;
+ int cc;
- if (pos < 0)
- return -EINVAL;
- if (pos >= avail)
+ space = stagd->bufsize - stagd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+ if (cc < space)
+ stagd->pos += cc;
+ return 0;
+}
+
+static int stag_release(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *stagd = file->private_data;
+ if (!stagd) {
+ printk(KERN_INFO "%s null stagd?\n", __func__);
return 0;
- if (count > avail - pos)
- count = avail - pos;
+ }
+ kfree(stagd->buf);
+ kfree(stagd);
+ return 0;
+}
- while (count) {
- size_t len = 0;
+static int stag_open(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *stagd;
+ int ret = 0;
+ int count = 1;
- len = min((int)count, (int)qpd->pos - (int)pos);
- if (copy_to_user(buf, qpd->buf + pos, len))
- return -EFAULT;
- if (len == 0)
- return -EINVAL;
+ stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+ if (!stagd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ stagd->devp = inode->i_private;
+ stagd->pos = 0;
+
+ spin_lock_irq(&stagd->devp->lock);
+ idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
+ spin_unlock_irq(&stagd->devp->lock);
- buf += len;
- pos += len;
- count -= len;
+ stagd->bufsize = count * sizeof("0x12345678\n");
+ stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+ if (!stagd->buf) {
+ ret = -ENOMEM;
+ goto err1;
}
- count = pos - *ppos;
- *ppos = pos;
- return count;
+
+ spin_lock_irq(&stagd->devp->lock);
+ idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
+ spin_unlock_irq(&stagd->devp->lock);
+
+ stagd->buf[stagd->pos++] = 0;
+ file->private_data = stagd;
+ goto out;
+err1:
+ kfree(stagd);
+out:
+ return ret;
}
-static const struct file_operations qp_debugfs_fops = {
+static const struct file_operations stag_debugfs_fops = {
.owner = THIS_MODULE,
- .open = qp_open,
- .release = qp_release,
- .read = qp_read,
+ .open = stag_open,
+ .release = stag_release,
+ .read = debugfs_read,
+ .llseek = default_llseek,
};
static int setup_debugfs(struct c4iw_dev *devp)
@@ -199,6 +246,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
(void *)devp, &qp_debugfs_fops);
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+
+ de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &stag_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
return 0;
}
@@ -290,7 +342,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
goto err3;
}
+ err = c4iw_ocqp_pool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+ goto err4;
+ }
return 0;
+err4:
+ c4iw_rqtpool_destroy(rdev);
err3:
c4iw_pblpool_destroy(rdev);
err2:
@@ -317,6 +376,7 @@ static void c4iw_remove(struct c4iw_dev *dev)
idr_destroy(&dev->cqidr);
idr_destroy(&dev->qpidr);
idr_destroy(&dev->mmidr);
+ iounmap(dev->rdev.oc_mw_kva);
ib_dealloc_device(&dev->ibdev);
}
@@ -332,6 +392,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
}
devp->rdev.lldi = *infop;
+ devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
+ (pci_resource_len(devp->rdev.lldi.pdev, 2) -
+ roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
+ devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
+ devp->rdev.lldi.vr->ocq.size);
+
+ printk(KERN_INFO MOD "ocq memory: "
+ "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+ devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+ devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+
mutex_lock(&dev_mutex);
ret = c4iw_rdev_open(&devp->rdev);
@@ -383,46 +454,6 @@ out:
return dev;
}
-static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
- unsigned int skb_len,
- unsigned int pull_len)
-{
- struct sk_buff *skb;
- struct skb_shared_info *ssi;
-
- if (gl->tot_len <= 512) {
- skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, gl->tot_len);
- skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
- } else {
- skb = alloc_skb(skb_len, GFP_ATOMIC);
- if (unlikely(!skb))
- goto out;
- __skb_put(skb, pull_len);
- skb_copy_to_linear_data(skb, gl->va, pull_len);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
- ssi->frags[0].size = gl->frags[0].size - pull_len;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags - 1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
-
- skb->len = gl->tot_len;
- skb->data_len = skb->len - pull_len;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
- }
-out:
- return skb;
-}
-
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *gl)
{
@@ -447,7 +478,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
c4iw_ev_handler(dev, qid);
return 0;
} else {
- skb = t4_pktgl_to_skb(gl, 128, 128);
+ skb = cxgb4_pktgl_to_skb(gl, 128, 128);
if (unlikely(!skb))
goto nomem;
}
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 491e76a0327f..c13041a0aeba 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->attr.state == C4IW_QP_STATE_RTS) {
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
- &attrs, 1);
+ &attrs, 0);
}
event.event = ib_event;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index ed459b8f800f..16032cdb4337 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/kfifo.h>
+#include <linux/mutex.h>
#include <asm/byteorder.h>
@@ -79,21 +80,6 @@ static inline void *cplhdr(struct sk_buff *skb)
return skb->data;
}
-#define C4IW_WR_TO (10*HZ)
-
-struct c4iw_wr_wait {
- wait_queue_head_t wait;
- int done;
- int ret;
-};
-
-static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
-{
- wr_waitp->ret = 0;
- wr_waitp->done = 0;
- init_waitqueue_head(&wr_waitp->wait);
-}
-
struct c4iw_resource {
struct kfifo tpt_fifo;
spinlock_t tpt_fifo_lock;
@@ -127,8 +113,11 @@ struct c4iw_rdev {
struct c4iw_dev_ucontext uctx;
struct gen_pool *pbl_pool;
struct gen_pool *rqt_pool;
+ struct gen_pool *ocqp_pool;
u32 flags;
struct cxgb4_lld_info lldi;
+ unsigned long oc_mw_pa;
+ void __iomem *oc_mw_kva;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -141,6 +130,44 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
+#define C4IW_WR_TO (10*HZ)
+
+struct c4iw_wr_wait {
+ wait_queue_head_t wait;
+ int done;
+ int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ wr_waitp->done = 0;
+ init_waitqueue_head(&wr_waitp->wait);
+}
+
+static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
+ struct c4iw_wr_wait *wr_waitp,
+ u32 hwtid, u32 qpid,
+ const char *func)
+{
+ unsigned to = C4IW_WR_TO;
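+ /*
+  * Wait until the firmware replies, quadrupling the timeout and
+  * logging each time the wait expires with no reply.
+  */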
+ do {
+
+ wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
+ if (!wr_waitp->done) {
+ printk(KERN_ERR MOD "%s - Device %s not responding - "
+ "tid %u qpid %u\n", func,
+ pci_name(rdev->lldi.pdev), hwtid, qpid);
+ to = to << 2;
+ }
+ } while (!wr_waitp->done);
+ if (wr_waitp->ret)
+ printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
+ pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+ return wr_waitp->ret;
+}
+
+
struct c4iw_dev {
struct ib_device ibdev;
struct c4iw_rdev rdev;
@@ -327,6 +354,7 @@ struct c4iw_qp {
struct c4iw_qp_attributes attr;
struct t4_wq wq;
spinlock_t lock;
+ struct mutex mutex;
atomic_t refcnt;
wait_queue_head_t wait;
struct timer_list timer;
@@ -579,12 +607,10 @@ struct c4iw_ep_common {
struct c4iw_dev *dev;
enum c4iw_ep_state state;
struct kref kref;
- spinlock_t lock;
+ struct mutex mutex;
struct sockaddr_in local_addr;
struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
+ struct c4iw_wr_wait wr_wait;
unsigned long flags;
};
@@ -654,8 +680,10 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
@@ -721,6 +749,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 269373a62f22..273ffe49525a 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -71,7 +71,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
FW_WR_COMPL(1));
- req->wr.wr_lo = (__force __be64)&wr_wait;
+ req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -103,14 +103,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
len -= C4IW_MAX_INLINE_SIZE;
}
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 8f645c83a125..f66dd8bf5128 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -54,9 +54,9 @@
#include "iw_cxgb4.h"
-static int fastreg_support;
+static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
-MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
static int c4iw_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
@@ -149,19 +149,28 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
addr = mm->addr;
kfree(mm);
- if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
- pci_resource_len(rdev->lldi.pdev, 2)))) {
+ if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+ pci_resource_len(rdev->lldi.pdev, 0)))) {
/*
- * Map T4 DB register.
+ * MA_SYNC register...
*/
- if (vma->vm_flags & VM_READ)
- return -EPERM;
-
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+ /*
+ * Map user DB or OCQP memory...
+ */
+ if (addr >= rdev->oc_mw_pa)
+ vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
@@ -382,7 +391,17 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
static int c4iw_get_mib(struct ib_device *ibdev,
union rdma_protocol_stats *stats)
{
- return -ENOSYS;
+ struct tp_tcp_stats v4, v6;
+ struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
+
+ cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
+ memset(stats, 0, sizeof *stats);
+ stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
+ stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
+ stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
+ stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs;
+
+ return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -472,6 +491,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
dev->ibdev.get_protocol_stats = c4iw_get_mib;
+ dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 93f6e5bf0ec5..057cb2505ea1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,6 +31,63 @@
*/
#include "iw_cxgb4.h"
+static int ocqp_support;
+module_param(ocqp_support, int, 0644);
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+
+static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
+{
+ unsigned long flag;
+ spin_lock_irqsave(&qhp->lock, flag);
+ qhp->attr.state = state;
+ spin_unlock_irqrestore(&qhp->lock, flag);
+}
+
+static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
+}
+
+static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
+ pci_unmap_addr(sq, mapping));
+}
+
+static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ if (t4_sq_onchip(sq))
+ dealloc_oc_sq(rdev, sq);
+ else
+ dealloc_host_sq(rdev, sq);
+}
+
+static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ if (!ocqp_support || !t4_ocqp_supported())
+ return -ENOSYS;
+ sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
+ if (!sq->dma_addr)
+ return -ENOMEM;
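+ /*
+  * The pool returns an address inside the adapter's on-chip queue
+  * region; convert it to the PCI bus address and the ioremapped KVA
+  * set up at device probe time.
+  */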
+ sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
+ rdev->lldi.vr->ocq.start;
+ sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
+ rdev->lldi.vr->ocq.start);
+ sq->flags |= T4_SQ_ONCHIP;
+ return 0;
+}
+
+static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+ sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
+ &(sq->dma_addr), GFP_KERNEL);
+ if (!sq->queue)
+ return -ENOMEM;
+ sq->phys_addr = virt_to_phys(sq->queue);
+ pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ return 0;
+}
+
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct c4iw_dev_ucontext *uctx)
{
@@ -41,9 +98,7 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
dma_free_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, wq->rq.queue,
dma_unmap_addr(&wq->rq, mapping));
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, wq->sq.queue,
- dma_unmap_addr(&wq->sq, mapping));
+ dealloc_sq(rdev, &wq->sq);
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
kfree(wq->rq.sw_rq);
kfree(wq->sq.sw_sq);
@@ -93,11 +148,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (!wq->rq.rqt_hwaddr)
goto err4;
- wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, &(wq->sq.dma_addr),
- GFP_KERNEL);
- if (!wq->sq.queue)
- goto err5;
+ if (user) {
+ if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
+ goto err5;
+ } else
+ if (alloc_host_sq(rdev, &wq->sq))
+ goto err5;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@ -144,7 +200,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_NRES(2) |
FW_WR_COMPL(1));
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (u64)&wr_wait;
+ res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -158,6 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
V_FW_RI_RES_WR_IQID(scq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
@@ -198,14 +255,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = c4iw_ofld_send(rdev, skb);
if (ret)
goto err7;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rdev->lldi.pdev));
- rdev->flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
if (ret)
goto err7;
@@ -219,9 +269,7 @@ err7:
wq->rq.memsize, wq->rq.queue,
dma_unmap_addr(&wq->rq, mapping));
err6:
- dma_free_coherent(&(rdev->lldi.pdev->dev),
- wq->sq.memsize, wq->sq.queue,
- dma_unmap_addr(&wq->sq, mapping));
+ dealloc_sq(rdev, &wq->sq);
err5:
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
@@ -263,6 +311,9 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
rem -= len;
}
}
+ len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+ if (len)
+ memset(dstp, 0, len);
immdp->op = FW_RI_DATA_IMMD;
immdp->r1 = 0;
immdp->r2 = 0;
@@ -292,6 +343,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
if (++flitp == queue_end)
flitp = queue_start;
}
+ *flitp = (__force __be64)0;
isglp->op = FW_RI_DATA_ISGL;
isglp->r1 = 0;
isglp->nsge = cpu_to_be16(num_sge);
@@ -453,13 +505,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
-static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+ struct ib_send_wr *wr, u8 *len16)
{
struct fw_ri_immd *imdp;
__be64 *p;
int i;
int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+ int rem;
if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
return -EINVAL;
@@ -474,32 +528,28 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
0xffffffff);
- if (pbllen > T4_MAX_FR_IMMD) {
- struct c4iw_fr_page_list *c4pl =
- to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
- struct fw_ri_dsgl *sglp;
-
- sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
- sglp->op = FW_RI_DATA_DSGL;
- sglp->r1 = 0;
- sglp->nsge = cpu_to_be16(1);
- sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
- sglp->len0 = cpu_to_be32(pbllen);
-
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
- } else {
- imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- imdp->op = FW_RI_DATA_IMMD;
- imdp->r1 = 0;
- imdp->r2 = 0;
- imdp->immdlen = cpu_to_be32(pbllen);
- p = (__be64 *)(imdp + 1);
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
- *p = cpu_to_be64(
- (u64)wr->wr.fast_reg.page_list->page_list[i]);
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
- 16);
+ WARN_ON(pbllen > T4_MAX_FR_IMMD);
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ rem = pbllen;
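+ /*
+  * Copy the page list inline after the WR, wrapping around the SQ
+  * ring if needed, then zero-pad to the 32-byte rounded length.
+  */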
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+ rem -= sizeof *p;
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
}
+ BUG_ON(rem < 0);
+ while (rem) {
+ *p = 0;
+ rem -= sizeof *p;
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
return 0;
}
@@ -587,7 +637,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+ fw_flags = FW_RI_RDMA_READ_INVALIDATE;
else
fw_flags = 0;
err = build_rdma_read(wqe, wr, &len16);
@@ -600,7 +650,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(wqe, wr, &len16);
+ err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -905,46 +955,38 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
* Assumes qhp lock is held.
*/
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
- struct c4iw_cq *schp, unsigned long *flag)
+ struct c4iw_cq *schp)
{
int count;
int flushed;
+ unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock_irqrestore(&qhp->lock, *flag);
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock_irqsave(&rchp->lock, flag);
spin_lock(&qhp->lock);
c4iw_flush_hw_cq(&rchp->cq);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, *flag);
+ spin_unlock_irqrestore(&rchp->lock, flag);
if (flushed)
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
/* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
c4iw_flush_hw_cq(&schp->cq);
c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, *flag);
+ spin_unlock_irqrestore(&schp->lock, flag);
if (flushed)
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock_irqsave(&qhp->lock, *flag);
}
-static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+static void flush_qp(struct c4iw_qp *qhp)
{
struct c4iw_cq *rchp, *schp;
@@ -958,7 +1000,7 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
t4_set_cq_in_error(&schp->cq);
return;
}
- __flush_qp(qhp, rchp, schp, flag);
+ __flush_qp(qhp, rchp, schp);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
@@ -966,7 +1008,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
{
struct fw_ri_wr *wqe;
int ret;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -985,28 +1026,16 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID(ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
- wqe->cookie = (u64)&wr_wait;
+ wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rhp->rdev.lldi.pdev));
- rhp->rdev.flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else {
- ret = wr_wait.ret;
- if (ret)
- printk(KERN_WARNING MOD
- "%s: Abnormal close qpid %d ret %u\n",
- pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
- ret);
- }
+ ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
+ qhp->wq.sq.qid, __func__);
out:
PDBG("%s ret %d\n", __func__, ret);
return ret;
@@ -1040,7 +1069,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
struct fw_ri_wr *wqe;
int ret;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -1060,7 +1088,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID(qhp->ep->hwtid) |
FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
- wqe->cookie = (u64)&wr_wait;
+ wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1097,19 +1125,13 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
goto out;
- wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
- if (!wr_wait.done) {
- printk(KERN_ERR MOD "Device %s not responding!\n",
- pci_name(rhp->rdev.lldi.pdev));
- rhp->rdev.flags = T4_FATAL_ERROR;
- ret = -EIO;
- } else
- ret = wr_wait.ret;
+ ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
PDBG("%s ret %d\n", __func__, ret);
return ret;
@@ -1122,7 +1144,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
{
int ret = 0;
struct c4iw_qp_attributes newattr = qhp->attr;
- unsigned long flag;
int disconnect = 0;
int terminate = 0;
int abort = 0;
@@ -1133,7 +1154,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
(mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
- spin_lock_irqsave(&qhp->lock, flag);
+ mutex_lock(&qhp->mutex);
/* Process attr changes if in IDLE */
if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
@@ -1184,7 +1205,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.mpa_attr = attrs->mpa_attr;
qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = C4IW_QP_STATE_RTS;
+ set_state(qhp, C4IW_QP_STATE_RTS);
/*
* Ref the endpoint here and deref when we
@@ -1193,15 +1214,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
* transition.
*/
c4iw_get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_init(rhp, qhp);
- spin_lock_irqsave(&qhp->lock, flag);
if (ret)
goto err;
break;
case C4IW_QP_STATE_ERROR:
- qhp->attr.state = C4IW_QP_STATE_ERROR;
- flush_qp(qhp, &flag);
+ set_state(qhp, C4IW_QP_STATE_ERROR);
+ flush_qp(qhp);
break;
default:
ret = -EINVAL;
@@ -1212,38 +1231,38 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
- qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
abort = 0;
disconnect = 1;
- c4iw_get_ep(&ep->com);
+ c4iw_get_ep(&qhp->ep->com);
}
- spin_unlock_irqrestore(&qhp->lock, flag);
ret = rdma_fini(rhp, qhp, ep);
- spin_lock_irqsave(&qhp->lock, flag);
if (ret) {
- c4iw_get_ep(&ep->com);
+ if (internal)
+ c4iw_get_ep(&qhp->ep->com);
disconnect = abort = 1;
goto err;
}
break;
case C4IW_QP_STATE_TERMINATE:
- qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+ set_state(qhp, C4IW_QP_STATE_TERMINATE);
if (qhp->ibqp.uobject)
t4_set_wq_in_error(&qhp->wq);
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
- terminate = 1;
+ if (!internal)
+ terminate = 1;
disconnect = 1;
+ c4iw_get_ep(&qhp->ep->com);
break;
case C4IW_QP_STATE_ERROR:
- qhp->attr.state = C4IW_QP_STATE_ERROR;
+ set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
abort = 1;
disconnect = 1;
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
+ c4iw_get_ep(&qhp->ep->com);
}
goto err;
break;
@@ -1259,8 +1278,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
switch (attrs->next_state) {
case C4IW_QP_STATE_IDLE:
- flush_qp(qhp, &flag);
- qhp->attr.state = C4IW_QP_STATE_IDLE;
+ flush_qp(qhp);
+ set_state(qhp, C4IW_QP_STATE_IDLE);
qhp->attr.llp_stream_handle = NULL;
c4iw_put_ep(&qhp->ep->com);
qhp->ep = NULL;
@@ -1282,7 +1301,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
ret = -EINVAL;
goto out;
}
- qhp->attr.state = C4IW_QP_STATE_IDLE;
+ set_state(qhp, C4IW_QP_STATE_IDLE);
break;
case C4IW_QP_STATE_TERMINATE:
if (!internal) {
@@ -1305,15 +1324,16 @@ err:
/* disassociate the LLP connection */
qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
+ if (!ep)
+ ep = qhp->ep;
qhp->ep = NULL;
- qhp->attr.state = C4IW_QP_STATE_ERROR;
+ set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
wake_up(&qhp->wait);
BUG_ON(!ep);
- flush_qp(qhp, &flag);
+ flush_qp(qhp);
out:
- spin_unlock_irqrestore(&qhp->lock, flag);
+ mutex_unlock(&qhp->mutex);
if (terminate)
post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
@@ -1335,7 +1355,6 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
-
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}
@@ -1380,7 +1399,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
- struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+ struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
PDBG("%s ib_pd %p\n", __func__, pd);
@@ -1450,6 +1469,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ord = 1;
qhp->attr.max_ird = 1;
spin_lock_init(&qhp->lock);
+ mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
@@ -1478,7 +1498,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = -ENOMEM;
goto err6;
}
-
+ if (t4_sq_onchip(&qhp->wq.sq)) {
+ mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
+ if (!mm5) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+ uresp.flags = C4IW_QPF_ONCHIP;
+ } else
+ uresp.flags = 0;
uresp.qid_mask = rhp->rdev.qpmask;
uresp.sqid = qhp->wq.sq.qid;
uresp.sq_size = qhp->wq.sq.size;
@@ -1487,6 +1515,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
uresp.rq_size = qhp->wq.rq.size;
uresp.rq_memsize = qhp->wq.rq.memsize;
spin_lock(&ucontext->mmap_lock);
+ if (mm5) {
+ uresp.ma_sync_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ }
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
uresp.rq_key = ucontext->key;
@@ -1498,9 +1530,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
- goto err7;
+ goto err8;
mm1->key = uresp.sq_key;
- mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+ mm1->addr = qhp->wq.sq.phys_addr;
mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
insert_mmap(ucontext, mm1);
mm2->key = uresp.rq_key;
@@ -1515,6 +1547,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mm4->addr = qhp->wq.rq.udb;
mm4->len = PAGE_SIZE;
insert_mmap(ucontext, mm4);
+ if (mm5) {
+ mm5->key = uresp.ma_sync_key;
+ mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
+ + A_PCIE_MA_SYNC) & PAGE_MASK;
+ mm5->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm5);
+ }
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
@@ -1522,6 +1561,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
return &qhp->ibqp;
+err8:
+ kfree(mm5);
err7:
kfree(mm4);
err6:
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 83b23dfa250d..4fb50d58b493 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -311,6 +311,9 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ if (!addr && printk_ratelimit())
+ printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+ pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -370,6 +373,9 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ if (!addr && printk_ratelimit())
+ printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+ pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -416,3 +422,59 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
gen_pool_destroy(rdev->rqt_pool);
}
+
+/*
+ * On-Chip QP Memory.
+ */
+#define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */
+
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ return (u32)addr;
+}
+
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
+}
+
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
+{
+ unsigned start, chunk, top;
+
+ rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
+ if (!rdev->ocqp_pool)
+ return -ENOMEM;
+
+ start = rdev->lldi.vr->ocq.start;
+ chunk = rdev->lldi.vr->ocq.size;
+ top = start + chunk;
+
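+ /*
+  * Add the region to the pool in the largest chunks gen_pool will
+  * take, halving the chunk size on failure and giving up once it
+  * drops to 1024 minimum-sized blocks or fewer.
+  */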
+ while (start < top) {
+ chunk = min(top - start + 1, chunk);
+ if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
+ PDBG("%s failed to add OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
+ if (chunk <= 1024 << MIN_OCQP_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all OCQP chunks (%x/%x)\n",
+ start, top - start);
+ return 0;
+ }
+ chunk >>= 1;
+ } else {
+ PDBG("%s added OCQP chunk (%x/%x)\n",
+ __func__, start, chunk);
+ start += chunk;
+ }
+ }
+ return 0;
+}
+
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->ocqp_pool);
+}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 24f369046ef3..70004425d695 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -52,6 +52,7 @@
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@@ -65,7 +66,7 @@ struct t4_status_page {
#define T4_EQ_ENTRY_SIZE 64
-#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
@@ -78,7 +79,7 @@ struct t4_status_page {
sizeof(struct fw_ri_rdma_write_wr) - \
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
- sizeof(struct fw_ri_immd)))
+ sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
@@ -266,10 +267,36 @@ struct t4_swsqe {
u16 idx;
};
+static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ return pgprot_writecombine(prot);
+#elif defined(CONFIG_PPC64)
+ return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) &
+ ~(pgprot_t)_PAGE_GUARDED);
+#else
+ return pgprot_noncached(prot);
+#endif
+}
+
+static inline int t4_ocqp_supported(void)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+enum {
+ T4_SQ_ONCHIP = (1<<0),
+};
+
struct t4_sq {
union t4_wr *queue;
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned long phys_addr;
struct t4_swsqe *sw_sq;
struct t4_swsqe *oldest_read;
u64 udb;
@@ -280,6 +307,7 @@ struct t4_sq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 flags;
};
struct t4_swrqe {
@@ -350,6 +378,11 @@ static inline void t4_rq_consume(struct t4_wq *wq)
wq->rq.cidx = 0;
}
+static inline int t4_sq_onchip(struct t4_sq *sq)
+{
+ return sq->flags & T4_SQ_ONCHIP;
+}
+
static inline int t4_sq_empty(struct t4_wq *wq)
{
return wq->sq.in_use == 0;
@@ -396,30 +429,27 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->sq.queue[wq->sq.size].status.qp_err;
+ return wq->rq.queue[wq->rq.size].status.qp_err;
}
static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.qp_err = 1;
wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.db_off = 1;
wq->rq.queue[wq->rq.size].status.db_off = 1;
}
static inline void t4_enable_wq_db(struct t4_wq *wq)
{
- wq->sq.queue[wq->sq.size].status.db_off = 0;
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
- return !wq->sq.queue[wq->sq.size].status.db_off;
+ return !wq->rq.queue[wq->rq.size].status.db_off;
}
struct t4_cq {
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index ed6414abde02..e6669d54770e 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -50,7 +50,13 @@ struct c4iw_create_cq_resp {
__u32 qid_mask;
};
+
+enum {
+ C4IW_QPF_ONCHIP = (1<<0)
+};
+
struct c4iw_create_qp_resp {
+ __u64 ma_sync_key;
__u64 sq_key;
__u64 rq_key;
__u64 sq_db_gts_key;
@@ -62,5 +68,6 @@ struct c4iw_create_qp_resp {
__u32 sq_size;
__u32 rq_size;
__u32 qid_mask;
+ __u32 flags;
};
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 53f4cd4fc19a..43cae84005f0 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -171,7 +171,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
}
ret = ehca_reg_maxmr(shca, e_maxmr,
- (void *)ehca_map_vaddr((void *)KERNELBASE),
+ (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
@@ -1636,7 +1636,7 @@ int ehca_reg_internal_maxmr(
/* register internal max-MR on HCA */
size_maxmr = ehca_mr_len;
- iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
+ iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -2209,7 +2209,7 @@ int ehca_mr_is_maxmr(u64 size,
{
/* a MR is treated as max-MR only if it fits following: */
if ((size == ehca_mr_len) &&
- (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
+ (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
ehca_gen_dbg("this is a max-MR");
return 1;
} else
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fa3df82681df..4496f2820c92 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
+ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-DIPATH_KERN_TYPE=0
obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index d13e72685dcf..12d5bf76302c 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -57,6 +57,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
goto bail;
}
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 11a236f8d884..4b8f9c49397e 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -30,66 +30,163 @@
* SOFTWARE.
*/
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
#include <linux/slab.h>
+#include <linux/inet.h>
+#include <linux/string.h>
#include "mlx4_ib.h"
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+ u8 *mac, int *is_mcast, u8 port)
{
- struct mlx4_dev *dev = to_mdev(pd->device)->dev;
- struct mlx4_ib_ah *ah;
+ struct in6_addr in6;
- ah = kmalloc(sizeof *ah, GFP_ATOMIC);
- if (!ah)
- return ERR_PTR(-ENOMEM);
+ *is_mcast = 0;
- memset(&ah->av, 0, sizeof ah->av);
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
+ if (rdma_link_local_addr(&in6))
+ rdma_get_ll_mac(&in6, mac);
+ else if (rdma_is_multicast_addr(&in6)) {
+ rdma_get_mcast_mac(&in6, mac);
+ *is_mcast = 1;
+ } else
+ return -EINVAL;
- ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.g_slid = ah_attr->src_path_bits;
- ah->av.dlid = cpu_to_be16(ah_attr->dlid);
- if (ah_attr->static_rate) {
- ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
- while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
- !(1 << ah->av.stat_rate & dev->caps.stat_rate_support))
- --ah->av.stat_rate;
- }
- ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ return 0;
+}
+
+static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct mlx4_ib_ah *ah)
+{
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+
+ ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ ah->av.ib.g_slid = ah_attr->src_path_bits;
if (ah_attr->ah_flags & IB_AH_GRH) {
- ah->av.g_slid |= 0x80;
- ah->av.gid_index = ah_attr->grh.sgid_index;
- ah->av.hop_limit = ah_attr->grh.hop_limit;
- ah->av.sl_tclass_flowlabel |=
+ ah->av.ib.g_slid |= 0x80;
+ ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+ ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.ib.sl_tclass_flowlabel |=
cpu_to_be32((ah_attr->grh.traffic_class << 20) |
ah_attr->grh.flow_label);
- memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+ memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+ }
+
+ ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
+ if (ah_attr->static_rate) {
+ ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.ib.stat_rate;
}
+ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
return &ah->ibah;
}
+static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ struct mlx4_ib_ah *ah)
+{
+ struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+ struct mlx4_dev *dev = ibdev->dev;
+ union ib_gid sgid;
+ u8 mac[6];
+ int err;
+ int is_mcast;
+ u16 vlan_tag;
+
+ err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
+ if (err)
+ return ERR_PTR(err);
+
+ memcpy(ah->av.eth.mac, mac, 6);
+ err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
+ if (err)
+ return ERR_PTR(err);
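+ /*
+  * If the source GID carries a VLAN id, fold the SL into the 802.1p
+  * priority bits of the tag.
+  */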
+ vlan_tag = rdma_get_vlan_id(&sgid);
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+ ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+ if (ah_attr->static_rate) {
+ ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.eth.stat_rate;
+ }
+
+ /*
+ * HW requires multicast LID so we just choose one.
+ */
+ if (is_mcast)
+ ah->av.ib.dlid = cpu_to_be16(0xc000);
+
+ memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+ ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+
+ return &ah->ibah;
+}
+
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ struct mlx4_ib_ah *ah;
+ struct ib_ah *ret;
+
+ ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
+ if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ /*
+ * TBD: need to handle the case when we get
+ * called in an atomic context and there we
+ * might sleep. We don't expect this
+ * currently since we're working with link
+ * local addresses which we can translate
+ * without going to sleep.
+ */
+ ret = create_iboe_ah(pd, ah_attr, ah);
+ }
+
+ if (IS_ERR(ret))
+ kfree(ah);
+
+ return ret;
+ } else
+ return create_ib_ah(pd, ah_attr, ah); /* never fails */
+}
+
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
struct mlx4_ib_ah *ah = to_mah(ibah);
+ enum rdma_link_layer ll;
memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->dlid = be16_to_cpu(ah->av.dlid);
- ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
- ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24;
- if (ah->av.stat_rate)
- ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET;
- ah_attr->src_path_bits = ah->av.g_slid & 0x7F;
+ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ if (ah->av.ib.stat_rate)
+ ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+ ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
if (mlx4_ib_ah_grh_present(ah)) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.traffic_class =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20;
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
ah_attr->grh.flow_label =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff;
- ah_attr->grh.hop_limit = ah->av.hop_limit;
- ah_attr->grh.sgid_index = ah->av.gid_index;
- memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16);
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
+ ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
+ ah_attr->grh.sgid_index = ah->av.ib.gid_index;
+ memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f38d5b118927..c9a8dd63b9e2 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -311,19 +311,25 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
struct ib_mad_agent *agent;
int p, q;
int ret;
+ enum rdma_link_layer ll;
- for (p = 0; p < dev->num_ports; ++p)
+ for (p = 0; p < dev->num_ports; ++p) {
+ ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
for (q = 0; q <= 1; ++q) {
- agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
- q ? IB_QPT_GSI : IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
- dev->send_agent[p][q] = agent;
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
+ q ? IB_QPT_GSI : IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+ dev->send_agent[p][q] = agent;
+ } else
+ dev->send_agent[p][q] = NULL;
}
+ }
return 0;
@@ -344,8 +350,10 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
for (p = 0; p < dev->num_ports; ++p) {
for (q = 0; q <= 1; ++q) {
agent = dev->send_agent[p][q];
- dev->send_agent[p][q] = NULL;
- ib_unregister_mad_agent(agent);
+ if (agent) {
+ dev->send_agent[p][q] = NULL;
+ ib_unregister_mad_agent(agent);
+ }
}
if (dev->sm_ah[p])
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4e94e360e43b..bf3e20cd0298 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -35,9 +35,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
@@ -58,6 +63,15 @@ static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
+struct update_gid_work {
+ struct work_struct work;
+ union ib_gid gids[128];
+ struct mlx4_ib_dev *dev;
+ int port;
+};
+
+static struct workqueue_struct *wq;
+
static void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -66,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad)
mad->method = IB_MGMT_METHOD_GET;
}
+static union ib_gid zgid;
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -135,7 +151,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
props->max_srq_sge = dev->dev->caps.max_srq_sge;
- props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
+ props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -154,28 +170,19 @@ out:
return err;
}
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props)
+static enum rdma_link_layer
+mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
-
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- memset(props, 0, sizeof *props);
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ struct mlx4_dev *dev = to_mdev(device)->dev;
- err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
+ return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+}
+static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props,
+ struct ib_smp *out_mad)
+{
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -196,6 +203,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
+ return 0;
+}
+
+static u8 state_to_phys_state(enum ib_port_state state)
+{
+ return state == IB_PORT_ACTIVE ? 5 : 3;
+}
+
+static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props,
+ struct ib_smp *out_mad)
+{
+ struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+ struct net_device *ndev;
+ enum ib_mtu tmp;
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 4;
+ props->port_cap_flags = IB_PORT_CM_SUP;
+ props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
+ props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
+ props->pkey_tbl_len = 1;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
+ props->max_mtu = IB_MTU_2048;
+ props->subnet_timeout = 0;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = 0;
+ props->state = IB_PORT_DOWN;
+ props->phys_state = state_to_phys_state(props->state);
+ props->active_mtu = IB_MTU_256;
+ spin_lock(&iboe->lock);
+ ndev = iboe->netdevs[port - 1];
+ if (!ndev)
+ goto out;
+
+ tmp = iboe_get_mtu(ndev->mtu);
+ props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
+
+ props->state = netif_running(ndev) && netif_oper_up(ndev) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ props->phys_state = state_to_phys_state(props->state);
+
+out:
+ spin_unlock(&iboe->lock);
+ return 0;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof *props);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
+ ib_link_query_port(ibdev, port, props, out_mad) :
+ eth_link_query_port(ibdev, port, props, out_mad);
+
out:
kfree(in_mad);
kfree(out_mad);
@@ -203,8 +284,8 @@ out:
return err;
}
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid)
+static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
@@ -241,6 +322,25 @@ out:
return err;
}
+static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+
+ *gid = dev->iboe.gid_table[port - 1][index];
+
+ return 0;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+ return __mlx4_ib_query_gid(ibdev, port, index, gid);
+ else
+ return iboe_query_gid(ibdev, port, index, gid);
+}
+
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
@@ -272,14 +372,32 @@ out:
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
+ struct mlx4_cmd_mailbox *mailbox;
+
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP;
- if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
- spin_lock(&to_mdev(ibdev)->sm_lock);
- memcpy(ibdev->node_desc, props->node_desc, 64);
- spin_unlock(&to_mdev(ibdev)->sm_lock);
- }
+ if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ spin_lock(&to_mdev(ibdev)->sm_lock);
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ spin_unlock(&to_mdev(ibdev)->sm_lock);
+
+ /*
+ * If possible, pass node desc to FW, so it can generate
+ * a 144 trap. If cmd fails, just ignore.
+ */
+ mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
+ if (IS_ERR(mailbox))
+ return 0;
+
+ memset(mailbox->buf, 0, 256);
+ memcpy(mailbox->buf, props->node_desc, 64);
+ mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+
+ mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
return 0;
}
@@ -289,6 +407,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
{
struct mlx4_cmd_mailbox *mailbox;
int err;
+ u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
if (IS_ERR(mailbox))
@@ -304,7 +423,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
}
- err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
@@ -447,18 +566,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
+static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+{
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_gid_entry *ge;
+
+ ge = kzalloc(sizeof *ge, GFP_KERNEL);
+ if (!ge)
+ return -ENOMEM;
+
+ ge->gid = *gid;
+ if (mlx4_ib_add_mc(mdev, mqp, gid)) {
+ ge->port = mqp->port;
+ ge->added = 1;
+ }
+
+ mutex_lock(&mqp->mutex);
+ list_add_tail(&ge->list, &mqp->gid_list);
+ mutex_unlock(&mqp->mutex);
+
+ return 0;
+}
+
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ union ib_gid *gid)
+{
+ u8 mac[6];
+ struct net_device *ndev;
+ int ret = 0;
+
+ if (!mqp->port)
+ return 0;
+
+ spin_lock(&mdev->iboe.lock);
+ ndev = mdev->iboe.netdevs[mqp->port - 1];
+ if (ndev)
+ dev_hold(ndev);
+ spin_unlock(&mdev->iboe.lock);
+
+ if (ndev) {
+ rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+ rtnl_lock();
+ dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
+ ret = 1;
+ rtnl_unlock();
+ dev_put(ndev);
+ }
+
+ return ret;
+}
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
- &to_mqp(ibqp)->mqp, gid->raw,
- !!(to_mqp(ibqp)->flags &
- MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+ int err;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+ if (err)
+ return err;
+
+ err = add_gid_entry(ibqp, gid);
+ if (err)
+ goto err_add;
+
+ return 0;
+
+err_add:
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+ return err;
+}
+
+static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
+{
+ struct mlx4_ib_gid_entry *ge;
+ struct mlx4_ib_gid_entry *tmp;
+ struct mlx4_ib_gid_entry *ret = NULL;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ if (!memcmp(raw, ge->gid.raw, 16)) {
+ ret = ge;
+ break;
+ }
+ }
+
+ return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
- &to_mqp(ibqp)->mqp, gid->raw);
+ int err;
+ struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u8 mac[6];
+ struct net_device *ndev;
+ struct mlx4_ib_gid_entry *ge;
+
+ err = mlx4_multicast_detach(mdev->dev,
+ &mqp->mqp, gid->raw);
+ if (err)
+ return err;
+
+ mutex_lock(&mqp->mutex);
+ ge = find_gid_entry(mqp, gid->raw);
+ if (ge) {
+ spin_lock(&mdev->iboe.lock);
+ ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
+ if (ndev)
+ dev_hold(ndev);
+ spin_unlock(&mdev->iboe.lock);
+ rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+ if (ndev) {
+ rtnl_lock();
+ dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
+ rtnl_unlock();
+ dev_put(ndev);
+ }
+ list_del(&ge->list);
+ kfree(ge);
+ } else
+ printk(KERN_WARNING "could not find mgid entry\n");
+
+ mutex_unlock(&mqp->mutex);
+
+ return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
@@ -543,15 +776,215 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+{
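+ /*
+  * Build a modified EUI-64 interface id from the MAC address: the
+  * middle two bytes carry the VLAN id (ff:fe when untagged) and the
+  * universal/local bit is inverted.
+  */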
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
+static void update_gids_task(struct work_struct *work)
+{
+ struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
+ struct mlx4_cmd_mailbox *mailbox;
+ union ib_gid *gids;
+ int err;
+ struct mlx4_dev *dev = gw->dev->dev;
+ struct ib_event event;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+ return;
+ }
+
+ gids = mailbox->buf;
+ memcpy(gids, gw->gids, sizeof gw->gids);
+
+ err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ printk(KERN_WARNING "set port command failed\n");
+ else {
+ memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+ event.device = &gw->dev->ib_dev;
+ event.element.port_num = gw->port;
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ kfree(gw);
+}
+
+static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+{
+ struct net_device *ndev = dev->iboe.netdevs[port - 1];
+ struct update_gid_work *work;
+ struct net_device *tmp;
+ int i;
+ u8 *hits;
+ int ret;
+ union ib_gid gid;
+ int free;
+ int found;
+ int need_update = 0;
+ u16 vid;
+
+ work = kzalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ hits = kzalloc(128, GFP_ATOMIC);
+ if (!hits) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ read_lock(&dev_base_lock);
+ for_each_netdev(&init_net, tmp) {
+ if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
+ gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ vid = rdma_vlan_dev_vlan_id(tmp);
+ mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
+ found = 0;
+ free = -1;
+ for (i = 0; i < 128; ++i) {
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+ free = i;
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
+ hits[i] = 1;
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (tmp == ndev &&
+ (memcmp(&dev->iboe.gid_table[port - 1][0],
+ &gid, sizeof gid) ||
+ !memcmp(&dev->iboe.gid_table[port - 1][0],
+ &zgid, sizeof gid))) {
+ dev->iboe.gid_table[port - 1][0] = gid;
+ ++need_update;
+ hits[0] = 1;
+ } else if (free >= 0) {
+ dev->iboe.gid_table[port - 1][free] = gid;
+ hits[free] = 1;
+ ++need_update;
+ }
+ }
+ }
+ }
+ read_unlock(&dev_base_lock);
+
+ for (i = 0; i < 128; ++i)
+ if (!hits[i]) {
+ if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+ ++need_update;
+ dev->iboe.gid_table[port - 1][i] = zgid;
+ }
+
+ if (need_update) {
+ memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
+ INIT_WORK(&work->work, update_gids_task);
+ work->port = port;
+ work->dev = dev;
+ queue_work(wq, &work->work);
+ } else
+ kfree(work);
+
+ kfree(hits);
+ return 0;
+
+out:
+ kfree(work);
+ return ret;
+}
+
+static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ update_ipv6_gids(dev, port, 0);
+ break;
+
+ case NETDEV_DOWN:
+ update_ipv6_gids(dev, port, 1);
+ dev->iboe.netdevs[port - 1] = NULL;
+ }
+}
+
+static void netdev_added(struct mlx4_ib_dev *dev, int port)
+{
+ update_ipv6_gids(dev, port, 0);
+}
+
+static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+{
+ update_ipv6_gids(dev, port, 1);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct mlx4_ib_dev *ibdev;
+ struct net_device *oldnd;
+ struct mlx4_ib_iboe *iboe;
+ int port;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+ iboe = &ibdev->iboe;
+
+ spin_lock(&iboe->lock);
+ mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ oldnd = iboe->netdevs[port - 1];
+ iboe->netdevs[port - 1] =
+ mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+ if (oldnd != iboe->netdevs[port - 1]) {
+ if (iboe->netdevs[port - 1])
+ netdev_added(ibdev, port);
+ else
+ netdev_removed(ibdev, port);
+ }
+ }
+
+ if (dev == iboe->netdevs[0] ||
+ (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
+ handle_en_event(ibdev, 1, event);
+ else if (dev == iboe->netdevs[1]
+ || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
+ handle_en_event(ibdev, 2, event);
+
+ spin_unlock(&iboe->lock);
+
+ return NOTIFY_DONE;
+}
+
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
struct mlx4_ib_dev *ibdev;
int num_ports = 0;
int i;
+ int err;
+ struct mlx4_ib_iboe *iboe;
printk_once(KERN_INFO "%s", mlx4_ib_version);
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
/* No point in registering a device with no ports... */
@@ -564,6 +997,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
return NULL;
}
+ iboe = &ibdev->iboe;
+
if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
goto err_dealloc;
@@ -612,6 +1047,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.query_device = mlx4_ib_query_device;
ibdev->ib_dev.query_port = mlx4_ib_query_port;
+ ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
@@ -656,6 +1092,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
+ spin_lock_init(&iboe->lock);
+
if (init_node_data(ibdev))
goto err_map;
@@ -668,16 +1106,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
+ iboe->nb.notifier_call = mlx4_ib_netdev_event;
+ err = register_netdevice_notifier(&iboe->nb);
+ if (err)
+ goto err_reg;
+ }
+
for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
if (device_create_file(&ibdev->ib_dev.dev,
mlx4_class_attributes[i]))
- goto err_reg;
+ goto err_notif;
}
ibdev->ib_active = true;
return ibdev;
+err_notif:
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ printk(KERN_WARNING "failure unregistering notifier\n");
+ flush_workqueue(wq);
+
err_reg:
ib_unregister_device(&ibdev->ib_dev);
@@ -703,11 +1153,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
+ if (ibdev->iboe.nb.notifier_call) {
+ if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+ printk(KERN_WARNING "failure unregistering notifier\n");
+ ibdev->iboe.nb.notifier_call = NULL;
+ }
+ iounmap(ibdev->uar_map);
- for (p = 1; p <= ibdev->num_ports; ++p)
+ mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
- iounmap(ibdev->uar_map);
mlx4_uar_free(dev, &ibdev->priv_uar);
mlx4_pd_free(dev, ibdev->priv_pdn);
ib_dealloc_device(&ibdev->ib_dev);
@@ -747,19 +1202,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
static struct mlx4_interface mlx4_ib_interface = {
- .add = mlx4_ib_add,
- .remove = mlx4_ib_remove,
- .event = mlx4_ib_event
+ .add = mlx4_ib_add,
+ .remove = mlx4_ib_remove,
+ .event = mlx4_ib_event,
+ .protocol = MLX4_PROTOCOL_IB
};
static int __init mlx4_ib_init(void)
{
- return mlx4_register_interface(&mlx4_ib_interface);
+ int err;
+
+ wq = create_singlethread_workqueue("mlx4_ib");
+ if (!wq)
+ return -ENOMEM;
+
+ err = mlx4_register_interface(&mlx4_ib_interface);
+ if (err) {
+ destroy_workqueue(wq);
+ return err;
+ }
+
+ return 0;
}
static void __exit mlx4_ib_cleanup(void)
{
mlx4_unregister_interface(&mlx4_ib_interface);
+ destroy_workqueue(wq);
}
module_init(mlx4_ib_init);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3486d7675e56..2a322f21049f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -112,6 +112,13 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
};
+struct mlx4_ib_gid_entry {
+ struct list_head list;
+ union ib_gid gid;
+ int added;
+ u8 port;
+};
+
struct mlx4_ib_qp {
struct ib_qp ibqp;
struct mlx4_qp mqp;
@@ -138,6 +145,8 @@ struct mlx4_ib_qp {
u8 resp_depth;
u8 sq_no_prefetch;
u8 state;
+ int mlx_type;
+ struct list_head gid_list;
};
struct mlx4_ib_srq {
@@ -157,7 +166,14 @@ struct mlx4_ib_srq {
struct mlx4_ib_ah {
struct ib_ah ibah;
- struct mlx4_av av;
+ union mlx4_ext_av av;
+};
+
+struct mlx4_ib_iboe {
+ spinlock_t lock;
+ struct net_device *netdevs[MLX4_MAX_PORTS];
+ struct notifier_block nb;
+ union ib_gid gid_table[MLX4_MAX_PORTS][128];
};
struct mlx4_ib_dev {
@@ -176,6 +192,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
+ struct mlx4_ib_iboe iboe;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -314,9 +331,20 @@ int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+ u8 *mac, int *is_mcast, u8 port);
+
static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
- return !!(ah->av.g_slid & 0x80);
+ u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+
+ if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
+ return 1;
+
+ return !!(ah->av.ib.g_slid & 0x80);
}
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+ union ib_gid *gid);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d27b9a8e2d6..dca55b19a6f1 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -226,7 +226,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
struct mlx4_ib_fast_reg_page_list *mfrpl;
int size = page_list_len * sizeof (u64);
- if (size > PAGE_SIZE)
+ if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
return ERR_PTR(-EINVAL);
mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6a60827b2301..9a7794ac34c1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -33,9 +33,11 @@
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
+#include <rdma/ib_addr.h>
#include <linux/mlx4/qp.h>
@@ -48,17 +50,26 @@ enum {
enum {
MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83,
- MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f
+ MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
+ MLX4_IB_LINK_TYPE_IB = 0,
+ MLX4_IB_LINK_TYPE_ETH = 1
};
enum {
/*
- * Largest possible UD header: send with GRH and immediate data.
+ * Largest possible UD header: send with GRH and immediate
+ * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+ * tag. (LRH would only use 8 bytes, so Ethernet is the
+ * biggest case)
*/
- MLX4_IB_UD_HEADER_SIZE = 72,
+ MLX4_IB_UD_HEADER_SIZE = 82,
MLX4_IB_LSO_HEADER_SPARE = 128,
};
+enum {
+ MLX4_IB_IBOE_ETHERTYPE = 0x8915
+};
+
struct mlx4_ib_sqp {
struct mlx4_ib_qp qp;
int pkey_index;
@@ -462,6 +473,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ INIT_LIST_HEAD(&qp->gid_list);
qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -649,6 +661,16 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
}
}
+static void del_gid_entries(struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_gid_entry *ge, *tmp;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ list_del(&ge->list);
+ kfree(ge);
+ }
+}
+
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
@@ -695,6 +717,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (!qp->ibqp.srq)
mlx4_db_free(dev->dev, &qp->db);
}
+
+ del_gid_entries(qp);
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
@@ -852,6 +876,14 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
struct mlx4_qp_path *path, u8 port)
{
+ int err;
+ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET;
+ u8 mac[6];
+ int is_mcast;
+ u16 vlan_tag;
+ int vidx;
+
path->grh_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
if (ah->static_rate) {
@@ -879,12 +911,49 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
memcpy(path->rgid, ah->grh.dgid.raw, 16);
}
- path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ if (is_eth) {
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+
+ if (!(ah->ah_flags & IB_AH_GRH))
+ return -1;
+
+ err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
+ if (err)
+ return err;
+
+ memcpy(path->dmac, mac, 6);
+ path->ackto = MLX4_IB_LINK_TYPE_ETH;
+ /* use index 0 into MAC table for IBoE */
+ path->grh_mylmc &= 0x80;
+
+ vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+ if (vlan_tag < 0x1000) {
+ if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
+ return -ENOENT;
+
+ path->vlan_index = vidx;
+ path->fl = 1 << 6;
+ }
+ } else
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
return 0;
}
+static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ struct mlx4_ib_gid_entry *ge, *tmp;
+
+ list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+ if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
+ ge->added = 1;
+ ge->port = qp->port;
+ }
+ }
+}
+
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -980,7 +1049,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
- context->pri_path.ackto = attr->timeout << 3;
+ context->pri_path.ackto |= attr->timeout << 3;
optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
}
@@ -1118,8 +1187,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT)
+ if (attr_mask & IB_QP_PORT) {
qp->port = attr->port_num;
+ update_mcg_macs(dev, qp);
+ }
if (attr_mask & IB_QP_ALT_PATH)
qp->alt_port = attr->alt_port_num;
@@ -1221,40 +1292,59 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ union ib_gid sgid;
u16 pkey;
int send_size;
int header_size;
int spc;
int i;
+ int is_eth;
+ int is_vlan = 0;
+ int is_grh;
+ u16 vlan;
send_size = 0;
for (i = 0; i < wr->num_sge; ++i)
send_size += wr->sg_list[i].length;
- ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
+ is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+ is_grh = mlx4_ib_ah_grh_present(ah);
+ if (is_eth) {
+ ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sgid);
+ vlan = rdma_get_vlan_id(&sgid);
+ is_vlan = vlan < 0x1000;
+ }
+ ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
+
+ if (!is_eth) {
+ sqp->ud_header.lrh.service_level =
+ be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
+ sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
+ }
- sqp->ud_header.lrh.service_level =
- be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
- sqp->ud_header.lrh.destination_lid = ah->av.dlid;
- sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f);
- if (mlx4_ib_ah_grh_present(ah)) {
+ if (is_grh) {
sqp->ud_header.grh.traffic_class =
- (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
+ (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
sqp->ud_header.grh.flow_label =
- ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
- sqp->ud_header.grh.hop_limit = ah->av.hop_limit;
- ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
- ah->av.gid_index, &sqp->ud_header.grh.source_gid);
+ ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+ sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+ ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
memcpy(sqp->ud_header.grh.destination_gid.raw,
- ah->av.dgid, 16);
+ ah->av.ib.dgid, 16);
}
mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
- (sqp->ud_header.lrh.destination_lid ==
- IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
- (sqp->ud_header.lrh.service_level << 8));
- mlx->rlid = sqp->ud_header.lrh.destination_lid;
+
+ if (!is_eth) {
+ mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
+ (sqp->ud_header.lrh.service_level << 8));
+ mlx->rlid = sqp->ud_header.lrh.destination_lid;
+ }
switch (wr->opcode) {
case IB_WR_SEND:
@@ -1270,9 +1360,29 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
return -EINVAL;
}
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
- if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
- sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+ if (is_eth) {
+ u8 *smac;
+
+ memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
+ /* FIXME: cache smac value? */
+ smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+ if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
+ mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ if (!is_vlan) {
+ sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ } else {
+ u16 pcp;
+
+ sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+ pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+ sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
+ }
+ } else {
+ sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+ sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+ }
sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
@@ -1429,11 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
- struct ib_send_wr *wr)
+ struct ib_send_wr *wr, __be16 *vlan)
{
memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
+ memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+ *vlan = dseg->vlan;
}
static void set_mlx_icrc_seg(void *dseg)
@@ -1536,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
+ __be16 vlan = cpu_to_be16(0xffff);
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1639,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_QPT_UD:
- set_datagram_seg(wqe, wr);
+ set_datagram_seg(wqe, wr, &vlan);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
@@ -1717,6 +1831,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
+ if (be16_to_cpu(vlan) < 0x1000) {
+ ctrl->ins_vlan = 1 << 6;
+ ctrl->vlan_tag = vlan;
+ }
+
stamp = ind + qp->sq_spare_wqes;
ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
@@ -1866,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags)
return ib_flags;
}
-static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
struct mlx4_qp_path *path)
{
+ struct mlx4_dev *dev = ibdev->dev;
+ int is_eth;
+
memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
return;
+ is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
+ IB_LINK_LAYER_ETHERNET;
+ if (is_eth)
+ ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
+ ((path->sched_queue & 4) << 1);
+ else
+ ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+
ib_ah_attr->dlid = be16_to_cpu(path->rlid);
- ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
@@ -1929,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
to_ib_qp_access_flags(be32_to_cpu(context.params2));
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
- to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d2d172e6289c..a34c9d38e822 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1493,7 +1493,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
int err;
u16 pkey;
- ib_ud_header_init(256, /* assume a MAD */
+ ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
&sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6220d9d75b58..25ad0f9944c0 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1424,7 +1424,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
{
int reset = 0; /* whether to send reset in case of err.. */
- int passive_state;
atomic_inc(&cm_resets_recvd);
nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
" refcnt=%d\n", cm_node, cm_node->state,
@@ -1439,7 +1438,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
active_open_err(cm_node, skb, reset);
break;
case NES_CM_STATE_MPAREQ_RCVD:
- passive_state = atomic_add_return(1, &cm_node->passive_state);
+ atomic_inc(&cm_node->passive_state);
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 10560c796fd6..3892e2c0e95a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -271,6 +271,7 @@ static int nes_netdev_stop(struct net_device *netdev)
if (netif_msg_ifdown(nesvnic))
printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+ netif_carrier_off(netdev);
/* Disable network packets */
napi_disable(&nesvnic->napi);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 546fc22405fe..99933e4e48ff 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -476,9 +476,9 @@ static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
}
nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
"ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
- "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
+ "pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl,
pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
- (void *)pnesfrpl->nes_wqe_pbl.paddr);
+ (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr);
return pifrpl;
}
@@ -584,7 +584,9 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- if (nesvnic->linkup)
+ if (netif_queue_stopped(netdev))
+ props->state = IB_PORT_DOWN;
+ else if (nesvnic->linkup)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_DOWN;
@@ -3483,13 +3485,13 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
dst_page_list[i] = cpu_to_le64(src_page_list[i]);
- nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
- "length: %d, rkey: %0x, pgl_paddr: %p, "
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
+ "length: %d, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
- (void *)ib_wr->wr.fast_reg.iova_start,
+ (unsigned long long) ib_wr->wr.fast_reg.iova_start,
ib_wr->wr.fast_reg.length,
ib_wr->wr.fast_reg.rkey,
- (void *)pnesfrpl->nes_wqe_pbl.paddr,
+ (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
ib_wr->wr.fast_reg.page_list_len,
wqe_misc);
break;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 61de0654820e..64c9e7d02d4a 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1406,7 +1406,7 @@ extern struct mutex qib_mutex;
*/
#define qib_early_err(dev, fmt, ...) \
do { \
- dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+ dev_err(dev, fmt, ##__VA_ARGS__); \
} while (0)
#define qib_dev_err(dd, fmt, ...) \
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index a0e6613e8be6..7e433d75c775 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -58,6 +58,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
goto bail;
}
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f1d16d3a01f6..f3b503936043 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1243,6 +1243,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
"work if CONFIG_PCI_MSI is not enabled\n",
ent->device);
+ dd = ERR_PTR(-ENODEV);
#endif
break;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 7fa6e5592630..48b6674cbc49 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -103,16 +103,20 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} else
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret)
+ if (ret) {
qib_early_err(&pdev->dev,
"Unable to set DMA consistent mask: %d\n", ret);
+ goto bail;
+ }
pci_set_master(pdev);
ret = pci_enable_pcie_error_reporting(pdev);
- if (ret)
+ if (ret) {
qib_early_err(&pdev->dev,
"Unable to enable pcie error reporting: %d\n",
ret);
+ ret = 0;
+ }
goto done;
bail:
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index a0931119bd78..955fb7157793 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2068,7 +2068,10 @@ send_last:
goto nack_op_err;
if (!ret)
goto rnr_nak;
- goto send_last_imm;
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ goto send_last;
case OP(RDMA_READ_REQUEST): {
struct qib_ack_entry *e;
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index b9c8b6346c1b..32ccf3c824ca 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -457,8 +457,10 @@ rdma_first:
}
if (opcode == OP(RDMA_WRITE_ONLY))
goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+ wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
+ }
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
@@ -471,8 +473,8 @@ rdma_first:
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-rdma_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
+rdma_last_imm:
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ec6b4fbe25e4..dfa71903d6e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -223,6 +223,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
+ union ib_gid *dgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -271,6 +272,16 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_ud_dma_unmap_rx(priv, mapping);
ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
+ /* First byte of dgid signals multicast when 0xff */
+ dgid = &((struct ib_grh *)skb->data)->dgid;
+
+ if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
+ skb->pkt_type = PACKET_HOST;
+ else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -281,9 +292,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
dev->stats.rx_bytes += skb->len;
skb->dev = dev;
- /* XXX get correct PACKET_ type here */
- skb->pkt_type = PACKET_HOST;
-
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b4b22576f12a..9ff7bc73ed95 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1240,6 +1240,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto alloc_mem_failed;
SET_NETDEV_DEV(priv->dev, hca->dma_device);
+ priv->dev->dev_id = port - 1;
if (!ib_query_port(hca, port, &attr))
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
@@ -1362,6 +1363,8 @@ static void ipoib_add_one(struct ib_device *device)
}
for (p = s; p <= e; ++p) {
+ if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
+ continue;
dev = ipoib_add_port("ib%d", device, p);
if (!IS_ERR(dev)) {
priv = netdev_priv(dev);
@@ -1409,8 +1412,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
- ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
- IPOIB_MIN_QUEUE_SIZE));
+ ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7f8f16bad753..cfc1d65c4577 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+ for (i = 0; i < SRP_SQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->tx_ring[i]);
}
@@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return len;
}
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head. Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ * more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ * one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+ enum srp_iu_type iu_type)
+{
+ s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+ struct srp_iu *iu;
+
+ srp_send_completion(target->send_cq, target);
+
+ if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+ return NULL;
+
+ /* Initiator responses to target requests do not consume credits */
+ if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+ ++target->zero_req_lim;
+ return NULL;
+ }
+
+ iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+ iu->type = iu_type;
+ return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+ struct srp_iu *iu, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ int ret = 0;
+
+ list.addr = iu->dma;
+ list.length = len;
+ list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = target->tx_head & SRP_SQ_MASK;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+ if (!ret) {
+ ++target->tx_head;
+ if (iu->type != SRP_IU_RSP)
+ --target->req_lim;
+ }
+
+ return ret;
+}
+
static int srp_post_recv(struct srp_target_port *target)
{
unsigned long flags;
@@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target)
spin_lock_irqsave(target->scsi_host->host_lock, flags);
- next = target->rx_head & (SRP_RQ_SIZE - 1);
+ next = target->rx_head & SRP_RQ_MASK;
wr.wr_id = next;
iu = target->rx_ring[next];
@@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+ void *rsp, int len)
+{
+ struct ib_device *dev;
+ unsigned long flags;
+ struct srp_iu *iu;
+ int err = 1;
+
+ dev = target->srp_host->srp_dev->dev;
+
+ spin_lock_irqsave(target->scsi_host->host_lock, flags);
+ target->req_lim += req_delta;
+
+ iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+ if (!iu) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "no IU available to send response\n");
+ goto out;
+ }
+
+ ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+ memcpy(iu->buf, rsp, len);
+ ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+ err = __srp_post_send(target, iu, len);
+ if (err)
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "unable to post response: %d\n", err);
+
+out:
+ spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+ return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+ struct srp_cred_req *req)
+{
+ struct srp_cred_rsp rsp = {
+ .opcode = SRP_CRED_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ if (srp_response_common(target, delta, &rsp, sizeof rsp))
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+ struct srp_aer_req *req)
+{
+ struct srp_aer_rsp rsp = {
+ .opcode = SRP_AER_RSP,
+ .tag = req->tag,
+ };
+ s32 delta = be32_to_cpu(req->req_lim_delta);
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+ if (srp_response_common(target, delta, &rsp, sizeof rsp))
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "problems processing SRP_AER_REQ\n");
+}
+
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
struct ib_device *dev;
@@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
srp_process_rsp(target, iu->buf);
break;
+ case SRP_CRED_REQ:
+ srp_process_cred_req(target, iu->buf);
+ break;
+
+ case SRP_AER_REQ:
+ srp_process_aer_req(target, iu->buf);
+ break;
+
case SRP_T_LOGOUT:
/* XXX Handle target logout */
shost_printk(KERN_WARNING, target->scsi_host,
@@ -981,61 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
}
}
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
- enum srp_request_type req_type)
-{
- s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-
- srp_send_completion(target->send_cq, target);
-
- if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
- return NULL;
-
- if (target->req_lim < min) {
- ++target->zero_req_lim;
- return NULL;
- }
-
- return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
- struct srp_iu *iu, int len)
-{
- struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
- int ret = 0;
-
- list.addr = iu->dma;
- list.length = len;
- list.lkey = target->srp_host->srp_dev->mr->lkey;
-
- wr.next = NULL;
- wr.wr_id = target->tx_head & SRP_SQ_SIZE;
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(target->qp, &wr, &bad_wr);
-
- if (!ret) {
- ++target->tx_head;
- --target->req_lim;
- }
-
- return ret;
-}
-
static int srp_queuecommand(struct scsi_cmnd *scmnd,
void (*done)(struct scsi_cmnd *))
{
@@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
return 0;
}
- iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+ iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
goto err;
@@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
DMA_TO_DEVICE);
- req = list_entry(target->free_reqs.next, struct srp_request, list);
+ req = list_first_entry(&target->free_reqs, struct srp_request, list);
scmnd->scsi_done = done;
scmnd->result = 0;
@@ -1121,7 +1208,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
srp_max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1224,7 @@ err:
target->rx_ring[i] = NULL;
}
- for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+ for (i = 0; i < SRP_SQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->tx_ring[i]);
target->tx_ring[i] = NULL;
}
@@ -1252,8 +1339,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
target->req_lim = be32_to_cpu(rsp->req_lim_delta);
- target->scsi_host->can_queue = min(target->req_lim,
- target->scsi_host->can_queue);
+ /*
+ * Reserve credits for task management so we don't
+ * bounce requests back to the SCSI mid-layer.
+ */
+ target->scsi_host->can_queue
+ = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+ target->scsi_host->can_queue);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", opcode);
@@ -1350,6 +1442,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_request *req, u8 func)
{
+ struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1363,10 +1456,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
init_completion(&req->done);
- iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+ iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
if (!iu)
goto out;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
tsk_mgmt = iu->buf;
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
@@ -1376,6 +1471,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
tsk_mgmt->tsk_mgmt_func = func;
tsk_mgmt->task_tag = req->index;
+ ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+ DMA_TO_DEVICE);
if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
goto out;
@@ -1626,9 +1723,9 @@ static struct scsi_host_template srp_template = {
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
- .can_queue = SRP_SQ_SIZE,
+ .can_queue = SRP_CMD_SQ_SIZE,
.this_id = -1,
- .cmd_per_lun = SRP_SQ_SIZE,
+ .cmd_per_lun = SRP_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -1813,7 +1910,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
goto out;
}
- target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+ target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
break;
case SRP_OPT_IO_CLASS:
@@ -1891,7 +1988,7 @@ static ssize_t srp_create_target(struct device *dev,
INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
- for (i = 0; i < SRP_SQ_SIZE; ++i) {
+ for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
target->req_ring[i].index = i;
list_add_tail(&target->req_ring[i].list, &target->free_reqs);
}
@@ -2159,6 +2256,9 @@ static int __init srp_init_module(void)
{
int ret;
+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+
if (srp_sg_tablesize > 255) {
printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
srp_sg_tablesize = 255;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5a80eac6fdaa..ed0dce9e479f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,7 +59,14 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
- SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
+ SRP_RQ_MASK = SRP_RQ_SIZE - 1,
+
+ SRP_SQ_SIZE = SRP_RQ_SIZE,
+ SRP_SQ_MASK = SRP_SQ_SIZE - 1,
+ SRP_RSP_SQ_SIZE = 1,
+ SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
+ SRP_TSK_MGMT_SQ_SIZE = 1,
+ SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
@@ -75,9 +82,10 @@ enum srp_target_state {
SRP_TARGET_REMOVED
};
-enum srp_request_type {
- SRP_REQ_NORMAL,
- SRP_REQ_TASK_MGMT,
+enum srp_iu_type {
+ SRP_IU_CMD,
+ SRP_IU_TSK_MGMT,
+ SRP_IU_RSP,
};
struct srp_device {
@@ -144,11 +152,11 @@ struct srp_target_port {
unsigned tx_head;
unsigned tx_tail;
- struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
+ struct srp_iu *tx_ring[SRP_SQ_SIZE];
struct list_head free_reqs;
struct list_head req_queue;
- struct srp_request req_ring[SRP_SQ_SIZE];
+ struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct work_struct work;
@@ -164,6 +172,7 @@ struct srp_iu {
void *buf;
size_t size;
enum dma_data_direction direction;
+ enum srp_iu_type type;
};
#endif /* IB_SRP_H */
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index af25e1f3efd4..e90db8870b6c 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
- hw->ipac.hscx[0].slot = 0x23;
+ hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index b0554f80bfb3..ee4dae1382e0 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
char tmp[80];
struct sk_buff *skb = arg;
- p = skb->data;
-
/* Channel Identification */
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_chanID, 0))) {
+ p = findie(skb->data, skb->len, WE0_chanID, 0);
+ if (p) {
if (p[1] != 1) {
l3_1tr6_error(pc, "setup wrong chanID len", skb);
return;
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 947d4afa25ca..30e6195e19d4 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -482,7 +482,7 @@ static s32 pm121_correct(s32 new_setpoint,
new_min += correction->offset;
new_min = (new_min >> 16) + min;
- return max(new_setpoint, max(new_min, 0));
+ return max3(new_setpoint, new_min, 0);
}
static s32 pm121_connect(unsigned int control_id, s32 setpoint)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 0b61792a2780..2129cdb115dc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
* Issue the synchronous I/O from a different thread
* to avoid generic_make_request recursion.
*/
- INIT_WORK_ON_STACK(&req.work, do_metadata);
+ INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
flush_workqueue(ps->metadata_wq);
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 490c57cc4cfe..aa4163eb7a83 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -79,6 +79,18 @@ config IR_SONY_DECODER
Enable this option if you have an infrared remote control which
uses the Sony protocol, and you need software decoding support.
+config IR_RC5_SZ_DECODER
+ tristate "Enable IR raw decoder for the RC-5 (streamzap) protocol"
+ depends on IR_CORE
+ select BITREVERSE
+ default y
+
+ ---help---
+ Enable this option if you have IR with RC-5 (streamzap) protocol,
+ and if the IR is decoded in software. (The Streamzap PC Remote
+ uses an IR protocol that is almost standard RC-5, but not quite,
+ as it uses an additional bit).
+
config IR_LIRC_CODEC
tristate "Enable IR to LIRC bridge"
depends on IR_CORE
@@ -89,6 +101,20 @@ config IR_LIRC_CODEC
Enable this option to pass raw IR to and from userspace via
the LIRC interface.
+config IR_ENE
+ tristate "ENE eHome Receiver/Transceiver (pnp id: ENE0100/ENE02xxx)"
+ depends on PNP
+ depends on IR_CORE
+ ---help---
+ Say Y here to enable support for integrated infrared receiver
+ /transceiver made by ENE.
+
+ You can see if you have it by looking at lspnp output.
+ Output should include ENE0100 ENE0200 or something similar.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ene_ir.
+
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
depends on USB_ARCH_HAS_HCD
@@ -113,19 +139,18 @@ config IR_MCEUSB
To compile this driver as a module, choose M here: the
module will be called mceusb.
-config IR_ENE
- tristate "ENE eHome Receiver/Transciever (pnp id: ENE0100/ENE02xxx)"
+config IR_NUVOTON
+ tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
depends on PNP
depends on IR_CORE
---help---
Say Y here to enable support for integrated infrared receiver
- /transciever made by ENE.
-
- You can see if you have it by looking at lspnp output.
- Output should include ENE0100 ENE0200 or something similiar.
+ /transceiver made by Nuvoton (formerly Winbond). This chip is
+ found in the ASRock ION 330HT, as well as assorted Intel
+ DP55-series motherboards (and of course, possibly others).
To compile this driver as a module, choose M here: the
- module will be called ene_ir.
+ module will be called nuvoton-cir.
config IR_STREAMZAP
tristate "Streamzap PC Remote IR Receiver"
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 53676838fe97..f9574adab82a 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -11,10 +11,12 @@ obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
obj-$(CONFIG_IR_ENE) += ene_ir.o
obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
diff --git a/drivers/media/IR/ene_ir.c b/drivers/media/IR/ene_ir.c
index 5447750f5e38..7637babcd262 100644
--- a/drivers/media/IR/ene_ir.c
+++ b/drivers/media/IR/ene_ir.c
@@ -1,5 +1,5 @@
/*
- * driver for ENE KB3926 B/C/D CIR (pnp id: ENE0XXX)
+ * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
*
@@ -17,6 +17,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
+ *
+ * Special thanks to:
+ * Sami R. <maesesami@gmail.com> for a lot of help in debugging and therefore
+ * bringing to life support for transmission & learning mode.
+ *
+ * Charlie Andrews <charliethepilot@googlemail.com> for lots of help in
+ * bringing up the support of new firmware buffer that is popular
+ * on latest notebooks
+ *
+ * ENE for partial device documentation
+ *
*/
#include <linux/kernel.h>
@@ -31,51 +42,59 @@
#include <media/ir-common.h>
#include "ene_ir.h"
-
-static int sample_period = -1;
-static int enable_idle = 1;
-static int input = 1;
+static int sample_period;
+static bool learning_mode_force;
static int debug;
-static int txsim;
+static bool txsim;
-static int ene_irq_status(struct ene_device *dev);
+static void ene_set_reg_addr(struct ene_device *dev, u16 reg)
+{
+ outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
+ outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+}
/* read a hardware register */
-static u8 ene_hw_read_reg(struct ene_device *dev, u16 reg)
+static u8 ene_read_reg(struct ene_device *dev, u16 reg)
{
u8 retval;
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ ene_set_reg_addr(dev, reg);
retval = inb(dev->hw_io + ENE_IO);
-
- ene_dbg_verbose("reg %04x == %02x", reg, retval);
+ dbg_regs("reg %04x == %02x", reg, retval);
return retval;
}
/* write a hardware register */
-static void ene_hw_write_reg(struct ene_device *dev, u16 reg, u8 value)
+static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
{
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ dbg_regs("reg %04x <- %02x", reg, value);
+ ene_set_reg_addr(dev, reg);
outb(value, dev->hw_io + ENE_IO);
-
- ene_dbg_verbose("reg %04x <- %02x", reg, value);
}
-/* change specific bits in hardware register */
-static void ene_hw_write_reg_mask(struct ene_device *dev,
- u16 reg, u8 value, u8 mask)
+/* Set bits in hardware register */
+static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
- u8 regvalue;
-
- outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
- outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
+ dbg_regs("reg %04x |= %02x", reg, mask);
+ ene_set_reg_addr(dev, reg);
+ outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO);
+}
- regvalue = inb(dev->hw_io + ENE_IO) & ~mask;
- regvalue |= (value & mask);
- outb(regvalue, dev->hw_io + ENE_IO);
+/* Clear bits in hardware register */
+static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
+{
+ dbg_regs("reg %04x &= ~%02x ", reg, mask);
+ ene_set_reg_addr(dev, reg);
+ outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO);
+}
- ene_dbg_verbose("reg %04x <- %02x (mask=%02x)", reg, value, mask);
+/* A helper to set/clear a bit in register according to boolean variable */
+static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask,
+ bool set)
+{
+ if (set)
+ ene_set_reg_mask(dev, reg, mask);
+ else
+ ene_clear_reg_mask(dev, reg, mask);
}
/* detect hardware features */
@@ -83,194 +102,378 @@ static int ene_hw_detect(struct ene_device *dev)
{
u8 chip_major, chip_minor;
u8 hw_revision, old_ver;
- u8 tmp;
- u8 fw_capabilities;
- int pll_freq;
+ u8 fw_reg2, fw_reg1;
- tmp = ene_hw_read_reg(dev, ENE_HW_UNK);
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp & ~ENE_HW_UNK_CLR);
+ ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
+ chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
+ chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
+ ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
- chip_major = ene_hw_read_reg(dev, ENE_HW_VER_MAJOR);
- chip_minor = ene_hw_read_reg(dev, ENE_HW_VER_MINOR);
+ hw_revision = ene_read_reg(dev, ENE_ECHV);
+ old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);
- ene_hw_write_reg(dev, ENE_HW_UNK, tmp);
- hw_revision = ene_hw_read_reg(dev, ENE_HW_VERSION);
- old_ver = ene_hw_read_reg(dev, ENE_HW_VER_OLD);
+ dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
+ (ene_read_reg(dev, ENE_PLLFRL) >> 4);
- pll_freq = (ene_hw_read_reg(dev, ENE_PLLFRH) << 4) +
- (ene_hw_read_reg(dev, ENE_PLLFRL) >> 4);
-
- if (pll_freq != 1000)
- dev->rx_period_adjust = 4;
- else
- dev->rx_period_adjust = 2;
-
-
- ene_printk(KERN_NOTICE, "PLL freq = %d\n", pll_freq);
+ if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
+ dev->rx_period_adjust =
+ dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;
if (hw_revision == 0xFF) {
-
- ene_printk(KERN_WARNING, "device seems to be disabled\n");
- ene_printk(KERN_WARNING,
- "send a mail to lirc-list@lists.sourceforge.net\n");
- ene_printk(KERN_WARNING, "please attach output of acpidump\n");
+ ene_warn("device seems to be disabled");
+ ene_warn("send a mail to lirc-list@lists.sourceforge.net");
+ ene_warn("please attach output of acpidump and dmidecode");
return -ENODEV;
}
+ ene_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x",
+ chip_major, chip_minor, old_ver, hw_revision);
+
+ ene_notice("PLL freq = %d", dev->pll_freq);
+
if (chip_major == 0x33) {
- ene_printk(KERN_WARNING, "chips 0x33xx aren't supported\n");
+ ene_warn("chips 0x33xx aren't supported");
return -ENODEV;
}
if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_C;
+ ene_notice("KB3926C detected");
} else if (old_ver == 0x24 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_B;
- ene_printk(KERN_NOTICE, "KB3926B detected\n");
+ ene_notice("KB3926B detected");
} else {
dev->hw_revision = ENE_HW_D;
- ene_printk(KERN_WARNING,
- "unknown ENE chip detected, assuming KB3926D\n");
- ene_printk(KERN_WARNING,
- "driver support might be not complete");
-
+ ene_notice("KB3926D or higher detected");
}
- ene_printk(KERN_DEBUG,
- "chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
- chip_major, chip_minor, old_ver, hw_revision);
-
/* detect features hardware supports */
if (dev->hw_revision < ENE_HW_C)
return 0;
- fw_capabilities = ene_hw_read_reg(dev, ENE_FW2);
- ene_dbg("Firmware capabilities: %02x", fw_capabilities);
+ fw_reg1 = ene_read_reg(dev, ENE_FW1);
+ fw_reg2 = ene_read_reg(dev, ENE_FW2);
+
+ ene_notice("Firmware regs: %02x %02x", fw_reg1, fw_reg2);
- dev->hw_gpio40_learning = fw_capabilities & ENE_FW2_GP40_AS_LEARN;
- dev->hw_learning_and_tx_capable = fw_capabilities & ENE_FW2_LEARNING;
+ dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
+ dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
+ dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);
- dev->hw_fan_as_normal_input = dev->hw_learning_and_tx_capable &&
- (fw_capabilities & ENE_FW2_FAN_AS_NRML_IN);
+ if (dev->hw_learning_and_tx_capable)
+ dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);
- ene_printk(KERN_NOTICE, "hardware features:\n");
- ene_printk(KERN_NOTICE,
- "learning and transmit %s, gpio40_learn %s, fan_in %s\n",
- dev->hw_learning_and_tx_capable ? "on" : "off",
- dev->hw_gpio40_learning ? "on" : "off",
- dev->hw_fan_as_normal_input ? "on" : "off");
+ ene_notice("Hardware features:");
if (dev->hw_learning_and_tx_capable) {
- ene_printk(KERN_WARNING,
- "Device supports transmitting, but that support is\n");
- ene_printk(KERN_WARNING,
- "lightly tested. Please test it and mail\n");
- ene_printk(KERN_WARNING,
- "lirc-list@lists.sourceforge.net\n");
+ ene_notice("* Supports transmitting & learning mode");
+ ene_notice(" This feature is rare and therefore,");
+ ene_notice(" you are welcome to test it,");
+ ene_notice(" and/or contact the author via:");
+ ene_notice(" lirc-list@lists.sourceforge.net");
+ ene_notice(" or maximlevitsky@gmail.com");
+
+ ene_notice("* Uses GPIO %s for IR raw input",
+ dev->hw_use_gpio_0a ? "40" : "0A");
+
+ if (dev->hw_fan_input)
+ ene_notice("* Uses unused fan feedback input as source"
+ " of demodulated IR data");
}
+
+ if (!dev->hw_fan_input)
+ ene_notice("* Uses GPIO %s for IR demodulated input",
+ dev->hw_use_gpio_0a ? "0A" : "40");
+
+ if (dev->hw_extra_buffer)
+ ene_notice("* Uses new style input buffer");
return 0;
}
-/* this enables/disables IR input via gpio40*/
-static void ene_enable_gpio40_receive(struct ene_device *dev, int enable)
+/* Read properties of hw sample buffer */
+static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, enable ?
- 0 : ENE_CIR_CONF2_GPIO40DIS,
- ENE_CIR_CONF2_GPIO40DIS);
+ u16 tmp;
+
+ ene_rx_read_hw_pointer(dev);
+ dev->r_pointer = dev->w_pointer;
+
+ if (!dev->hw_extra_buffer) {
+ dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
+ return;
+ }
+
+ tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
+ tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
+ dev->extra_buf1_address = tmp;
+
+ dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);
+
+ tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
+ tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
+ dev->extra_buf2_address = tmp;
+
+ dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);
+
+ dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;
+
+ ene_notice("Hardware uses 2 extended buffers:");
+ ene_notice(" 0x%04x - len : %d", dev->extra_buf1_address,
+ dev->extra_buf1_len);
+ ene_notice(" 0x%04x - len : %d", dev->extra_buf2_address,
+ dev->extra_buf2_len);
+
+ ene_notice("Total buffer len = %d", dev->buffer_len);
+
+ if (dev->buffer_len > 64 || dev->buffer_len < 16)
+ goto error;
+
+ if (dev->extra_buf1_address > 0xFBFC ||
+ dev->extra_buf1_address < 0xEC00)
+ goto error;
+
+ if (dev->extra_buf2_address > 0xFBFC ||
+ dev->extra_buf2_address < 0xEC00)
+ goto error;
+
+ if (dev->r_pointer > dev->buffer_len)
+ goto error;
+
+ ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
+ return;
+error:
+ ene_warn("Error validating extra buffers, device probably won't work");
+ dev->hw_extra_buffer = false;
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
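As a rough guide to the layout parsed above, the six bytes starting at ENE_FW_SAMPLE_BUFFER can be pictured as the struct below; this is an illustrative sketch only, and the struct and its name are not part of the driver.

/* hypothetical, for illustration only */
struct ene_extra_buf_layout {
	u8 buf1_addr_lo;	/* ENE_FW_SAMPLE_BUFFER + 0 */
	u8 buf1_addr_hi;	/* ENE_FW_SAMPLE_BUFFER + 1 */
	u8 buf1_len;		/* ENE_FW_SAMPLE_BUFFER + 2 */
	u8 buf2_addr_lo;	/* ENE_FW_SAMPLE_BUFFER + 3 */
	u8 buf2_addr_hi;	/* ENE_FW_SAMPLE_BUFFER + 4 */
	u8 buf2_len;		/* ENE_FW_SAMPLE_BUFFER + 5 */
};
/* buffer_len = 8 + buf1_len + buf2_len and is accepted only if it falls in
 * the 16..64 range with both buffers located inside 0xEC00..0xFBFC. */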
-/* this enables/disables IR via standard input */
-static void ene_enable_normal_receive(struct ene_device *dev, int enable)
+
+/* Restore the pointers to extra buffers - to make module reload work */
+static void ene_rx_restore_hw_buffer(struct ene_device *dev)
{
- ene_hw_write_reg(dev, ENE_CIR_CONF1, enable ? ENE_CIR_CONF1_RX_ON : 0);
+ if (!dev->hw_extra_buffer)
+ return;
+
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0,
+ dev->extra_buf1_address & 0xFF);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1,
+ dev->extra_buf1_address >> 8);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len);
+
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3,
+ dev->extra_buf2_address & 0xFF);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4,
+ dev->extra_buf2_address >> 8);
+ ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5,
+ dev->extra_buf2_len);
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
-/* this enables/disables IR input via unused fan tachtometer input */
-static void ene_enable_fan_receive(struct ene_device *dev, int enable)
+/* Read hardware write pointer */
+static void ene_rx_read_hw_pointer(struct ene_device *dev)
{
- if (!enable)
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, 0);
- else {
- ene_hw_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
- ene_hw_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
- }
- dev->rx_fan_input_inuse = enable;
+ if (dev->hw_extra_buffer)
+ dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
+ else
+ dev->w_pointer = ene_read_reg(dev, ENE_FW2)
+ & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;
+
+ dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
+ dev->w_pointer, dev->r_pointer);
}
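For hardware without the extra buffer, the pointer logic above combines with the ISR further below to form a simple two-packet flip; schematically (illustration only, no new behavior):

/*
 * ENE_FW2_BUF_WPTR set   -> w_pointer = 0
 * ENE_FW2_BUF_WPTR clear -> w_pointer = ENE_FW_PACKET_SIZE (4)
 *
 * The ISR then sets r_pointer to the opposite packet, so the driver always
 * consumes the 4-byte packet the firmware is not currently writing.
 */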
+/* Gets address of next sample from HW ring buffer */
+static int ene_rx_get_sample_reg(struct ene_device *dev)
+{
+ int r_pointer;
+
+ if (dev->r_pointer == dev->w_pointer) {
+ dbg_verbose("RB: hit end, try update w_pointer");
+ ene_rx_read_hw_pointer(dev);
+ }
+
+ if (dev->r_pointer == dev->w_pointer) {
+ dbg_verbose("RB: end of data at %d", dev->r_pointer);
+ return 0;
+ }
+
+ dbg_verbose("RB: reading at offset %d", dev->r_pointer);
+ r_pointer = dev->r_pointer;
+
+ dev->r_pointer++;
+ if (dev->r_pointer == dev->buffer_len)
+ dev->r_pointer = 0;
+
+ dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);
+
+ if (r_pointer < 8) {
+ dbg_verbose("RB: read at main buffer at %d", r_pointer);
+ return ENE_FW_SAMPLE_BUFFER + r_pointer;
+ }
+
+ r_pointer -= 8;
+
+ if (r_pointer < dev->extra_buf1_len) {
+ dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
+ return dev->extra_buf1_address + r_pointer;
+ }
+
+ r_pointer -= dev->extra_buf1_len;
+
+ if (r_pointer < dev->extra_buf2_len) {
+ dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
+ return dev->extra_buf2_address + r_pointer;
+ }
+
+ dbg("attempt to read beyong ring bufer end");
+ return 0;
+}
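A worked example of the offset-to-register mapping above; the extra-buffer addresses and lengths here are hypothetical values chosen only for illustration:

/*
 * extra_buf1_address = 0xEC00, extra_buf1_len = 24
 * extra_buf2_address = 0xEC20, extra_buf2_len = 24
 * buffer_len         = 8 + 24 + 24 = 56
 *
 * r_pointer  0..7   -> ENE_FW_SAMPLE_BUFFER + r_pointer
 * r_pointer  8..31  -> 0xEC00 + (r_pointer - 8)
 * r_pointer 32..55  -> 0xEC20 + (r_pointer - 32)
 * r_pointer then wraps back to 0.
 */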
/* Sense current received carrier */
-static int ene_rx_sense_carrier(struct ene_device *dev)
+void ene_rx_sense_carrier(struct ene_device *dev)
{
- int period = ene_hw_read_reg(dev, ENE_RX_CARRIER);
- int carrier;
- ene_dbg("RX: hardware carrier period = %02x", period);
+ DEFINE_IR_RAW_EVENT(ev);
- if (!(period & ENE_RX_CARRIER_VALID))
- return 0;
+ int carrier, duty_cycle;
+ int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
+ int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);
+
+ if (!(period & ENE_CIRCAR_PRD_VALID))
+ return;
- period &= ~ENE_RX_CARRIER_VALID;
+ period &= ~ENE_CIRCAR_PRD_VALID;
if (!period)
- return 0;
+ return;
+
+ dbg("RX: hardware carrier period = %02x", period);
+ dbg("RX: hardware carrier pulse period = %02x", hperiod);
carrier = 2000000 / period;
- ene_dbg("RX: sensed carrier = %d Hz", carrier);
- return carrier;
+ duty_cycle = (hperiod * 100) / period;
+ dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
+ carrier, duty_cycle);
+ if (dev->carrier_detect_enabled) {
+ ev.carrier_report = true;
+ ev.carrier = carrier;
+ ev.duty_cycle = duty_cycle;
+ ir_raw_event_store(dev->idev, &ev);
+ }
}
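A numeric illustration of the carrier math above (the register values are made up); per the header comments, both period registers count in 500 ns units:

/*
 * period = 52, hperiod = 17:
 *   carrier    = 2000000 / 52    = 38461 Hz  (~38.4 kHz)
 *   duty_cycle = (17 * 100) / 52 = 32 %
 */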
-/* determine which input to use*/
-static void ene_rx_set_inputs(struct ene_device *dev)
+/* this enables/disables the CIR RX engine */
+static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable)
{
- int learning_mode = dev->learning_enabled;
-
- ene_dbg("RX: setup receiver, learning mode = %d", learning_mode);
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG,
+ ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable);
+}
- ene_enable_normal_receive(dev, 1);
+/* this selects input for CIR engine. Either GPIO 0A or GPIO40 */
+static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
+{
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
+}
- /* old hardware doesn't support learning mode for sure */
- if (dev->hw_revision <= ENE_HW_B)
+/*
+ * this enables alternative input via fan tachometer sensor and bypasses
+ * the hw CIR engine
+ */
+static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable)
+{
+ if (!dev->hw_fan_input)
return;
- /* receiver not learning capable, still set gpio40 correctly */
- if (!dev->hw_learning_and_tx_capable) {
- ene_enable_gpio40_receive(dev, !dev->hw_gpio40_learning);
- return;
+ if (!enable)
+ ene_write_reg(dev, ENE_FAN_AS_IN1, 0);
+ else {
+ ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
+ ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
}
+}
+
+/* setup the receiver for RX*/
+static void ene_rx_setup(struct ene_device *dev)
+{
+ bool learning_mode = dev->learning_mode_enabled ||
+ dev->carrier_detect_enabled;
+ int sample_period_adjust = 0;
+
+ dbg("RX: setup receiver, learning mode = %d", learning_mode);
+
+
+ /* This selects RLC input and clears CFG2 settings */
+ ene_write_reg(dev, ENE_CIRCFG2, 0x00);
+
+ /* set sample period*/
+ if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
+ sample_period_adjust =
+ dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;
+
+ ene_write_reg(dev, ENE_CIRRLC_CFG,
+ (sample_period + sample_period_adjust) |
+ ENE_CIRRLC_CFG_OVERFLOW);
+ /* revB doesn't support inputs */
+ if (dev->hw_revision < ENE_HW_C)
+ goto select_timeout;
- /* enable learning mode */
if (learning_mode) {
- ene_enable_gpio40_receive(dev, dev->hw_gpio40_learning);
- /* fan input is not used for learning */
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_receive(dev, 0);
+ WARN_ON(!dev->hw_learning_and_tx_capable);
- /* disable learning mode */
- } else {
- if (dev->hw_fan_as_normal_input) {
- ene_enable_fan_receive(dev, 1);
- ene_enable_normal_receive(dev, 0);
- } else
- ene_enable_gpio40_receive(dev,
- !dev->hw_gpio40_learning);
- }
+ /* Enable the opposite of the normal input.
+ That means that if GPIO40 is normally used, use GPIO0A
+ and vice versa.
+ This input will carry the non-demodulated
+ signal, and we will tell the hw to demodulate it itself */
+ ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
+ dev->rx_fan_input_inuse = false;
- /* set few additional settings for this mode */
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF1, learning_mode ?
- ENE_CIR_CONF1_LEARN1 : 0, ENE_CIR_CONF1_LEARN1);
+ /* Enable carrier demodulation */
+ ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
- ene_hw_write_reg_mask(dev, ENE_CIR_CONF2, learning_mode ?
- ENE_CIR_CONF2_LEARN2 : 0, ENE_CIR_CONF2_LEARN2);
+ /* Enable carrier detection */
+ ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
+ dev->carrier_detect_enabled || debug);
+ } else {
+ if (dev->hw_fan_input)
+ dev->rx_fan_input_inuse = true;
+ else
+ ene_rx_select_input(dev, dev->hw_use_gpio_0a);
+
+ /* Disable carrier detection & demodulation */
+ ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
+ ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
+ }
+select_timeout:
if (dev->rx_fan_input_inuse) {
- dev->props->rx_resolution = ENE_SAMPLE_PERIOD_FAN * 1000;
+ dev->props->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
- dev->props->timeout =
- ENE_FAN_VALUE_MASK * ENE_SAMPLE_PERIOD_FAN * 1000;
+ /* Fan input doesn't support timeouts; it just ends the
+ input with a maximum sample */
+ dev->props->min_timeout = dev->props->max_timeout =
+ MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+ ENE_FW_SAMPLE_PERIOD_FAN);
} else {
- dev->props->rx_resolution = sample_period * 1000;
- dev->props->timeout = ENE_MAXGAP * 1000;
+ dev->props->rx_resolution = MS_TO_NS(sample_period);
+
+ /* Theoretically the timeout is unlimited, but we cap it
+ * because it was seen that on one device, it
+ * would stop sending spaces after around 250 msec.
+ * Besides, this is close to 2^32 anyway and timeout is u32.
+ */
+ dev->props->min_timeout = MS_TO_NS(127 * sample_period);
+ dev->props->max_timeout = MS_TO_NS(200000);
}
+
+ if (dev->hw_learning_and_tx_capable)
+ dev->props->tx_resolution = MS_TO_NS(sample_period);
+
+ if (dev->props->timeout > dev->props->max_timeout)
+ dev->props->timeout = dev->props->max_timeout;
+ if (dev->props->timeout < dev->props->min_timeout)
+ dev->props->timeout = dev->props->min_timeout;
}
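For reference, the numbers this produces with the default sample_period of 50 (a sketch of the arithmetic only, no new behavior):

/*
 * CIR path: min_timeout argument = 127 * 50 = 6350, max capped at 200000
 *           (the hardware was seen to stop emitting spaces near 250 ms)
 * Fan path: fixed 61-unit samples, timeout argument =
 *           ENE_FW_SMPL_BUF_FAN_MSK * 61 = 0x0FFF * 61 = 249795
 */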
/* Enable the device for receive */
@@ -278,145 +481,157 @@ static void ene_rx_enable(struct ene_device *dev)
{
u8 reg_value;
+ /* Enable system interrupt */
if (dev->hw_revision < ENE_HW_C) {
- ene_hw_write_reg(dev, ENEB_IRQ, dev->irq << 1);
- ene_hw_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
+ ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
+ ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
} else {
- reg_value = ene_hw_read_reg(dev, ENEC_IRQ) & 0xF0;
- reg_value |= ENEC_IRQ_UNK_EN;
- reg_value &= ~ENEC_IRQ_STATUS;
- reg_value |= (dev->irq & ENEC_IRQ_MASK);
- ene_hw_write_reg(dev, ENEC_IRQ, reg_value);
- ene_hw_write_reg(dev, ENE_TX_UNK1, 0x63);
+ reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
+ reg_value |= ENE_IRQ_UNK_EN;
+ reg_value &= ~ENE_IRQ_STATUS;
+ reg_value |= (dev->irq & ENE_IRQ_MASK);
+ ene_write_reg(dev, ENE_IRQ, reg_value);
}
- ene_hw_write_reg(dev, ENE_CIR_CONF2, 0x00);
- ene_rx_set_inputs(dev);
-
- /* set sampling period */
- ene_hw_write_reg(dev, ENE_CIR_SAMPLE_PERIOD, sample_period);
+ /* Enable inputs */
+ ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
+ ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);
/* ack any pending irqs - just in case */
ene_irq_status(dev);
/* enable firmware bits */
- ene_hw_write_reg_mask(dev, ENE_FW1,
- ENE_FW1_ENABLE | ENE_FW1_IRQ,
- ENE_FW1_ENABLE | ENE_FW1_IRQ);
+ ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
/* enter idle mode */
- ir_raw_event_set_idle(dev->idev, 1);
- ir_raw_event_reset(dev->idev);
-
+ ir_raw_event_set_idle(dev->idev, true);
+ dev->rx_enabled = true;
}
/* Disable the device receiver */
static void ene_rx_disable(struct ene_device *dev)
{
/* disable inputs */
- ene_enable_normal_receive(dev, 0);
-
- if (dev->hw_fan_as_normal_input)
- ene_enable_fan_receive(dev, 0);
+ ene_rx_enable_cir_engine(dev, false);
+ ene_rx_enable_fan_input(dev, false);
/* disable hardware IRQ and firmware flag */
- ene_hw_write_reg_mask(dev, ENE_FW1, 0, ENE_FW1_ENABLE | ENE_FW1_IRQ);
+ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
- ir_raw_event_set_idle(dev->idev, 1);
- ir_raw_event_reset(dev->idev);
+ ir_raw_event_set_idle(dev->idev, true);
+ dev->rx_enabled = false;
}
+/* This resets the receiver. Useful to stop the stream of spaces at end of
+ * transmission
+ */
+static void ene_rx_reset(struct ene_device *dev)
+{
+ ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
+ ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
+}
-/* prepare transmission */
-static void ene_tx_prepare(struct ene_device *dev)
+/* Set up the TX carrier frequency and duty cycle */
+static void ene_tx_set_carrier(struct ene_device *dev)
{
- u8 conf1;
+ u8 tx_puls_width;
+ unsigned long flags;
- conf1 = ene_hw_read_reg(dev, ENE_CIR_CONF1);
- dev->saved_conf1 = conf1;
+ spin_lock_irqsave(&dev->hw_lock, flags);
- if (dev->hw_revision == ENE_HW_C)
- conf1 &= ~ENE_CIR_CONF1_TX_CLEAR;
+ ene_set_clear_reg_mask(dev, ENE_CIRCFG,
+ ENE_CIRCFG_TX_CARR, dev->tx_period > 0);
- /* Enable TX engine */
- conf1 |= ENE_CIR_CONF1_TX_ON;
+ if (!dev->tx_period)
+ goto unlock;
- /* Set carrier */
- if (dev->tx_period) {
+ BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);
- /* NOTE: duty cycle handling is just a guess, it might
- not be aviable. Default values were tested */
- int tx_period_in500ns = dev->tx_period * 2;
+ tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);
- int tx_pulse_width_in_500ns =
- tx_period_in500ns / (100 / dev->tx_duty_cycle);
+ if (!tx_puls_width)
+ tx_puls_width = 1;
- if (!tx_pulse_width_in_500ns)
- tx_pulse_width_in_500ns = 1;
+ dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
+ dbg("TX: pulse width = %d * 500 ns", tx_puls_width);
- ene_dbg("TX: pulse distance = %d * 500 ns", tx_period_in500ns);
- ene_dbg("TX: pulse width = %d * 500 ns",
- tx_pulse_width_in_500ns);
+ ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
+ ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
+unlock:
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
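A worked example for the TX carrier setup (the requested values are hypothetical): ene_set_tx_carrier() further below stores the period in 500 ns units, and this function derives the pulse width from the duty cycle:

/*
 * Requested carrier of 36000 (36 kHz) at the default 50% duty cycle:
 *   tx_period     = 2000000 / 36000 = 55  (500 ns units, ~27.5 us)
 *   tx_puls_width = 55 / (100 / 50) = 27  (~13.5 us high time)
 * written to ENE_CIRMOD_PRD (with ENE_CIRMOD_PRD_POL) and ENE_CIRMOD_HPRD.
 */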
- ene_hw_write_reg(dev, ENE_TX_PERIOD, ENE_TX_PERIOD_UNKBIT |
- tx_period_in500ns);
+/* Enable/disable transmitters */
+static void ene_tx_set_transmitters(struct ene_device *dev)
+{
+ unsigned long flags;
- ene_hw_write_reg(dev, ENE_TX_PERIOD_PULSE,
- tx_pulse_width_in_500ns);
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41,
+ !!(dev->transmitter_mask & 0x01));
+ ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D,
+ !!(dev->transmitter_mask & 0x02));
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
+}
- conf1 |= ENE_CIR_CONF1_TX_CARR;
- } else
- conf1 &= ~ENE_CIR_CONF1_TX_CARR;
+/* prepare transmission */
+static void ene_tx_enable(struct ene_device *dev)
+{
+ u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
+ u8 fwreg2 = ene_read_reg(dev, ENE_FW2);
+
+ dev->saved_conf1 = conf1;
+
+ /* Show information about currently connected transmitter jacks */
+ if (fwreg2 & ENE_FW2_EMMITER1_CONN)
+ dbg("TX: Transmitter #1 is connected");
+
+ if (fwreg2 & ENE_FW2_EMMITER2_CONN)
+ dbg("TX: Transmitter #2 is connected");
- ene_hw_write_reg(dev, ENE_CIR_CONF1, conf1);
+ if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
+ ene_warn("TX: transmitter cable isn't connected!");
+ /* disable receive on revc */
+ if (dev->hw_revision == ENE_HW_C)
+ conf1 &= ~ENE_CIRCFG_RX_EN;
+
+ /* Enable TX engine */
+ conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
+ ene_write_reg(dev, ENE_CIRCFG, conf1);
}
/* end transmission */
-static void ene_tx_complete(struct ene_device *dev)
+static void ene_tx_disable(struct ene_device *dev)
{
- ene_hw_write_reg(dev, ENE_CIR_CONF1, dev->saved_conf1);
+ ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
dev->tx_buffer = NULL;
}
-/* set transmit mask */
-static void ene_tx_hw_set_transmiter_mask(struct ene_device *dev)
-{
- u8 txport1 = ene_hw_read_reg(dev, ENE_TX_PORT1) & ~ENE_TX_PORT1_EN;
- u8 txport2 = ene_hw_read_reg(dev, ENE_TX_PORT2) & ~ENE_TX_PORT2_EN;
-
- if (dev->transmitter_mask & 0x01)
- txport1 |= ENE_TX_PORT1_EN;
-
- if (dev->transmitter_mask & 0x02)
- txport2 |= ENE_TX_PORT2_EN;
-
- ene_hw_write_reg(dev, ENE_TX_PORT1, txport1);
- ene_hw_write_reg(dev, ENE_TX_PORT2, txport2);
-}
/* TX one sample - must be called with dev->hw_lock*/
static void ene_tx_sample(struct ene_device *dev)
{
u8 raw_tx;
u32 sample;
+ bool pulse = dev->tx_sample_pulse;
if (!dev->tx_buffer) {
- ene_dbg("TX: attempt to transmit NULL buffer");
+ ene_warn("TX: BUG: attempt to transmit NULL buffer");
return;
}
/* Grab next TX sample */
if (!dev->tx_sample) {
-again:
- if (dev->tx_pos == dev->tx_len + 1) {
+
+ if (dev->tx_pos == dev->tx_len) {
if (!dev->tx_done) {
- ene_dbg("TX: no more data to send");
- dev->tx_done = 1;
+ dbg("TX: no more data to send");
+ dev->tx_done = true;
goto exit;
} else {
- ene_dbg("TX: last sample sent by hardware");
- ene_tx_complete(dev);
+ dbg("TX: last sample sent by hardware");
+ ene_tx_disable(dev);
complete(&dev->tx_complete);
return;
}
@@ -425,23 +640,23 @@ again:
sample = dev->tx_buffer[dev->tx_pos++];
dev->tx_sample_pulse = !dev->tx_sample_pulse;
- ene_dbg("TX: sample %8d (%s)", sample, dev->tx_sample_pulse ?
- "pulse" : "space");
+ dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);
- dev->tx_sample = DIV_ROUND_CLOSEST(sample, ENE_TX_SMPL_PERIOD);
-
- /* guard against too short samples */
if (!dev->tx_sample)
- goto again;
+ dev->tx_sample = 1;
}
- raw_tx = min(dev->tx_sample , (unsigned int)ENE_TX_SMLP_MASK);
+ raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
dev->tx_sample -= raw_tx;
- if (dev->tx_sample_pulse)
- raw_tx |= ENE_TX_PULSE_MASK;
+ dbg("TX: sample %8d (%s)", raw_tx * sample_period,
+ pulse ? "pulse" : "space");
+ if (pulse)
+ raw_tx |= ENE_CIRRLC_OUT_PULSE;
+
+ ene_write_reg(dev,
+ dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);
- ene_hw_write_reg(dev, ENE_TX_INPUT1 + dev->tx_reg, raw_tx);
dev->tx_reg = !dev->tx_reg;
exit:
/* simulate TX done interrupt */
@@ -466,76 +681,59 @@ static int ene_irq_status(struct ene_device *dev)
{
u8 irq_status;
u8 fw_flags1, fw_flags2;
- int cur_rx_pointer;
int retval = 0;
- fw_flags2 = ene_hw_read_reg(dev, ENE_FW2);
- cur_rx_pointer = !!(fw_flags2 & ENE_FW2_BUF_HIGH);
+ fw_flags2 = ene_read_reg(dev, ENE_FW2);
if (dev->hw_revision < ENE_HW_C) {
- irq_status = ene_hw_read_reg(dev, ENEB_IRQ_STATUS);
+ irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);
if (!(irq_status & ENEB_IRQ_STATUS_IR))
return 0;
- ene_hw_write_reg(dev, ENEB_IRQ_STATUS,
- irq_status & ~ENEB_IRQ_STATUS_IR);
- dev->rx_pointer = cur_rx_pointer;
+ ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
return ENE_IRQ_RX;
}
- irq_status = ene_hw_read_reg(dev, ENEC_IRQ);
-
- if (!(irq_status & ENEC_IRQ_STATUS))
+ irq_status = ene_read_reg(dev, ENE_IRQ);
+ if (!(irq_status & ENE_IRQ_STATUS))
return 0;
/* original driver does that twice - a workaround ? */
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
- ene_hw_write_reg(dev, ENEC_IRQ, irq_status & ~ENEC_IRQ_STATUS);
+ ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
+ ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
- /* clear unknown flag in F8F9 */
- if (fw_flags2 & ENE_FW2_IRQ_CLR)
- ene_hw_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_IRQ_CLR);
+ /* check RX interrupt */
+ if (fw_flags2 & ENE_FW2_RXIRQ) {
+ retval |= ENE_IRQ_RX;
+ ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
+ }
- /* check if this is a TX interrupt */
- fw_flags1 = ene_hw_read_reg(dev, ENE_FW1);
+ /* check TX interrupt */
+ fw_flags1 = ene_read_reg(dev, ENE_FW1);
if (fw_flags1 & ENE_FW1_TXIRQ) {
- ene_hw_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
+ ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
retval |= ENE_IRQ_TX;
}
- /* Check if this is RX interrupt */
- if (dev->rx_pointer != cur_rx_pointer) {
- retval |= ENE_IRQ_RX;
- dev->rx_pointer = cur_rx_pointer;
-
- } else if (!(retval & ENE_IRQ_TX)) {
- ene_dbg("RX: interrupt without change in RX pointer(%d)",
- dev->rx_pointer);
- retval |= ENE_IRQ_RX;
- }
-
- if ((retval & ENE_IRQ_RX) && (retval & ENE_IRQ_TX))
- ene_dbg("both RX and TX interrupt at same time");
-
return retval;
}
/* interrupt handler */
static irqreturn_t ene_isr(int irq, void *data)
{
- u16 hw_value;
- int i, hw_sample;
- int pulse;
- int irq_status;
+ u16 hw_value, reg;
+ int hw_sample, irq_status;
+ bool pulse;
unsigned long flags;
- int carrier = 0;
irqreturn_t retval = IRQ_NONE;
struct ene_device *dev = (struct ene_device *)data;
- struct ir_raw_event ev;
-
+ DEFINE_IR_RAW_EVENT(ev);
spin_lock_irqsave(&dev->hw_lock, flags);
+
+ dbg_verbose("ISR called");
+ ene_rx_read_hw_pointer(dev);
irq_status = ene_irq_status(dev);
if (!irq_status)
@@ -544,9 +742,9 @@ static irqreturn_t ene_isr(int irq, void *data)
retval = IRQ_HANDLED;
if (irq_status & ENE_IRQ_TX) {
-
+ dbg_verbose("TX interrupt");
if (!dev->hw_learning_and_tx_capable) {
- ene_dbg("TX interrupt on unsupported device!");
+ dbg("TX interrupt on unsupported device!");
goto unlock;
}
ene_tx_sample(dev);
@@ -555,48 +753,57 @@ static irqreturn_t ene_isr(int irq, void *data)
if (!(irq_status & ENE_IRQ_RX))
goto unlock;
+ dbg_verbose("RX interrupt");
- if (dev->carrier_detect_enabled || debug)
- carrier = ene_rx_sense_carrier(dev);
-#if 0
- /* TODO */
- if (dev->carrier_detect_enabled && carrier)
- ir_raw_event_report_frequency(dev->idev, carrier);
-#endif
+ if (dev->hw_learning_and_tx_capable)
+ ene_rx_sense_carrier(dev);
+
+ /* On hardware that doesn't support the extra buffer we need to trust
+ the interrupt and not track the read pointer */
+ if (!dev->hw_extra_buffer)
+ dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;
+
+ while (1) {
+
+ reg = ene_rx_get_sample_reg(dev);
+
+ dbg_verbose("next sample to read at: %04x", reg);
+ if (!reg)
+ break;
- for (i = 0; i < ENE_SAMPLES_SIZE; i++) {
- hw_value = ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER + dev->rx_pointer * 4 + i);
+ hw_value = ene_read_reg(dev, reg);
if (dev->rx_fan_input_inuse) {
+
+ int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;
+
/* read high part of the sample */
- hw_value |= ene_hw_read_reg(dev,
- ENE_SAMPLE_BUFFER_FAN +
- dev->rx_pointer * 4 + i) << 8;
- pulse = hw_value & ENE_FAN_SMPL_PULS_MSK;
+ hw_value |= ene_read_reg(dev, reg + offset) << 8;
+ pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;
/* clear space bit, and other unused bits */
- hw_value &= ENE_FAN_VALUE_MASK;
- hw_sample = hw_value * ENE_SAMPLE_PERIOD_FAN;
+ hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
+ hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;
} else {
- pulse = !(hw_value & ENE_SAMPLE_SPC_MASK);
- hw_value &= ENE_SAMPLE_VALUE_MASK;
+ pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
+ hw_value &= ~ENE_FW_SAMPLE_SPACE;
hw_sample = hw_value * sample_period;
if (dev->rx_period_adjust) {
- hw_sample *= (100 - dev->rx_period_adjust);
- hw_sample /= 100;
+ hw_sample *= 100;
+ hw_sample /= (100 + dev->rx_period_adjust);
}
}
- /* no more data */
- if (!(hw_value))
- break;
- ene_dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
+ if (!dev->hw_extra_buffer && !hw_sample) {
+ dev->r_pointer = dev->w_pointer;
+ continue;
+ }
+ dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
- ev.duration = hw_sample * 1000;
+ ev.duration = MS_TO_NS(hw_sample);
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->idev, &ev);
}
@@ -608,19 +815,26 @@ unlock:
}
/* Initialize default settings */
-static void ene_setup_settings(struct ene_device *dev)
+static void ene_setup_default_settings(struct ene_device *dev)
{
dev->tx_period = 32;
- dev->tx_duty_cycle = 25; /*%*/
- dev->transmitter_mask = 3;
+ dev->tx_duty_cycle = 50; /*%*/
+ dev->transmitter_mask = 0x03;
+ dev->learning_mode_enabled = learning_mode_force;
- /* Force learning mode if (input == 2), otherwise
- let user set it with LIRC_SET_REC_CARRIER */
- dev->learning_enabled =
- (input == 2 && dev->hw_learning_and_tx_capable);
+ /* Set reasonable default timeout */
+ dev->props->timeout = MS_TO_NS(150000);
+}
- dev->rx_pointer = -1;
+/* Upload all hardware settings at once. Used at load and resume time */
+static void ene_setup_hw_settings(struct ene_device *dev)
+{
+ if (dev->hw_learning_and_tx_capable) {
+ ene_tx_set_carrier(dev);
+ ene_tx_set_transmitters(dev);
+ }
+ ene_rx_setup(dev);
}
/* outside interface: called on first open*/
@@ -630,8 +844,6 @@ static int ene_open(void *data)
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
- dev->in_use = 1;
- ene_setup_settings(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
@@ -645,7 +857,6 @@ static void ene_close(void *data)
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
- dev->in_use = 0;
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
@@ -653,19 +864,17 @@ static void ene_close(void *data)
static int ene_set_tx_mask(void *data, u32 tx_mask)
{
struct ene_device *dev = (struct ene_device *)data;
- unsigned long flags;
- ene_dbg("TX: attempt to set transmitter mask %02x", tx_mask);
+ dbg("TX: attempt to set transmitter mask %02x", tx_mask);
/* invalid txmask */
- if (!tx_mask || tx_mask & ~0x3) {
- ene_dbg("TX: invalid mask");
+ if (!tx_mask || tx_mask & ~0x03) {
+ dbg("TX: invalid mask");
/* return count of transmitters */
return 2;
}
- spin_lock_irqsave(&dev->hw_lock, flags);
dev->transmitter_mask = tx_mask;
- spin_unlock_irqrestore(&dev->hw_lock, flags);
+ ene_tx_set_transmitters(dev);
return 0;
}
@@ -673,66 +882,76 @@ static int ene_set_tx_mask(void *data, u32 tx_mask)
static int ene_set_tx_carrier(void *data, u32 carrier)
{
struct ene_device *dev = (struct ene_device *)data;
- unsigned long flags;
- u32 period = 1000000 / carrier; /* (1 / freq) (* # usec in 1 sec) */
-
- ene_dbg("TX: attempt to set tx carrier to %d kHz", carrier);
+ u32 period = 2000000 / carrier;
- if (period && (period > ENE_TX_PERIOD_MAX ||
- period < ENE_TX_PERIOD_MIN)) {
+ dbg("TX: attempt to set tx carrier to %d kHz", carrier);
- ene_dbg("TX: out of range %d-%d carrier, "
- "falling back to 32 kHz",
- 1000 / ENE_TX_PERIOD_MIN,
- 1000 / ENE_TX_PERIOD_MAX);
+ if (period && (period > ENE_CIRMOD_PRD_MAX ||
+ period < ENE_CIRMOD_PRD_MIN)) {
- period = 32; /* this is just a coincidence!!! */
+ dbg("TX: out of range %d-%d kHz carrier",
+ 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
+ return -1;
}
- ene_dbg("TX: set carrier to %d kHz", carrier);
- spin_lock_irqsave(&dev->hw_lock, flags);
dev->tx_period = period;
- spin_unlock_irqrestore(&dev->hw_lock, flags);
+ ene_tx_set_carrier(dev);
return 0;
}
+/*outside interface : set tx duty cycle */
+static int ene_set_tx_duty_cycle(void *data, u32 duty_cycle)
+{
+ struct ene_device *dev = (struct ene_device *)data;
+ dbg("TX: setting duty cycle to %d%%", duty_cycle);
+ dev->tx_duty_cycle = duty_cycle;
+ ene_tx_set_carrier(dev);
+ return 0;
+}
/* outside interface: enable learning mode */
static int ene_set_learning_mode(void *data, int enable)
{
struct ene_device *dev = (struct ene_device *)data;
unsigned long flags;
- if (enable == dev->learning_enabled)
+ if (enable == dev->learning_mode_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
- dev->learning_enabled = enable;
- ene_rx_set_inputs(dev);
+ dev->learning_mode_enabled = enable;
+ ene_rx_disable(dev);
+ ene_rx_setup(dev);
+ ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
-/* outside interface: set rec carrier */
-static int ene_set_rec_carrier(void *data, u32 min, u32 max)
+static int ene_set_carrier_report(void *data, int enable)
{
struct ene_device *dev = (struct ene_device *)data;
- ene_set_learning_mode(dev,
- max > ENE_NORMAL_RX_HI || min < ENE_NORMAL_RX_LOW);
+ unsigned long flags;
+
+ if (enable == dev->carrier_detect_enabled)
+ return 0;
+
+ spin_lock_irqsave(&dev->hw_lock, flags);
+ dev->carrier_detect_enabled = enable;
+ ene_rx_disable(dev);
+ ene_rx_setup(dev);
+ ene_rx_enable(dev);
+ spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: enable or disable idle mode */
-static void ene_rx_set_idle(void *data, int idle)
+static void ene_set_idle(void *data, bool idle)
{
- struct ene_device *dev = (struct ene_device *)data;
- ene_dbg("%sabling idle mode", idle ? "en" : "dis");
-
- ene_hw_write_reg_mask(dev, ENE_CIR_SAMPLE_PERIOD,
- (enable_idle && idle) ? 0 : ENE_CIR_SAMPLE_OVERFLOW,
- ENE_CIR_SAMPLE_OVERFLOW);
+ if (idle) {
+ ene_rx_reset((struct ene_device *)data);
+ dbg("RX: end of data");
+ }
}
-
/* outside interface: transmit */
static int ene_transmit(void *data, int *buf, u32 n)
{
@@ -747,12 +966,11 @@ static int ene_transmit(void *data, int *buf, u32 n)
dev->tx_sample = 0;
dev->tx_sample_pulse = 0;
- ene_dbg("TX: %d samples", dev->tx_len);
+ dbg("TX: %d samples", dev->tx_len);
spin_lock_irqsave(&dev->hw_lock, flags);
- ene_tx_hw_set_transmiter_mask(dev);
- ene_tx_prepare(dev);
+ ene_tx_enable(dev);
/* Transmit first two samples */
ene_tx_sample(dev);
@@ -761,16 +979,15 @@ static int ene_transmit(void *data, int *buf, u32 n)
spin_unlock_irqrestore(&dev->hw_lock, flags);
if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
- ene_dbg("TX: timeout");
+ dbg("TX: timeout");
spin_lock_irqsave(&dev->hw_lock, flags);
- ene_tx_complete(dev);
+ ene_tx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
} else
- ene_dbg("TX: done");
+ dbg("TX: done");
return n;
}
-
/* probe entry */
static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
{
@@ -785,121 +1002,103 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
if (!input_dev || !ir_props || !dev)
- goto error;
+ goto error1;
/* validate resources */
error = -ENODEV;
if (!pnp_port_valid(pnp_dev, 0) ||
- pnp_port_len(pnp_dev, 0) < ENE_MAX_IO)
+ pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto error;
if (!pnp_irq_valid(pnp_dev, 0))
goto error;
- dev->hw_io = pnp_port_start(pnp_dev, 0);
- dev->irq = pnp_irq(pnp_dev, 0);
spin_lock_init(&dev->hw_lock);
/* claim the resources */
error = -EBUSY;
- if (!request_region(dev->hw_io, ENE_MAX_IO, ENE_DRIVER_NAME))
+ dev->hw_io = pnp_port_start(pnp_dev, 0);
+ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+ dev->hw_io = -1;
+ dev->irq = -1;
goto error;
+ }
+ dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
- IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev))
+ IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
+ dev->irq = -1;
goto error;
+ }
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
+ /* don't allow too short/long sample periods */
+ if (sample_period < 5 || sample_period > 0x7F)
+ sample_period = ENE_DEFAULT_SAMPLE_PERIOD;
+
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
goto error;
- ene_setup_settings(dev);
-
if (!dev->hw_learning_and_tx_capable && txsim) {
- dev->hw_learning_and_tx_capable = 1;
+ dev->hw_learning_and_tx_capable = true;
setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
(long unsigned int)dev);
- ene_printk(KERN_WARNING,
- "Simulation of TX activated\n");
+ ene_warn("Simulation of TX activated");
}
+ if (!dev->hw_learning_and_tx_capable)
+ learning_mode_force = false;
+
ir_props->driver_type = RC_DRIVER_IR_RAW;
ir_props->allowed_protos = IR_TYPE_ALL;
ir_props->priv = dev;
ir_props->open = ene_open;
ir_props->close = ene_close;
- ir_props->min_timeout = ENE_MINGAP * 1000;
- ir_props->max_timeout = ENE_MAXGAP * 1000;
- ir_props->timeout = ENE_MAXGAP * 1000;
-
- if (dev->hw_revision == ENE_HW_B)
- ir_props->s_idle = ene_rx_set_idle;
-
+ ir_props->s_idle = ene_set_idle;
dev->props = ir_props;
dev->idev = input_dev;
- /* don't allow too short/long sample periods */
- if (sample_period < 5 || sample_period > 0x7F)
- sample_period = -1;
-
- /* choose default sample period */
- if (sample_period == -1) {
-
- sample_period = 50;
-
- /* on revB, hardware idle mode eats first sample
- if we set too low sample period */
- if (dev->hw_revision == ENE_HW_B && enable_idle)
- sample_period = 75;
- }
-
- ir_props->rx_resolution = sample_period * 1000;
-
if (dev->hw_learning_and_tx_capable) {
-
ir_props->s_learning_mode = ene_set_learning_mode;
-
- if (input == 0)
- ir_props->s_rx_carrier_range = ene_set_rec_carrier;
-
init_completion(&dev->tx_complete);
ir_props->tx_ir = ene_transmit;
ir_props->s_tx_mask = ene_set_tx_mask;
ir_props->s_tx_carrier = ene_set_tx_carrier;
- ir_props->tx_resolution = ENE_TX_SMPL_PERIOD * 1000;
- /* ir_props->s_carrier_report = ene_set_carrier_report; */
+ ir_props->s_tx_duty_cycle = ene_set_tx_duty_cycle;
+ ir_props->s_carrier_report = ene_set_carrier_report;
}
+ ene_rx_setup_hw_buffer(dev);
+ ene_setup_default_settings(dev);
+ ene_setup_hw_settings(dev);
- device_set_wakeup_capable(&pnp_dev->dev, 1);
- device_set_wakeup_enable(&pnp_dev->dev, 1);
+ device_set_wakeup_capable(&pnp_dev->dev, true);
+ device_set_wakeup_enable(&pnp_dev->dev, true);
if (dev->hw_learning_and_tx_capable)
input_dev->name = "ENE eHome Infrared Remote Transceiver";
else
input_dev->name = "ENE eHome Infrared Remote Receiver";
-
error = -ENODEV;
if (ir_input_register(input_dev, RC_MAP_RC6_MCE, ir_props,
ENE_DRIVER_NAME))
goto error;
-
- ene_printk(KERN_NOTICE, "driver has been succesfully loaded\n");
+ ene_notice("driver has been succesfully loaded");
return 0;
error:
- if (dev->irq)
+ if (dev && dev->irq >= 0)
free_irq(dev->irq, dev);
- if (dev->hw_io)
- release_region(dev->hw_io, ENE_MAX_IO);
-
+ if (dev && dev->hw_io >= 0)
+ release_region(dev->hw_io, ENE_IO_SIZE);
+error1:
input_free_device(input_dev);
kfree(ir_props);
kfree(dev);
@@ -914,10 +1113,11 @@ static void ene_remove(struct pnp_dev *pnp_dev)
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
+ ene_rx_restore_hw_buffer(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
free_irq(dev->irq, dev);
- release_region(dev->hw_io, ENE_MAX_IO);
+ release_region(dev->hw_io, ENE_IO_SIZE);
ir_input_unregister(dev->idev);
kfree(dev->props);
kfree(dev);
@@ -927,28 +1127,29 @@ static void ene_remove(struct pnp_dev *pnp_dev)
static void ene_enable_wake(struct ene_device *dev, int enable)
{
enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
-
- ene_dbg("wake on IR %s", enable ? "enabled" : "disabled");
-
- ene_hw_write_reg_mask(dev, ENE_FW1, enable ?
- ENE_FW1_WAKE : 0, ENE_FW1_WAKE);
+ dbg("wake on IR %s", enable ? "enabled" : "disabled");
+ ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
}
#ifdef CONFIG_PM
static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_enable_wake(dev, 1);
+ ene_enable_wake(dev, true);
+
+ /* TODO: add support for wake pattern */
return 0;
}
static int ene_resume(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- if (dev->in_use)
+ ene_setup_hw_settings(dev);
+
+ if (dev->rx_enabled)
ene_rx_enable(dev);
- ene_enable_wake(dev, 0);
+ ene_enable_wake(dev, false);
return 0;
}
#endif
@@ -956,7 +1157,7 @@ static int ene_resume(struct pnp_dev *pnp_dev)
static void ene_shutdown(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_enable_wake(dev, 1);
+ ene_enable_wake(dev, true);
}
static const struct pnp_device_id ene_ids[] = {
@@ -994,18 +1195,11 @@ static void ene_exit(void)
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
-module_param(enable_idle, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_idle,
- "Enables turning off signal sampling after long inactivity time; "
- "if disabled might help detecting input signal (default: enabled)"
- " (KB3926B only)");
-
-module_param(input, bool, S_IRUGO);
-MODULE_PARM_DESC(input, "select which input to use "
- "0 - auto, 1 - standard, 2 - wideband(KB3926C+)");
+module_param(learning_mode_force, bool, S_IRUGO);
+MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");
module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debug (debug=2 verbose debug output)");
+MODULE_PARM_DESC(debug, "Debug level");
module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
@@ -1013,8 +1207,8 @@ MODULE_PARM_DESC(txsim,
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
- ("Infrared input driver for KB3926B/KB3926C/KB3926D "
- "(aka ENE0100/ENE0200/ENE0201) CIR port");
+ ("Infrared input driver for KB3926B/C/D/E/F "
+ "(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/IR/ene_ir.h b/drivers/media/IR/ene_ir.h
index 54c76af0d033..f5870667a433 100644
--- a/drivers/media/IR/ene_ir.h
+++ b/drivers/media/IR/ene_ir.h
@@ -1,5 +1,5 @@
/*
- * driver for ENE KB3926 B/C/D CIR (also known as ENE0XXX)
+ * driver for ENE KB3926 B/C/D/E/F CIR (also known as ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
*
@@ -26,43 +26,50 @@
#define ENE_ADDR_HI 1 /* hi byte of register address */
#define ENE_ADDR_LO 2 /* low byte of register address */
#define ENE_IO 3 /* read/write window */
-#define ENE_MAX_IO 4
-
-/* 8 bytes of samples, divided in 2 halfs*/
-#define ENE_SAMPLE_BUFFER 0xF8F0 /* regular sample buffer */
-#define ENE_SAMPLE_SPC_MASK 0x80 /* sample is space */
-#define ENE_SAMPLE_VALUE_MASK 0x7F
-#define ENE_SAMPLE_OVERFLOW 0x7F
-#define ENE_SAMPLES_SIZE 4
-
-/* fan input sample buffer */
-#define ENE_SAMPLE_BUFFER_FAN 0xF8FB /* this buffer holds high byte of */
- /* each sample of normal buffer */
-#define ENE_FAN_SMPL_PULS_MSK 0x8000 /* this bit of combined sample */
- /* if set, says that sample is pulse */
-#define ENE_FAN_VALUE_MASK 0x0FFF /* mask for valid bits of the value */
-
-/* first firmware register */
-#define ENE_FW1 0xF8F8
+#define ENE_IO_SIZE 4
+
+/* 8 bytes of samples, divided into 2 packets */
+#define ENE_FW_SAMPLE_BUFFER 0xF8F0 /* sample buffer */
+#define ENE_FW_SAMPLE_SPACE 0x80 /* sample is space */
+#define ENE_FW_PACKET_SIZE 4
+
+/* first firmware flag register */
+#define ENE_FW1 0xF8F8 /* flagr */
#define ENE_FW1_ENABLE 0x01 /* enable fw processing */
#define ENE_FW1_TXIRQ 0x02 /* TX interrupt pending */
+#define ENE_FW1_HAS_EXTRA_BUF 0x04 /* fw uses extra buffer*/
+#define ENE_FW1_EXTRA_BUF_HND 0x08 /* extra buffer handshake bit*/
+#define ENE_FW1_LED_ON 0x10 /* turn on the LED */
+
+#define ENE_FW1_WPATTERN 0x20 /* enable wake pattern */
#define ENE_FW1_WAKE 0x40 /* enable wake from S3 */
#define ENE_FW1_IRQ 0x80 /* enable interrupt */
-/* second firmware register */
-#define ENE_FW2 0xF8F9
-#define ENE_FW2_BUF_HIGH 0x01 /* which half of the buffer to read */
-#define ENE_FW2_IRQ_CLR 0x04 /* clear this on IRQ */
-#define ENE_FW2_GP40_AS_LEARN 0x08 /* normal input is used as */
- /* learning input */
-#define ENE_FW2_FAN_AS_NRML_IN 0x40 /* fan is used as normal input */
+/* second firmware flag register */
+#define ENE_FW2 0xF8F9 /* flagw */
+#define ENE_FW2_BUF_WPTR 0x01 /* which half of the buffer to read */
+#define ENE_FW2_RXIRQ 0x04 /* RX IRQ pending*/
+#define ENE_FW2_GP0A 0x08 /* Use GPIO0A for demodulated input */
+#define ENE_FW2_EMMITER1_CONN 0x10 /* TX emmiter 1 connected */
+#define ENE_FW2_EMMITER2_CONN 0x20 /* TX emmiter 2 connected */
+
+#define ENE_FW2_FAN_INPUT 0x40 /* fan input used for demodulated data*/
#define ENE_FW2_LEARNING 0x80 /* hardware supports learning and TX */
+/* firmware RX pointer for new style buffer */
+#define ENE_FW_RX_POINTER 0xF8FA
+
+/* high parts of samples for fan input (8 samples)*/
+#define ENE_FW_SMPL_BUF_FAN 0xF8FB
+#define ENE_FW_SMPL_BUF_FAN_PLS 0x8000 /* combined sample is pulse */
+#define ENE_FW_SMPL_BUF_FAN_MSK 0x0FFF /* combined sample maximum value */
+#define ENE_FW_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
+
/* transmitter ports */
-#define ENE_TX_PORT2 0xFC01 /* this enables one or both */
-#define ENE_TX_PORT2_EN 0x20 /* TX ports */
-#define ENE_TX_PORT1 0xFC08
-#define ENE_TX_PORT1_EN 0x02
+#define ENE_GPIOFS1 0xFC01
+#define ENE_GPIOFS1_GPIO0D 0x20 /* enable tx output on GPIO0D */
+#define ENE_GPIOFS8 0xFC08
+#define ENE_GPIOFS8_GPIO41 0x02 /* enable tx output on GPIO40 */
/* IRQ registers block (for revision B) */
#define ENEB_IRQ 0xFD09 /* IRQ number */
@@ -70,97 +77,99 @@
#define ENEB_IRQ_STATUS 0xFD80 /* irq status */
#define ENEB_IRQ_STATUS_IR 0x20 /* IR irq */
-/* fan as input settings - only if learning capable */
+/* fan as input settings */
#define ENE_FAN_AS_IN1 0xFE30 /* fan init reg 1 */
#define ENE_FAN_AS_IN1_EN 0xCD
#define ENE_FAN_AS_IN2 0xFE31 /* fan init reg 2 */
#define ENE_FAN_AS_IN2_EN 0x03
-#define ENE_SAMPLE_PERIOD_FAN 61 /* fan input has fixed sample period */
/* IRQ registers block (for revision C,D) */
-#define ENEC_IRQ 0xFE9B /* new irq settings register */
-#define ENEC_IRQ_MASK 0x0F /* irq number mask */
-#define ENEC_IRQ_UNK_EN 0x10 /* always enabled */
-#define ENEC_IRQ_STATUS 0x20 /* irq status and ACK */
-
-/* CIR block settings */
-#define ENE_CIR_CONF1 0xFEC0
-#define ENE_CIR_CONF1_TX_CLEAR 0x01 /* clear that on revC */
- /* while transmitting */
-#define ENE_CIR_CONF1_RX_ON 0x07 /* normal receiver enabled */
-#define ENE_CIR_CONF1_LEARN1 0x08 /* enabled on learning mode */
-#define ENE_CIR_CONF1_TX_ON 0x30 /* enabled on transmit */
-#define ENE_CIR_CONF1_TX_CARR 0x80 /* send TX carrier or not */
-
-#define ENE_CIR_CONF2 0xFEC1 /* unknown setting = 0 */
-#define ENE_CIR_CONF2_LEARN2 0x10 /* set on enable learning */
-#define ENE_CIR_CONF2_GPIO40DIS 0x20 /* disable input via gpio40 */
-
-#define ENE_CIR_SAMPLE_PERIOD 0xFEC8 /* sample period in us */
-#define ENE_CIR_SAMPLE_OVERFLOW 0x80 /* interrupt on overflows if set */
-
-
-/* Two byte tx buffer */
-#define ENE_TX_INPUT1 0xFEC9
-#define ENE_TX_INPUT2 0xFECA
-#define ENE_TX_PULSE_MASK 0x80 /* Transmitted sample is pulse */
-#define ENE_TX_SMLP_MASK 0x7F
-#define ENE_TX_SMPL_PERIOD 50 /* transmit sample period - fixed */
+#define ENE_IRQ 0xFE9B /* new irq settings register */
+#define ENE_IRQ_MASK 0x0F /* irq number mask */
+#define ENE_IRQ_UNK_EN 0x10 /* always enabled */
+#define ENE_IRQ_STATUS 0x20 /* irq status and ACK */
+
+/* CIR Config register #1 */
+#define ENE_CIRCFG 0xFEC0
+#define ENE_CIRCFG_RX_EN 0x01 /* RX enable */
+#define ENE_CIRCFG_RX_IRQ 0x02 /* Enable hardware interrupt */
+#define ENE_CIRCFG_REV_POL 0x04 /* Input polarity reversed */
+#define ENE_CIRCFG_CARR_DEMOD 0x08 /* Enable carrier demodulator */
+
+#define ENE_CIRCFG_TX_EN 0x10 /* TX enable */
+#define ENE_CIRCFG_TX_IRQ 0x20 /* Send interrupt on TX done */
+#define ENE_CIRCFG_TX_POL_REV 0x40 /* TX polarity reversed */
+#define ENE_CIRCFG_TX_CARR 0x80 /* send TX carrier or not */
+
+/* CIR config register #2 */
+#define ENE_CIRCFG2 0xFEC1
+#define ENE_CIRCFG2_RLC 0x00
+#define ENE_CIRCFG2_RC5 0x01
+#define ENE_CIRCFG2_RC6 0x02
+#define ENE_CIRCFG2_NEC 0x03
+#define ENE_CIRCFG2_CARR_DETECT 0x10 /* Enable carrier detection */
+#define ENE_CIRCFG2_GPIO0A 0x20 /* Use GPIO0A instead of GPIO40 for input */
+#define ENE_CIRCFG2_FAST_SAMPL1 0x40 /* Fast leading pulse detection for RC6 */
+#define ENE_CIRCFG2_FAST_SAMPL2 0x80 /* Fast data detection for RC6 */
+
+/* Knobs for protocol decoding - will document when/if we use them */
+#define ENE_CIRPF 0xFEC2
+#define ENE_CIRHIGH 0xFEC3
+#define ENE_CIRBIT 0xFEC4
+#define ENE_CIRSTART 0xFEC5
+#define ENE_CIRSTART2 0xFEC6
+
+/* Actual register which contains RLC RX data - read by firmware */
+#define ENE_CIRDAT_IN 0xFEC7
+
+
+/* RLC configuration - sample period (1us resolution) + idle mode */
+#define ENE_CIRRLC_CFG 0xFEC8
+#define ENE_CIRRLC_CFG_OVERFLOW 0x80 /* interrupt on overflows if set */
+#define ENE_DEFAULT_SAMPLE_PERIOD 50
+
+/* Two byte RLC TX buffer */
+#define ENE_CIRRLC_OUT0 0xFEC9
+#define ENE_CIRRLC_OUT1 0xFECA
+#define ENE_CIRRLC_OUT_PULSE 0x80 /* Transmitted sample is pulse */
+#define ENE_CIRRLC_OUT_MASK 0x7F
+
+
+/* Carrier detect setting
+ * Low nibble - number of carrier pulses to average
+ * High nibble - number of initial carrier pulses to discard
+ */
+#define ENE_CIRCAR_PULS 0xFECB
+/* detected RX carrier period (resolution: 500 ns) */
+#define ENE_CIRCAR_PRD 0xFECC
+#define ENE_CIRCAR_PRD_VALID 0x80 /* register content valid */
-/* Unknown TX setting - TX sample period ??? */
-#define ENE_TX_UNK1 0xFECB /* set to 0x63 */
+/* detected RX carrier pulse width (resolution: 500 ns) */
+#define ENE_CIRCAR_HPRD 0xFECD
-/* Current received carrier period */
-#define ENE_RX_CARRIER 0xFECC /* RX period (500 ns) */
-#define ENE_RX_CARRIER_VALID 0x80 /* Register content valid */
+/* TX period (resolution: 500 ns, minimum 2)*/
+#define ENE_CIRMOD_PRD 0xFECE
+#define ENE_CIRMOD_PRD_POL 0x80 /* TX carrier polarity*/
+#define ENE_CIRMOD_PRD_MAX 0x7F /* 15.87 kHz */
+#define ENE_CIRMOD_PRD_MIN 0x02 /* 1 MHz */
-/* TX period (1/carrier) */
-#define ENE_TX_PERIOD 0xFECE /* TX period (500 ns) */
-#define ENE_TX_PERIOD_UNKBIT 0x80 /* This bit set on transmit*/
-#define ENE_TX_PERIOD_PULSE 0xFECF /* TX pulse period (500 ns)*/
+/* TX pulse width (resolution: 500 ns)*/
+#define ENE_CIRMOD_HPRD 0xFECF
/* Hardware versions */
-#define ENE_HW_VERSION 0xFF00 /* hardware revision */
+#define ENE_ECHV 0xFF00 /* hardware revision */
#define ENE_PLLFRH 0xFF16
#define ENE_PLLFRL 0xFF17
+#define ENE_DEFAULT_PLL_FREQ 1000
-#define ENE_HW_UNK 0xFF1D
-#define ENE_HW_UNK_CLR 0x04
-#define ENE_HW_VER_MAJOR 0xFF1E /* chip version */
-#define ENE_HW_VER_MINOR 0xFF1F
-#define ENE_HW_VER_OLD 0xFD00
-
-/* Normal/Learning carrier ranges - only valid if we have learning input*/
-/* TODO: test */
-#define ENE_NORMAL_RX_LOW 34
-#define ENE_NORMAL_RX_HI 38
+#define ENE_ECSTS 0xFF1D
+#define ENE_ECSTS_RSRVD 0x04
-/* Tx carrier range */
-/* Hardware might be able to do more, but this range is enough for
- all purposes */
-#define ENE_TX_PERIOD_MAX 32 /* corresponds to 29.4 kHz */
-#define ENE_TX_PERIOD_MIN 16 /* corrsponds to 62.5 kHz */
-
-
-
-/* Minimal and maximal gaps */
-
-/* Normal case:
- Minimal gap is 0x7F * sample period
- Maximum gap depends on hardware.
- For KB3926B, it is unlimited, for newer models its around
- 250000, after which HW stops sending samples, and that is
- not possible to change */
-
-/* Fan case:
- Both minimal and maximal gaps are same, and equal to 0xFFF * 0x61
- And there is nothing to change this setting
-*/
-
-#define ENE_MAXGAP 250000
-#define ENE_MINGAP (127 * sample_period)
+#define ENE_ECVER_MAJOR 0xFF1E /* chip version */
+#define ENE_ECVER_MINOR 0xFF1F
+#define ENE_HW_VER_OLD 0xFD00
/******************************************************************************/
@@ -171,46 +180,60 @@
#define ENE_HW_B 1 /* 3926B */
#define ENE_HW_C 2 /* 3926C */
-#define ENE_HW_D 3 /* 3926D */
+#define ENE_HW_D 3 /* 3926D or later */
#define ene_printk(level, text, ...) \
- printk(level ENE_DRIVER_NAME ": " text, ## __VA_ARGS__)
+ printk(level ENE_DRIVER_NAME ": " text "\n", ## __VA_ARGS__)
-#define ene_dbg(text, ...) \
- if (debug) \
- printk(KERN_DEBUG \
- ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+#define ene_notice(text, ...) ene_printk(KERN_NOTICE, text, ## __VA_ARGS__)
+#define ene_warn(text, ...) ene_printk(KERN_WARNING, text, ## __VA_ARGS__)
-#define ene_dbg_verbose(text, ...) \
- if (debug > 1) \
- printk(KERN_DEBUG \
- ENE_DRIVER_NAME ": " text "\n" , ## __VA_ARGS__)
+#define __dbg(level, format, ...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG ENE_DRIVER_NAME \
+ ": " format "\n", ## __VA_ARGS__); \
+ } while (0)
+
+
+#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
+#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__)
+
+#define MS_TO_NS(msec) ((msec) * 1000)
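The debug module parameter acts as a simple level threshold for these macros; for example:

/* debug=1 enables dbg(), debug=2 adds dbg_verbose(),
 * and dbg_regs() needs debug=3 or higher. */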
struct ene_device {
struct pnp_dev *pnp_dev;
struct input_dev *idev;
struct ir_dev_props *props;
- int in_use;
/* hw IO settings */
- unsigned long hw_io;
+ long hw_io;
int irq;
spinlock_t hw_lock;
/* HW features */
int hw_revision; /* hardware revision */
- bool hw_learning_and_tx_capable; /* learning capable */
- bool hw_gpio40_learning; /* gpio40 is learning */
- bool hw_fan_as_normal_input; /* fan input is used as */
- /* regular input */
+ bool hw_use_gpio_0a; /* gpio0a is demodulated input*/
+ bool hw_extra_buffer; /* hardware has 'extra buffer' */
+ bool hw_fan_input; /* fan input is IR data source */
+ bool hw_learning_and_tx_capable; /* learning & tx capable */
+ int pll_freq;
+ int buffer_len;
+
+ /* Extra RX buffer location */
+ int extra_buf1_address;
+ int extra_buf1_len;
+ int extra_buf2_address;
+ int extra_buf2_len;
+
/* HW state*/
- int rx_pointer; /* hw pointer to rx buffer */
+ int r_pointer; /* pointer to next sample to read */
+ int w_pointer; /* pointer to next sample hw will write */
bool rx_fan_input_inuse; /* is fan input in use for rx*/
int tx_reg; /* current reg used for TX */
u8 saved_conf1; /* saved FEC0 reg */
-
- /* TX sample handling */
unsigned int tx_sample; /* current sample for TX */
bool tx_sample_pulse; /* current sample is pulse */
@@ -229,7 +252,11 @@ struct ene_device {
int transmitter_mask;
/* RX settings */
- bool learning_enabled; /* learning input enabled */
+ bool learning_mode_enabled; /* learning input enabled */
bool carrier_detect_enabled; /* carrier detect enabled */
int rx_period_adjust;
+ bool rx_enabled;
};
+
+static int ene_irq_status(struct ene_device *dev);
+static void ene_rx_read_hw_pointer(struct ene_device *dev);
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
index faed5a332c71..bc118066bc38 100644
--- a/drivers/media/IR/imon.c
+++ b/drivers/media/IR/imon.c
@@ -1,7 +1,7 @@
/*
* imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
*
- * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
+ * Copyright(C) 2010 Jarod Wilson <jarod@wilsonet.com>
* Portions based on the original lirc_imon driver,
* Copyright(C) 2004 Venky Raju(dev@venky.ws)
*
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@
#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "imon"
-#define MOD_VERSION "0.9.1"
+#define MOD_VERSION "0.9.2"
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
@@ -121,21 +123,26 @@ struct imon_context {
u16 vendor; /* usb vendor ID */
u16 product; /* usb product ID */
- struct input_dev *idev; /* input device for remote */
+ struct input_dev *rdev; /* input device for remote */
+ struct input_dev *idev; /* input device for panel & IR mouse */
struct input_dev *touch; /* input device for touchscreen */
+ spinlock_t kc_lock; /* make sure we get keycodes right */
u32 kc; /* current input keycode */
u32 last_keycode; /* last reported input keycode */
+ u32 rc_scancode; /* the computed remote scancode */
+ u8 rc_toggle; /* the computed remote toggle bit */
u64 ir_type; /* iMON or MCE (RC6) IR protocol? */
- u8 mce_toggle_bit; /* last mce toggle bit */
bool release_code; /* some keys send a release code */
u8 display_type; /* store the display type */
bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
+ char name_rdev[128]; /* rc input device name */
+ char phys_rdev[64]; /* rc input device phys path */
+
char name_idev[128]; /* input device name */
char phys_idev[64]; /* input device phys path */
- struct timer_list itimer; /* input device timer, need for rc6 */
char name_touch[128]; /* touch screen name */
char phys_touch[64]; /* touch screen phys path */
@@ -289,6 +296,9 @@ static const struct {
{ 0x000100000000ffeell, KEY_VOLUMEUP },
{ 0x010000000000ffeell, KEY_VOLUMEDOWN },
{ 0x000000000100ffeell, KEY_MUTE },
+ /* 0xffdc iMON MCE VFD */
+ { 0x00010000ffffffeell, KEY_VOLUMEUP },
+ { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -307,7 +317,7 @@ MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
+MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
static int display_type;
@@ -365,15 +375,14 @@ static int display_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
interface = usb_find_interface(&imon_driver, subminor);
if (!interface) {
- err("%s: could not find interface for minor %d",
- __func__, subminor);
+ pr_err("could not find interface for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
ictx = usb_get_intfdata(interface);
if (!ictx) {
- err("%s: no context found for minor %d", __func__, subminor);
+ pr_err("no context found for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -381,10 +390,10 @@ static int display_open(struct inode *inode, struct file *file)
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: display not supported by device", __func__);
+ pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (ictx->display_isopen) {
- err("%s: display port is already open", __func__);
+ pr_err("display port is already open\n");
retval = -EBUSY;
} else {
ictx->display_isopen = true;
@@ -411,17 +420,17 @@ static int display_close(struct inode *inode, struct file *file)
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: display not supported by device", __func__);
+ pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (!ictx->display_isopen) {
- err("%s: display is not open", __func__);
+ pr_err("display is not open\n");
retval = -EIO;
} else {
ictx->display_isopen = false;
@@ -500,19 +509,19 @@ static int send_packet(struct imon_context *ictx)
if (retval) {
ictx->tx.busy = false;
smp_rmb(); /* ensure later readers know we're not busy */
- err("%s: error submitting urb(%d)", __func__, retval);
+ pr_err("error submitting urb(%d)\n", retval);
} else {
/* Wait for transmission to complete (or abort) */
mutex_unlock(&ictx->lock);
retval = wait_for_completion_interruptible(
&ictx->tx.finished);
if (retval)
- err("%s: task interrupted", __func__);
+ pr_err("task interrupted\n");
mutex_lock(&ictx->lock);
retval = ictx->tx.status;
if (retval)
- err("%s: packet tx failed (%d)", __func__, retval);
+ pr_err("packet tx failed (%d)\n", retval);
}
kfree(control_req);
@@ -544,12 +553,12 @@ static int send_associate_24g(struct imon_context *ictx)
0x00, 0x00, 0x00, 0x20 };
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
if (!ictx->dev_present_intf0) {
- err("%s: no iMON device present", __func__);
+ pr_err("no iMON device present\n");
return -ENODEV;
}
@@ -577,7 +586,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
int i;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
@@ -638,8 +647,7 @@ static int send_set_imon_clock(struct imon_context *ictx,
memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
retval = send_packet(ictx);
if (retval) {
- err("%s: send_packet failed for packet %d",
- __func__, i);
+ pr_err("send_packet failed for packet %d\n", i);
break;
}
}
@@ -778,7 +786,7 @@ static struct attribute *imon_display_sysfs_entries[] = {
NULL
};
-static struct attribute_group imon_display_attribute_group = {
+static struct attribute_group imon_display_attr_group = {
.attrs = imon_display_sysfs_entries
};
@@ -787,7 +795,7 @@ static struct attribute *imon_rf_sysfs_entries[] = {
NULL
};
-static struct attribute_group imon_rf_attribute_group = {
+static struct attribute_group imon_rf_attr_group = {
.attrs = imon_rf_sysfs_entries
};
@@ -815,20 +823,20 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->dev_present_intf0) {
- err("%s: no iMON device present", __func__);
+ pr_err("no iMON device present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes <= 0 || n_bytes > 32) {
- err("%s: invalid payload size", __func__);
+ pr_err("invalid payload size\n");
retval = -EINVAL;
goto exit;
}
@@ -854,8 +862,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- err("%s: send packet failed for packet #%d",
- __func__, seq/2);
+ pr_err("send packet failed for packet #%d\n", seq / 2);
goto exit;
} else {
seq += 2;
@@ -869,8 +876,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
ictx->usb_tx_buf[7] = (unsigned char) seq;
retval = send_packet(ictx);
if (retval)
- err("%s: send packet failed for packet #%d",
- __func__, seq / 2);
+ pr_err("send packet failed for packet #%d\n", seq / 2);
exit:
mutex_unlock(&ictx->lock);
@@ -899,21 +905,20 @@ static ssize_t lcd_write(struct file *file, const char *buf,
ictx = file->private_data;
if (!ictx) {
- err("%s: no context for device", __func__);
+ pr_err("no context for device\n");
return -ENODEV;
}
mutex_lock(&ictx->lock);
if (!ictx->display_supported) {
- err("%s: no iMON display present", __func__);
+ pr_err("no iMON display present\n");
retval = -ENODEV;
goto exit;
}
if (n_bytes != 8) {
- err("%s: invalid payload size: %d (expecting 8)",
- __func__, (int) n_bytes);
+ pr_err("invalid payload size: %d (expected 8)\n", (int)n_bytes);
retval = -EINVAL;
goto exit;
}
@@ -925,7 +930,7 @@ static ssize_t lcd_write(struct file *file, const char *buf,
retval = send_packet(ictx);
if (retval) {
- err("%s: send packet failed!", __func__);
+ pr_err("send packet failed!\n");
goto exit;
} else {
dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
@@ -958,17 +963,6 @@ static void usb_tx_callback(struct urb *urb)
}
/**
- * mce/rc6 keypresses have no distinct release code, use timer
- */
-static void imon_mce_timeout(unsigned long data)
-{
- struct imon_context *ictx = (struct imon_context *)data;
-
- input_report_key(ictx->idev, ictx->last_keycode, 0);
- input_sync(ictx->idev);
-}
-
-/**
* report touchscreen input
*/
static void imon_touch_display_timeout(unsigned long data)
@@ -1008,14 +1002,11 @@ int imon_ir_change_protocol(void *priv, u64 ir_type)
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
pad_mouse = false;
- init_timer(&ictx->itimer);
- ictx->itimer.data = (unsigned long)ictx;
- ictx->itimer.function = imon_mce_timeout;
break;
case IR_TYPE_UNKNOWN:
case IR_TYPE_OTHER:
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
- if (pad_stabilize)
+ if (pad_stabilize && !nomouse)
pad_mouse = true;
else {
dev_dbg(dev, "PAD stabilize functionality disabled\n");
@@ -1027,7 +1018,7 @@ int imon_ir_change_protocol(void *priv, u64 ir_type)
default:
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
- if (pad_stabilize)
+ if (pad_stabilize && !nomouse)
pad_mouse = true;
else {
dev_dbg(dev, "PAD stabilize functionality disabled\n");
@@ -1149,20 +1140,21 @@ static int stabilize(int a, int b, u16 timeout, u16 threshold)
return result;
}
-static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
+static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode)
{
- u32 scancode = be32_to_cpu(hw_code);
u32 keycode;
u32 release;
bool is_release_code = false;
/* Look for the initial press of a button */
- keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+ keycode = ir_g_keycode_from_table(ictx->rdev, scancode);
+ ictx->rc_toggle = 0x0;
+ ictx->rc_scancode = scancode;
/* Look for the release of a button */
if (keycode == KEY_RESERVED) {
release = scancode & ~0x4000;
- keycode = ir_g_keycode_from_table(ictx->idev, release);
+ keycode = ir_g_keycode_from_table(ictx->rdev, release);
if (keycode != KEY_RESERVED)
is_release_code = true;
}
@@ -1172,9 +1164,8 @@ static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
return keycode;
}
-static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
+static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
{
- u32 scancode = be32_to_cpu(hw_code);
u32 keycode;
#define MCE_KEY_MASK 0x7000
@@ -1188,18 +1179,21 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
 * but we can't OR them into all codes, as some keys are decoded in
* a different way w/o the same use of the toggle bit...
*/
- if ((scancode >> 24) & 0x80)
+ if (scancode & 0x80000000)
scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
- keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+ ictx->rc_scancode = scancode;
+ keycode = ir_g_keycode_from_table(ictx->rdev, scancode);
+
+	/* not used in mce mode, but make sure we know it's false */
+ ictx->release_code = false;
return keycode;
}
-static u32 imon_panel_key_lookup(u64 hw_code)
+static u32 imon_panel_key_lookup(u64 code)
{
int i;
- u64 code = be64_to_cpu(hw_code);
u32 keycode = KEY_RESERVED;
for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
@@ -1219,6 +1213,9 @@ static bool imon_mouse_event(struct imon_context *ictx,
u8 right_shift = 1;
bool mouse_input = true;
int dir = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* newer iMON device PAD or mouse button */
if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
@@ -1250,6 +1247,8 @@ static bool imon_mouse_event(struct imon_context *ictx,
} else
mouse_input = false;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
if (mouse_input) {
dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
@@ -1264,7 +1263,9 @@ static bool imon_mouse_event(struct imon_context *ictx,
buf[1] >> right_shift & 0x1);
}
input_sync(ictx->idev);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = ictx->kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
}
return mouse_input;
@@ -1286,8 +1287,8 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
int dir = 0;
char rel_x = 0x00, rel_y = 0x00;
u16 timeout, threshold;
- u64 temp_key;
- u32 remote_key;
+ u32 scancode = KEY_RESERVED;
+ unsigned long flags;
/*
* The imon directional pad functions more like a touchpad. Bytes 3 & 4
@@ -1311,26 +1312,36 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
+ spin_lock_irqsave(&ictx->kc_lock,
+ flags);
ictx->kc = KEY_UNKNOWN;
+ spin_unlock_irqrestore(&ictx->kc_lock,
+ flags);
return;
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- memcpy(&temp_key, buf, sizeof(temp_key));
- remote_key = (u32) (le64_to_cpu(temp_key)
- & 0xffffffff);
- ictx->kc = imon_remote_key_lookup(ictx,
- remote_key);
+ scancode = be32_to_cpu(*((u32 *)buf));
}
} else {
+ /*
+ * Hack alert: instead of using keycodes, we have
+ * to use hard-coded scancodes here...
+ */
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
- ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ if (rel_y > 0)
+ scancode = 0x01007f00; /* KEY_DOWN */
+ else
+ scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
- ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ if (rel_x > 0)
+ scancode = 0x0100007f; /* KEY_RIGHT */
+ else
+ scancode = 0x01000080; /* KEY_LEFT */
}
}
@@ -1367,34 +1378,56 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->kc = KEY_UNKNOWN;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- memcpy(&temp_key, buf, sizeof(temp_key));
- remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
- ictx->kc = imon_remote_key_lookup(ictx, remote_key);
+ scancode = be32_to_cpu(*((u32 *)buf));
} else {
+ /*
+ * Hack alert: instead of using keycodes, we have
+ * to use hard-coded scancodes here...
+ */
if (abs(rel_y) > abs(rel_x)) {
buf[2] = (rel_y > 0) ? 0x7F : 0x80;
buf[3] = 0;
- ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ if (rel_y > 0)
+ scancode = 0x01007f00; /* KEY_DOWN */
+ else
+ scancode = 0x01008000; /* KEY_UP */
} else {
buf[2] = 0;
buf[3] = (rel_x > 0) ? 0x7F : 0x80;
- ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ if (rel_x > 0)
+ scancode = 0x0100007f; /* KEY_RIGHT */
+ else
+ scancode = 0x01000080; /* KEY_LEFT */
}
}
}
+
+ if (scancode) {
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+ ictx->kc = imon_remote_key_lookup(ictx, scancode);
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+ }
}
+/**
+ * figure out if this is a press or a release. We don't actually
+ * care about repeats, as those will be auto-generated within the IR
+ * subsystem for repeating scancodes.
+ */
static int imon_parse_press_type(struct imon_context *ictx,
unsigned char *buf, u8 ktype)
{
int press_type = 0;
- int rep_delay = ictx->idev->rep[REP_DELAY];
- int rep_period = ictx->idev->rep[REP_PERIOD];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* key release of 0x02XXXXXX key */
if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
@@ -1410,22 +1443,10 @@ static int imon_parse_press_type(struct imon_context *ictx,
buf[2] == 0x81 && buf[3] == 0xb7)
ictx->kc = ictx->last_keycode;
- /* mce-specific button handling */
+ /* mce-specific button handling, no keyup events */
else if (ktype == IMON_KEY_MCE) {
- /* initial press */
- if (ictx->kc != ictx->last_keycode
- || buf[2] != ictx->mce_toggle_bit) {
- ictx->last_keycode = ictx->kc;
- ictx->mce_toggle_bit = buf[2];
- press_type = 1;
- mod_timer(&ictx->itimer,
- jiffies + msecs_to_jiffies(rep_delay));
- /* repeat */
- } else {
- press_type = 2;
- mod_timer(&ictx->itimer,
- jiffies + msecs_to_jiffies(rep_period));
- }
+ ictx->rc_toggle = buf[2];
+ press_type = 1;
/* incoherent or irrelevant data */
} else if (ictx->kc == KEY_RESERVED)
@@ -1439,6 +1460,8 @@ static int imon_parse_press_type(struct imon_context *ictx,
else
press_type = 1;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
return press_type;
}
@@ -1451,41 +1474,45 @@ static void imon_incoming_packet(struct imon_context *ictx,
int len = urb->actual_length;
unsigned char *buf = urb->transfer_buffer;
struct device *dev = ictx->dev;
+ unsigned long flags;
u32 kc;
bool norelease = false;
int i;
- u64 temp_key;
- u64 panel_key = 0;
- u32 remote_key = 0;
- struct input_dev *idev = NULL;
+ u64 scancode;
+ struct input_dev *rdev = NULL;
+ struct ir_input_dev *irdev = NULL;
int press_type = 0;
int msec;
struct timeval t;
static struct timeval prev_time = { 0, 0 };
- u8 ktype = IMON_KEY_IMON;
+ u8 ktype;
- idev = ictx->idev;
+ rdev = ictx->rdev;
+ irdev = input_get_drvdata(rdev);
/* filter out junk data on the older 0xffdc imon devices */
if ((buf[0] == 0xff) && (buf[1] == 0xff) && (buf[2] == 0xff))
return;
/* Figure out what key was pressed */
- memcpy(&temp_key, buf, sizeof(temp_key));
if (len == 8 && buf[7] == 0xee) {
+ scancode = be64_to_cpu(*((u64 *)buf));
ktype = IMON_KEY_PANEL;
- panel_key = le64_to_cpu(temp_key);
- kc = imon_panel_key_lookup(panel_key);
+ kc = imon_panel_key_lookup(scancode);
} else {
- remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ scancode = be32_to_cpu(*((u32 *)buf));
if (ictx->ir_type == IR_TYPE_RC6) {
+ ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
- kc = imon_mce_key_lookup(ictx, remote_key);
- } else
- kc = imon_remote_key_lookup(ictx, remote_key);
+ kc = imon_mce_key_lookup(ictx, scancode);
+ } else {
+ ktype = IMON_KEY_IMON;
+ kc = imon_remote_key_lookup(ictx, scancode);
+ }
}
+ spin_lock_irqsave(&ictx->kc_lock, flags);
/* keyboard/mouse mode toggle button */
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
@@ -1493,6 +1520,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
} else {
ictx->pad_mouse = 0;
@@ -1501,11 +1529,13 @@ static void imon_incoming_packet(struct imon_context *ictx,
}
ictx->kc = kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
buf[7] == 0x86) {
imon_touch_event(ictx, buf);
+ return;
/* look for mouse events with pad in mouse mode */
} else if (ictx->pad_mouse) {
@@ -1533,36 +1563,55 @@ static void imon_incoming_packet(struct imon_context *ictx,
if (press_type < 0)
goto not_input_data;
+ spin_lock_irqsave(&ictx->kc_lock, flags);
if (ictx->kc == KEY_UNKNOWN)
goto unknown_key;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
+ if (ktype != IMON_KEY_PANEL) {
+ if (press_type == 0)
+ ir_keyup(irdev);
+ else {
+ ir_keydown(rdev, ictx->rc_scancode, ictx->rc_toggle);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+ ictx->last_keycode = ictx->kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+ }
+ return;
+ }
- /* KEY_MUTE repeats from MCE and knob need to be suppressed */
- if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode)
- && (buf[7] == 0xee || ktype == IMON_KEY_MCE)) {
+ /* Only panel type events left to process now */
+ spin_lock_irqsave(&ictx->kc_lock, flags);
+
+ /* KEY_MUTE repeats from knob need to be suppressed */
+ if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
do_gettimeofday(&t);
msec = tv2int(&t, &prev_time);
prev_time = t;
- if (msec < idev->rep[REP_DELAY])
+ if (msec < ictx->idev->rep[REP_DELAY]) {
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
+ }
}
+ kc = ictx->kc;
- input_report_key(idev, ictx->kc, press_type);
- input_sync(idev);
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
- /* panel keys and some remote keys don't generate a release */
- if (panel_key || norelease) {
- input_report_key(idev, ictx->kc, 0);
- input_sync(idev);
- }
+ input_report_key(ictx->idev, kc, press_type);
+ input_sync(ictx->idev);
- ictx->last_keycode = ictx->kc;
+ /* panel keys don't generate a release */
+ input_report_key(ictx->idev, kc, 0);
+ input_sync(ictx->idev);
+
+ ictx->last_keycode = kc;
return;
unknown_key:
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__,
- (panel_key ? be64_to_cpu(panel_key) :
- be32_to_cpu(remote_key)));
+ (long long)scancode);
return;
not_input_data:
@@ -1653,31 +1702,205 @@ static void usb_rx_callback_intf1(struct urb *urb)
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
+/*
+ * The 0x15c2:0xffdc device ID was used for umpteen different imon
+ * devices, and all of them constantly spew interrupts, even when there
+ * is no actual data to report. However, byte 6 of this buffer looks like
+ * it's unique across device variants, so we're trying to key off that to
+ * figure out which display type (if any) and what IR protocol the device
+ * actually supports. These devices have their IR protocol hard-coded into
+ * their firmware, they can't be changed on the fly like the newer hardware.
+ */
+static void imon_get_ffdc_type(struct imon_context *ictx)
+{
+ u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
+ u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
+ u64 allowed_protos = IR_TYPE_OTHER;
+
+ switch (ffdc_cfg_byte) {
+ /* iMON Knob, no display, iMON IR + vol knob */
+ case 0x21:
+ dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
+ ictx->display_supported = false;
+ break;
+ /* iMON 2.4G LT (usb stick), no display, iMON RF */
+ case 0x4e:
+ dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
+ ictx->display_supported = false;
+ ictx->rf_device = true;
+ break;
+ /* iMON VFD, no IR (does have vol knob tho) */
+ case 0x35:
+ dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, iMON IR */
+ case 0x24:
+ case 0x85:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, MCE IR */
+ case 0x9e:
+ dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ default:
+ dev_info(ictx->dev, "Unknown 0xffdc device, "
+ "defaulting to VFD and iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+
+ printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
+
+ ictx->display_type = detected_display_type;
+ ictx->props->allowed_protos = allowed_protos;
+ ictx->ir_type = allowed_protos;
+}
+
+static void imon_set_display_type(struct imon_context *ictx)
+{
+ u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
+
+ /*
+ * Try to auto-detect the type of display if the user hasn't set
+ * it by hand via the display_type modparam. Default is VFD.
+ */
+
+ if (display_type == IMON_DISPLAY_TYPE_AUTO) {
+ switch (ictx->product) {
+ case 0xffdc:
+ /* set in imon_get_ffdc_type() */
+ configured_display_type = ictx->display_type;
+ break;
+ case 0x0034:
+ case 0x0035:
+ configured_display_type = IMON_DISPLAY_TYPE_VGA;
+ break;
+ case 0x0038:
+ case 0x0039:
+ case 0x0045:
+ configured_display_type = IMON_DISPLAY_TYPE_LCD;
+ break;
+ case 0x003c:
+ case 0x0041:
+ case 0x0042:
+ case 0x0043:
+ configured_display_type = IMON_DISPLAY_TYPE_NONE;
+ ictx->display_supported = false;
+ break;
+ case 0x0036:
+ case 0x0044:
+ default:
+ configured_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+ } else {
+ configured_display_type = display_type;
+ if (display_type == IMON_DISPLAY_TYPE_NONE)
+ ictx->display_supported = false;
+ else
+ ictx->display_supported = true;
+ dev_info(ictx->dev, "%s: overriding display type to %d via "
+ "modparam\n", __func__, display_type);
+ }
+
+ ictx->display_type = configured_display_type;
+}
+
+static struct input_dev *imon_init_rdev(struct imon_context *ictx)
+{
+ struct input_dev *rdev;
+ struct ir_dev_props *props;
+ int ret;
+ char *ir_codes = NULL;
+ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x88 };
+
+ rdev = input_allocate_device();
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (!rdev || !props) {
+ dev_err(ictx->dev, "remote control dev allocation failed\n");
+ goto out;
+ }
+
+ snprintf(ictx->name_rdev, sizeof(ictx->name_rdev),
+ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev,
+ sizeof(ictx->phys_rdev));
+ strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev));
+
+ rdev->name = ictx->name_rdev;
+ rdev->phys = ictx->phys_rdev;
+ usb_to_input_id(ictx->usbdev_intf0, &rdev->id);
+ rdev->dev.parent = ictx->dev;
+ rdev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_drvdata(rdev, ictx);
+
+ props->priv = ictx;
+ props->driver_type = RC_DRIVER_SCANCODE;
+ props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6; /* iMON PAD or MCE */
+ props->change_protocol = imon_ir_change_protocol;
+ ictx->props = props;
+
+ /* Enable front-panel buttons and/or knobs */
+ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
+ ret = send_packet(ictx);
+ /* Not fatal, but warn about it */
+ if (ret)
+ dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
+
+ if (ictx->product == 0xffdc)
+ imon_get_ffdc_type(ictx);
+
+ imon_set_display_type(ictx);
+
+ if (ictx->ir_type == IR_TYPE_RC6)
+ ir_codes = RC_MAP_IMON_MCE;
+ else
+ ir_codes = RC_MAP_IMON_PAD;
+
+ ret = ir_input_register(rdev, ir_codes, props, MOD_NAME);
+ if (ret < 0) {
+ dev_err(ictx->dev, "remote input dev register failed\n");
+ goto out;
+ }
+
+ return rdev;
+
+out:
+ kfree(props);
+ input_free_device(rdev);
+ return NULL;
+}
+
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
struct input_dev *idev;
- struct ir_dev_props *props;
int ret, i;
idev = input_allocate_device();
if (!idev) {
- dev_err(ictx->dev, "remote input dev allocation failed\n");
- goto idev_alloc_failed;
- }
-
- props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
- if (!props) {
- dev_err(ictx->dev, "remote ir dev props allocation failed\n");
- goto props_alloc_failed;
+ dev_err(ictx->dev, "input dev allocation failed\n");
+ goto out;
}
snprintf(ictx->name_idev, sizeof(ictx->name_idev),
- "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ "iMON Panel, Knob and Mouse(%04x:%04x)",
+ ictx->vendor, ictx->product);
idev->name = ictx->name_idev;
usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
sizeof(ictx->phys_idev));
- strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev));
+ strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev));
idev->phys = ictx->phys_idev;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
@@ -1693,30 +1916,20 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
__set_bit(kc, idev->keybit);
}
- props->priv = ictx;
- props->driver_type = RC_DRIVER_SCANCODE;
- /* IR_TYPE_OTHER maps to iMON PAD remote, IR_TYPE_RC6 to MCE remote */
- props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6;
- props->change_protocol = imon_ir_change_protocol;
- ictx->props = props;
-
usb_to_input_id(ictx->usbdev_intf0, &idev->id);
idev->dev.parent = ictx->dev;
+ input_set_drvdata(idev, ictx);
- ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
+ ret = input_register_device(idev);
if (ret < 0) {
- dev_err(ictx->dev, "remote input dev register failed\n");
- goto idev_register_failed;
+ dev_err(ictx->dev, "input dev register failed\n");
+ goto out;
}
return idev;
-idev_register_failed:
- kfree(props);
-props_alloc_failed:
+out:
input_free_device(idev);
-idev_alloc_failed:
-
return NULL;
}
@@ -1738,7 +1951,7 @@ static struct input_dev *imon_init_touch(struct imon_context *ictx)
usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
sizeof(ictx->phys_touch));
- strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch));
+ strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch));
touch->phys = ictx->phys_touch;
touch->evbit[0] =
@@ -1850,7 +2063,7 @@ static bool imon_find_endpoints(struct imon_context *ictx,
/* Input endpoint is mandatory */
if (!ir_ep_found)
- err("%s: no valid input (IR) endpoint found.", __func__);
+ pr_err("no valid input (IR) endpoint found\n");
ictx->tx_control = tx_control;
@@ -1888,6 +2101,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
}
mutex_init(&ictx->lock);
+ spin_lock_init(&ictx->kc_lock);
mutex_lock(&ictx->lock);
@@ -1913,6 +2127,12 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto idev_setup_failed;
}
+ ictx->rdev = imon_init_rdev(ictx);
+ if (!ictx->rdev) {
+ dev_err(dev, "%s: rc device setup failed\n", __func__);
+ goto rdev_setup_failed;
+ }
+
usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
usb_rcvintpipe(ictx->usbdev_intf0,
ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -1922,15 +2142,16 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
if (ret) {
- err("%s: usb_submit_urb failed for intf0 (%d)",
- __func__, ret);
+ pr_err("usb_submit_urb failed for intf0 (%d)\n", ret);
goto urb_submit_failed;
}
return ictx;
urb_submit_failed:
- ir_input_unregister(ictx->idev);
+ ir_input_unregister(ictx->rdev);
+rdev_setup_failed:
+ input_unregister_device(ictx->idev);
idev_setup_failed:
find_endpoint_failed:
mutex_unlock(&ictx->lock);
@@ -1954,7 +2175,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb) {
- err("%s: usb_alloc_urb failed for IR urb", __func__);
+ pr_err("usb_alloc_urb failed for IR urb\n");
goto rx_urb_alloc_failed;
}
@@ -1992,8 +2213,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
if (ret) {
- err("%s: usb_submit_urb failed for intf1 (%d)",
- __func__, ret);
+ pr_err("usb_submit_urb failed for intf1 (%d)\n", ret);
goto urb_submit_failed;
}
@@ -2012,116 +2232,6 @@ rx_urb_alloc_failed:
return NULL;
}
-/*
- * The 0x15c2:0xffdc device ID was used for umpteen different imon
- * devices, and all of them constantly spew interrupts, even when there
- * is no actual data to report. However, byte 6 of this buffer looks like
- * it's unique across device variants, so we're trying to key off that to
- * figure out which display type (if any) and what IR protocol the device
- * actually supports. These devices have their IR protocol hard-coded into
- * their firmware, they can't be changed on the fly like the newer hardware.
- */
-static void imon_get_ffdc_type(struct imon_context *ictx)
-{
- u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
- u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
- u64 allowed_protos = IR_TYPE_OTHER;
-
- switch (ffdc_cfg_byte) {
- /* iMON Knob, no display, iMON IR + vol knob */
- case 0x21:
- dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
- ictx->display_supported = false;
- break;
- /* iMON 2.4G LT (usb stick), no display, iMON RF */
- case 0x4e:
- dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
- ictx->display_supported = false;
- ictx->rf_device = true;
- break;
- /* iMON VFD, no IR (does have vol knob tho) */
- case 0x35:
- dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- /* iMON VFD, iMON IR */
- case 0x24:
- case 0x85:
- dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- /* iMON LCD, MCE IR */
- case 0x9e:
- case 0x9f:
- dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
- detected_display_type = IMON_DISPLAY_TYPE_LCD;
- allowed_protos = IR_TYPE_RC6;
- break;
- default:
- dev_info(ictx->dev, "Unknown 0xffdc device, "
- "defaulting to VFD and iMON IR");
- detected_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- }
-
- printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
-
- ictx->display_type = detected_display_type;
- ictx->props->allowed_protos = allowed_protos;
- ictx->ir_type = allowed_protos;
-}
-
-static void imon_set_display_type(struct imon_context *ictx,
- struct usb_interface *intf)
-{
- u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
-
- /*
- * Try to auto-detect the type of display if the user hasn't set
- * it by hand via the display_type modparam. Default is VFD.
- */
-
- if (display_type == IMON_DISPLAY_TYPE_AUTO) {
- switch (ictx->product) {
- case 0xffdc:
- /* set in imon_get_ffdc_type() */
- configured_display_type = ictx->display_type;
- break;
- case 0x0034:
- case 0x0035:
- configured_display_type = IMON_DISPLAY_TYPE_VGA;
- break;
- case 0x0038:
- case 0x0039:
- case 0x0045:
- configured_display_type = IMON_DISPLAY_TYPE_LCD;
- break;
- case 0x003c:
- case 0x0041:
- case 0x0042:
- case 0x0043:
- configured_display_type = IMON_DISPLAY_TYPE_NONE;
- ictx->display_supported = false;
- break;
- case 0x0036:
- case 0x0044:
- default:
- configured_display_type = IMON_DISPLAY_TYPE_VFD;
- break;
- }
- } else {
- configured_display_type = display_type;
- if (display_type == IMON_DISPLAY_TYPE_NONE)
- ictx->display_supported = false;
- else
- ictx->display_supported = true;
- dev_info(ictx->dev, "%s: overriding display type to %d via "
- "modparam\n", __func__, display_type);
- }
-
- ictx->display_type = configured_display_type;
-}
-
static void imon_init_display(struct imon_context *ictx,
struct usb_interface *intf)
{
@@ -2130,8 +2240,7 @@ static void imon_init_display(struct imon_context *ictx,
dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
/* set up sysfs entry for built-in clock */
- ret = sysfs_create_group(&intf->dev.kobj,
- &imon_display_attribute_group);
+ ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
if (ret)
dev_err(ictx->dev, "Could not create display sysfs "
"entries(%d)", ret);
@@ -2162,8 +2271,6 @@ static int __devinit imon_probe(struct usb_interface *interface,
struct imon_context *ictx = NULL;
struct imon_context *first_if_ctx = NULL;
u16 vendor, product;
- const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x88 };
code_length = BUF_CHUNK_SIZE * 8;
@@ -2185,7 +2292,7 @@ static int __devinit imon_probe(struct usb_interface *interface,
if (ifnum == 0) {
ictx = imon_init_intf0(interface);
if (!ictx) {
- err("%s: failed to initialize context!\n", __func__);
+ pr_err("failed to initialize context!\n");
ret = -ENODEV;
goto fail;
}
@@ -2194,7 +2301,7 @@ static int __devinit imon_probe(struct usb_interface *interface,
/* this is the secondary interface on the device */
ictx = imon_init_intf1(interface, first_if_ctx);
if (!ictx) {
- err("%s: failed to attach to context!\n", __func__);
+ pr_err("failed to attach to context!\n");
ret = -ENODEV;
goto fail;
}
@@ -2204,39 +2311,18 @@ static int __devinit imon_probe(struct usb_interface *interface,
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
- /* Enable front-panel buttons and/or knobs */
- memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
- ret = send_packet(ictx);
- /* Not fatal, but warn about it */
- if (ret)
- dev_info(dev, "failed to enable panel buttons "
- "and/or knobs\n");
-
- if (product == 0xffdc)
- imon_get_ffdc_type(ictx);
-
- imon_set_display_type(ictx, interface);
-
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
- &imon_rf_attribute_group);
+ &imon_rf_attr_group);
if (sysfs_err)
- err("%s: Could not create RF sysfs entries(%d)",
- __func__, sysfs_err);
+ pr_err("Could not create RF sysfs entries(%d)\n",
+ sysfs_err);
}
if (ictx->display_supported)
imon_init_display(ictx, interface);
}
- /* set IR protocol/remote type */
- ret = imon_ir_change_protocol(ictx, ictx->ir_type);
- if (ret) {
- dev_warn(dev, "%s: failed to set IR protocol, falling back "
- "to standard iMON protocol mode\n", __func__);
- ictx->ir_type = IR_TYPE_OTHER;
- }
-
dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
"usb<%d:%d> initialized\n", vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
@@ -2275,10 +2361,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
* sysfs_remove_group is safe to call even if sysfs_create_group
* hasn't been called
*/
- sysfs_remove_group(&interface->dev.kobj,
- &imon_display_attribute_group);
- sysfs_remove_group(&interface->dev.kobj,
- &imon_rf_attribute_group);
+ sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group);
+ sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group);
usb_set_intfdata(interface, NULL);
@@ -2291,7 +2375,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
if (ifnum == 0) {
ictx->dev_present_intf0 = false;
usb_kill_urb(ictx->rx_urb_intf0);
- ir_input_unregister(ictx->idev);
+ input_unregister_device(ictx->idev);
+ ir_input_unregister(ictx->rdev);
if (ictx->display_supported) {
if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
usb_deregister_dev(interface, &imon_lcd_class);
@@ -2311,11 +2396,8 @@ static void __devexit imon_disconnect(struct usb_interface *interface)
mutex_unlock(&ictx->lock);
if (!ictx->display_isopen)
free_imon_context(ictx);
- } else {
- if (ictx->ir_type == IR_TYPE_RC6)
- del_timer_sync(&ictx->itimer);
+ } else
mutex_unlock(&ictx->lock);
- }
mutex_unlock(&driver_lock);
@@ -2372,7 +2454,7 @@ static int __init imon_init(void)
rc = usb_register(&imon_driver);
if (rc) {
- err("%s: usb register failed(%d)", __func__, rc);
+ pr_err("usb register failed(%d)\n", rc);
rc = -ENODEV;
}
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
index a85a8c7c905a..81c936bd793f 100644
--- a/drivers/media/IR/ir-core-priv.h
+++ b/drivers/media/IR/ir-core-priv.h
@@ -17,6 +17,7 @@
#define _IR_RAW_EVENT
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <media/ir-core.h>
struct ir_raw_handler {
@@ -33,6 +34,7 @@ struct ir_raw_handler {
struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */
struct task_struct *thread;
+ spinlock_t lock;
struct kfifo kfifo; /* fifo for the pulse/space durations */
ktime_t last_event; /* when last event occurred */
enum raw_event_type last_type; /* last event type */
@@ -76,10 +78,22 @@ struct ir_raw_event_ctrl {
bool first;
bool toggle;
} jvc;
+ struct rc5_sz_dec {
+ int state;
+ u32 bits;
+ unsigned count;
+ unsigned wanted_bits;
+ } rc5_sz;
struct lirc_codec {
struct ir_input_dev *ir_dev;
struct lirc_driver *drv;
int carrier_low;
+
+ ktime_t gap_start;
+ u64 gap_duration;
+ bool gap;
+ bool send_timeout_reports;
+
} lirc;
};
@@ -107,13 +121,19 @@ static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
ev->duration -= duration;
}
+/* Returns true if event is normal pulse/space event */
+static inline bool is_timing_event(struct ir_raw_event ev)
+{
+ return !ev.carrier_report && !ev.reset;
+}
+
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-#define IS_RESET(ev) (ev.duration == 0)
/*
* Routines from ir-sysfs.c - Meant to be called only internally inside
* ir-core
*/
+int ir_register_input(struct input_dev *input_dev);
int ir_register_class(struct input_dev *input_dev);
void ir_unregister_class(struct input_dev *input_dev);
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
index 77a89c4de014..63dca6e5458b 100644
--- a/drivers/media/IR/ir-jvc-decoder.c
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -50,8 +50,9 @@ static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_JVC))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index c06b4d50a3dc..9186b45132ed 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -435,7 +435,7 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* This routine is used to signal that a key has been released on the
* remote control. It reports a keyup input event via input_report_key().
*/
-static void ir_keyup(struct ir_input_dev *ir)
+void ir_keyup(struct ir_input_dev *ir)
{
if (!ir->keypressed)
return;
@@ -445,6 +445,7 @@ static void ir_keyup(struct ir_input_dev *ir)
input_sync(ir->input_dev);
ir->keypressed = false;
}
+EXPORT_SYMBOL_GPL(ir_keyup);
/**
* ir_timer_keyup() - generates a keyup event after a timeout
@@ -640,6 +641,10 @@ int __ir_input_register(struct input_dev *input_dev,
goto out_event;
}
+ rc = ir_register_input(input_dev);
+ if (rc < 0)
+ goto out_event;
+
IR_dprintk(1, "Registered input device on %s for %s remote%s.\n",
driver_name, rc_tab->name,
(ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 1983cd3f3994..9fc0db9d344d 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -32,6 +32,7 @@
static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct lirc_codec *lirc = &ir_dev->raw->lirc;
int sample;
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_LIRC))
@@ -40,21 +41,57 @@ static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!ir_dev->raw->lirc.drv || !ir_dev->raw->lirc.drv->rbuf)
return -EINVAL;
- if (IS_RESET(ev))
+ /* Packet start */
+ if (ev.reset)
return 0;
- IR_dprintk(2, "LIRC data transfer started (%uus %s)\n",
- TO_US(ev.duration), TO_STR(ev.pulse));
+ /* Carrier reports */
+ if (ev.carrier_report) {
+ sample = LIRC_FREQUENCY(ev.carrier);
+
+ /* Packet end */
+ } else if (ev.timeout) {
+
+ if (lirc->gap)
+ return 0;
+
+ lirc->gap_start = ktime_get();
+ lirc->gap = true;
+ lirc->gap_duration = ev.duration;
+
+ if (!lirc->send_timeout_reports)
+ return 0;
+
+ sample = LIRC_TIMEOUT(ev.duration / 1000);
- sample = ev.duration / 1000;
- if (ev.pulse)
- sample |= PULSE_BIT;
+ /* Normal sample */
+ } else {
+
+ if (lirc->gap) {
+ int gap_sample;
+
+ lirc->gap_duration += ktime_to_ns(ktime_sub(ktime_get(),
+ lirc->gap_start));
+
+ /* Convert to ms and cap by LIRC_VALUE_MASK */
+ do_div(lirc->gap_duration, 1000);
+ lirc->gap_duration = min(lirc->gap_duration,
+ (u64)LIRC_VALUE_MASK);
+
+ gap_sample = LIRC_SPACE(lirc->gap_duration);
+ lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf,
+ (unsigned char *) &gap_sample);
+ lirc->gap = false;
+ }
+
+ sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
+ LIRC_SPACE(ev.duration / 1000);
+ }
lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf,
(unsigned char *) &sample);
wake_up(&ir_dev->raw->lirc.drv->rbuf->wait_poll);
-
return 0;
}
@@ -102,7 +139,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
struct ir_input_dev *ir_dev;
int ret = 0;
void *drv_data;
- unsigned long val = 0;
+ __u32 val = 0, tmp;
lirc = lirc_get_pdata(filep);
if (!lirc)
@@ -115,7 +152,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
drv_data = ir_dev->props->priv;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
- ret = get_user(val, (unsigned long *)arg);
+ ret = get_user(val, (__u32 *)arg);
if (ret)
return ret;
}
@@ -130,22 +167,20 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
case LIRC_SET_SEND_MODE:
if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK))
return -EINVAL;
- break;
+ return 0;
/* TX settings */
case LIRC_SET_TRANSMITTER_MASK:
- if (ir_dev->props->s_tx_mask)
- ret = ir_dev->props->s_tx_mask(drv_data, (u32)val);
- else
+ if (!ir_dev->props->s_tx_mask)
return -EINVAL;
- break;
+
+ return ir_dev->props->s_tx_mask(drv_data, val);
case LIRC_SET_SEND_CARRIER:
- if (ir_dev->props->s_tx_carrier)
- ir_dev->props->s_tx_carrier(drv_data, (u32)val);
- else
+ if (!ir_dev->props->s_tx_carrier)
return -EINVAL;
- break;
+
+ return ir_dev->props->s_tx_carrier(drv_data, val);
case LIRC_SET_SEND_DUTY_CYCLE:
if (!ir_dev->props->s_tx_duty_cycle)
@@ -154,39 +189,42 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (val <= 0 || val >= 100)
return -EINVAL;
- ir_dev->props->s_tx_duty_cycle(ir_dev->props->priv, val);
- break;
+ return ir_dev->props->s_tx_duty_cycle(drv_data, val);
/* RX settings */
case LIRC_SET_REC_CARRIER:
- if (ir_dev->props->s_rx_carrier_range)
- ret = ir_dev->props->s_rx_carrier_range(
- ir_dev->props->priv,
- ir_dev->raw->lirc.carrier_low, val);
- else
+ if (!ir_dev->props->s_rx_carrier_range)
return -ENOSYS;
- if (!ret)
- ir_dev->raw->lirc.carrier_low = 0;
- break;
+ if (val <= 0)
+ return -EINVAL;
+
+ return ir_dev->props->s_rx_carrier_range(drv_data,
+ ir_dev->raw->lirc.carrier_low, val);
case LIRC_SET_REC_CARRIER_RANGE:
- if (val >= 0)
- ir_dev->raw->lirc.carrier_low = val;
- break;
+ if (val <= 0)
+ return -EINVAL;
+ ir_dev->raw->lirc.carrier_low = val;
+ return 0;
case LIRC_GET_REC_RESOLUTION:
val = ir_dev->props->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
- if (ir_dev->props->s_learning_mode)
- return ir_dev->props->s_learning_mode(
- ir_dev->props->priv, !!val);
- else
+ if (!ir_dev->props->s_learning_mode)
return -ENOSYS;
+ return ir_dev->props->s_learning_mode(drv_data, !!val);
+
+ case LIRC_SET_MEASURE_CARRIER_MODE:
+ if (!ir_dev->props->s_carrier_report)
+ return -ENOSYS;
+
+ return ir_dev->props->s_carrier_report(drv_data, !!val);
+
/* Generic timeout support */
case LIRC_GET_MIN_TIMEOUT:
if (!ir_dev->props->max_timeout)
@@ -201,10 +239,20 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
break;
case LIRC_SET_REC_TIMEOUT:
- if (val < ir_dev->props->min_timeout ||
- val > ir_dev->props->max_timeout)
- return -EINVAL;
- ir_dev->props->timeout = val * 1000;
+ if (!ir_dev->props->max_timeout)
+ return -ENOSYS;
+
+ tmp = val * 1000;
+
+ if (tmp < ir_dev->props->min_timeout ||
+ tmp > ir_dev->props->max_timeout)
+ return -EINVAL;
+
+ ir_dev->props->timeout = tmp;
+ break;
+
+ case LIRC_SET_REC_TIMEOUT_REPORTS:
+ lirc->send_timeout_reports = !!val;
break;
default:
@@ -212,7 +260,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
}
if (_IOC_DIR(cmd) & _IOC_READ)
- ret = put_user(val, (unsigned long *)arg);
+ ret = put_user(val, (__u32 *)arg);
return ret;
}
@@ -231,6 +279,9 @@ static struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ir_lirc_ioctl,
+#endif
.read = lirc_dev_fop_read,
.poll = lirc_dev_fop_poll,
.open = lirc_dev_fop_open,
@@ -278,6 +329,10 @@ static int ir_lirc_register(struct input_dev *input_dev)
if (ir_dev->props->s_learning_mode)
features |= LIRC_CAN_USE_WIDEBAND_RECEIVER;
+ if (ir_dev->props->s_carrier_report)
+ features |= LIRC_CAN_MEASURE_CARRIER;
+
+
if (ir_dev->props->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
index d597421d6547..70993f79c8a2 100644
--- a/drivers/media/IR/ir-nec-decoder.c
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -54,8 +54,9 @@ static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_NEC))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 8e0e1b1f8c87..a06a07e4e0b1 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -39,22 +39,34 @@ static int ir_raw_event_thread(void *data)
struct ir_raw_event ev;
struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
+ int retval;
while (!kthread_should_stop()) {
- try_to_freeze();
- mutex_lock(&ir_raw_handler_lock);
+ spin_lock_irq(&raw->lock);
+ retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+
+ if (!retval) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ set_current_state(TASK_RUNNING);
- while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev)) {
- list_for_each_entry(handler, &ir_raw_handler_list, list)
- handler->decode(raw->input_dev, ev);
- raw->prev_ev = ev;
+ spin_unlock_irq(&raw->lock);
+ schedule();
+ continue;
}
- mutex_unlock(&ir_raw_handler_lock);
+ spin_unlock_irq(&raw->lock);
+
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ BUG_ON(retval != sizeof(ev));
+
+ mutex_lock(&ir_raw_handler_lock);
+ list_for_each_entry(handler, &ir_raw_handler_list, list)
+ handler->decode(raw->input_dev, ev);
+ raw->prev_ev = ev;
+ mutex_unlock(&ir_raw_handler_lock);
}
return 0;
@@ -77,7 +89,7 @@ int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
if (!ir->raw)
return -EINVAL;
- IR_dprintk(2, "sample: (05%dus %s)\n",
+ IR_dprintk(2, "sample: (%05dus %s)\n",
TO_US(ev->duration), TO_STR(ev->pulse));
if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
@@ -162,7 +174,7 @@ int ir_raw_event_store_with_filter(struct input_dev *input_dev,
if (ir->idle && !ev->pulse)
return 0;
else if (ir->idle)
- ir_raw_event_set_idle(input_dev, 0);
+ ir_raw_event_set_idle(input_dev, false);
if (!raw->this_ev.duration) {
raw->this_ev = *ev;
@@ -175,48 +187,35 @@ int ir_raw_event_store_with_filter(struct input_dev *input_dev,
	/* Enter idle mode if necessary */
if (!ev->pulse && ir->props->timeout &&
- raw->this_ev.duration >= ir->props->timeout)
- ir_raw_event_set_idle(input_dev, 1);
+ raw->this_ev.duration >= ir->props->timeout) {
+ ir_raw_event_set_idle(input_dev, true);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
-void ir_raw_event_set_idle(struct input_dev *input_dev, int idle)
+/**
+ * ir_raw_event_set_idle() - hint the ir core if device is receiving
+ * IR data or not
+ * @input_dev: the struct input_dev device descriptor
+ * @idle: the hint value
+ */
+void ir_raw_event_set_idle(struct input_dev *input_dev, bool idle)
{
struct ir_input_dev *ir = input_get_drvdata(input_dev);
struct ir_raw_event_ctrl *raw = ir->raw;
- ktime_t now;
- u64 delta;
- if (!ir->props)
+ if (!ir->props || !ir->raw)
return;
- if (!ir->raw)
- goto out;
+ IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
if (idle) {
- IR_dprintk(2, "enter idle mode\n");
- raw->last_event = ktime_get();
- } else {
- IR_dprintk(2, "exit idle mode\n");
-
- now = ktime_get();
- delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
-
- WARN_ON(raw->this_ev.pulse);
-
- raw->this_ev.duration =
- min(raw->this_ev.duration + delta,
- (u64)IR_MAX_DURATION);
-
+ raw->this_ev.timeout = true;
ir_raw_event_store(input_dev, &raw->this_ev);
-
- if (raw->this_ev.duration == IR_MAX_DURATION)
- ir_raw_event_reset(input_dev);
-
- raw->this_ev.duration = 0;
+ init_ir_raw_event(&raw->this_ev);
}
-out:
+
if (ir->props->s_idle)
ir->props->s_idle(ir->props->priv, idle);
ir->idle = idle;
@@ -232,11 +231,14 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
void ir_raw_event_handle(struct input_dev *input_dev)
{
struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ unsigned long flags;
if (!ir->raw)
return;
+ spin_lock_irqsave(&ir->raw->lock, flags);
wake_up_process(ir->raw->thread);
+ spin_unlock_irqrestore(&ir->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
@@ -275,6 +277,7 @@ int ir_raw_event_register(struct input_dev *input_dev)
return rc;
}
+ spin_lock_init(&ir->raw->lock);
ir->raw->thread = kthread_run(ir_raw_event_thread, ir->raw,
"rc%u", (unsigned int)ir->devno);
diff --git a/drivers/media/IR/ir-rc5-decoder.c b/drivers/media/IR/ir-rc5-decoder.c
index df4770d978ad..572ed4ca8c68 100644
--- a/drivers/media/IR/ir-rc5-decoder.c
+++ b/drivers/media/IR/ir-rc5-decoder.c
@@ -55,8 +55,9 @@ static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC5))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-rc5-sz-decoder.c b/drivers/media/IR/ir-rc5-sz-decoder.c
new file mode 100644
index 000000000000..7c413501a3f7
--- /dev/null
+++ b/drivers/media/IR/ir-rc5-sz-decoder.c
@@ -0,0 +1,154 @@
+/* ir-rc5-sz-decoder.c - handle RC5 Streamzap IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This code handles the 15 bit RC5-ish protocol used by the Streamzap
+ * PC Remote.
+ * It considers a carrier of 36 kHz, with a total of 15 bits, where
+ * the first two bits are start bits, and a third one is a filing bit
+ */
+
+#include "ir-core-priv.h"
+
+#define RC5_SZ_NBITS 15
+#define RC5_UNIT 888888 /* ns */
+#define RC5_BIT_START (1 * RC5_UNIT)
+#define RC5_BIT_END (1 * RC5_UNIT)
+
+enum rc5_sz_state {
+ STATE_INACTIVE,
+ STATE_BIT_START,
+ STATE_BIT_END,
+ STATE_FINISHED,
+};
+
+/**
+ * ir_rc5_sz_decode() - Decode one RC-5 Streamzap pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc5_sz_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct rc5_sz_dec *data = &ir_dev->raw->rc5_sz;
+ u8 toggle, command, system;
+ u32 scancode;
+
+ if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC5_SZ))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC5-sz decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ data->state = STATE_BIT_START;
+ data->count = 1;
+ data->wanted_bits = RC5_SZ_NBITS;
+ decrease_duration(&ev, RC5_BIT_START);
+ goto again;
+
+ case STATE_BIT_START:
+ if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
+ break;
+
+ data->bits <<= 1;
+ if (!ev.pulse)
+ data->bits |= 1;
+ data->count++;
+ data->state = STATE_BIT_END;
+ return 0;
+
+ case STATE_BIT_END:
+ if (!is_transition(&ev, &ir_dev->raw->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BIT_START;
+
+ decrease_duration(&ev, RC5_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ /* RC5-sz */
+ command = (data->bits & 0x0003F) >> 0;
+ system = (data->bits & 0x02FC0) >> 6;
+ toggle = (data->bits & 0x01000) ? 1 : 0;
+ scancode = system << 6 | command;
+
+ IR_dprintk(1, "RC5-sz scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC5-sz decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static struct ir_raw_handler rc5_sz_handler = {
+ .protocols = IR_TYPE_RC5_SZ,
+ .decode = ir_rc5_sz_decode,
+};
+
+static int __init ir_rc5_sz_decode_init(void)
+{
+ ir_raw_handler_register(&rc5_sz_handler);
+
+ printk(KERN_INFO "IR RC5 (streamzap) protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc5_sz_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc5_sz_handler);
+}
+
+module_init(ir_rc5_sz_decode_init);
+module_exit(ir_rc5_sz_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("RC5 (streamzap) IR protocol decoder");
diff --git a/drivers/media/IR/ir-rc6-decoder.c b/drivers/media/IR/ir-rc6-decoder.c
index f1624b8279bc..d25da91f44ff 100644
--- a/drivers/media/IR/ir-rc6-decoder.c
+++ b/drivers/media/IR/ir-rc6-decoder.c
@@ -85,8 +85,9 @@ static int ir_rc6_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_RC6))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-sony-decoder.c b/drivers/media/IR/ir-sony-decoder.c
index b9074f07c7a0..2d15730822bc 100644
--- a/drivers/media/IR/ir-sony-decoder.c
+++ b/drivers/media/IR/ir-sony-decoder.c
@@ -48,8 +48,9 @@ static int ir_sony_decode(struct input_dev *input_dev, struct ir_raw_event ev)
if (!(ir_dev->raw->enabled_protocols & IR_TYPE_SONY))
return 0;
- if (IS_RESET(ev)) {
- data->state = STATE_INACTIVE;
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 46d42467f9b4..38423a8da871 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -43,6 +43,7 @@ static struct {
{ IR_TYPE_RC6, "rc-6" },
{ IR_TYPE_JVC, "jvc" },
{ IR_TYPE_SONY, "sony" },
+ { IR_TYPE_RC5_SZ, "rc-5-sz" },
{ IR_TYPE_LIRC, "lirc" },
};
@@ -67,6 +68,10 @@ static ssize_t show_protocols(struct device *d,
char *tmp = buf;
int i;
+ /* Device is being removed */
+ if (!ir_dev)
+ return -EINVAL;
+
if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
enabled = ir_dev->rc_tab.ir_type;
allowed = ir_dev->props->allowed_protos;
@@ -122,6 +127,10 @@ static ssize_t store_protocols(struct device *d,
int rc, i, count = 0;
unsigned long flags;
+ /* Device is being removed */
+ if (!ir_dev)
+ return -EINVAL;
+
if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
type = ir_dev->rc_tab.ir_type;
else if (ir_dev->raw)
@@ -256,8 +265,6 @@ static struct device_type rc_dev_type = {
*/
int ir_register_class(struct input_dev *input_dev)
{
- int rc;
- const char *path;
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
int devno = find_first_zero_bit(&ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -266,17 +273,28 @@ int ir_register_class(struct input_dev *input_dev)
return devno;
ir_dev->dev.type = &rc_dev_type;
+ ir_dev->devno = devno;
ir_dev->dev.class = &ir_input_class;
ir_dev->dev.parent = input_dev->dev.parent;
+ input_dev->dev.parent = &ir_dev->dev;
dev_set_name(&ir_dev->dev, "rc%d", devno);
dev_set_drvdata(&ir_dev->dev, ir_dev);
- rc = device_register(&ir_dev->dev);
- if (rc)
- return rc;
+ return device_register(&ir_dev->dev);
+};
+
+/**
+ * ir_register_input - registers ir input device with input subsystem
+ * @input_dev: the struct input_dev descriptor of the device
+ */
+
+int ir_register_input(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ int rc;
+ const char *path;
- input_dev->dev.parent = &ir_dev->dev;
rc = input_register_device(input_dev);
if (rc < 0) {
device_del(&ir_dev->dev);
@@ -292,11 +310,9 @@ int ir_register_class(struct input_dev *input_dev)
path ? path : "N/A");
kfree(path);
- ir_dev->devno = devno;
- set_bit(devno, &ir_core_dev_number);
-
+ set_bit(ir_dev->devno, &ir_core_dev_number);
return 0;
-};
+}
/**
 * ir_unregister_class() - removes the sysfs for
@@ -309,6 +325,7 @@ void ir_unregister_class(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ input_set_drvdata(input_dev, NULL);
clear_bit(ir_dev->devno, &ir_core_dev_number);
input_unregister_device(input_dev);
device_del(&ir_dev->dev);
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
index 950e5d953c6f..3194d391bbd4 100644
--- a/drivers/media/IR/keymaps/Makefile
+++ b/drivers/media/IR/keymaps/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ rc-alink-dtu-m.o \
+ rc-anysee.o \
rc-apac-viewcomp.o \
rc-asus-pc39.o \
rc-ati-tv-wonder-hd-600.o \
@@ -8,7 +10,9 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-dvbt.o \
rc-avermedia-m135a.o \
rc-avermedia-m733a-rm-k6.o \
+ rc-avermedia-rm-ks.o \
rc-avertv-303.o \
+ rc-azurewave-ad-tu700.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -16,6 +20,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-cinergy.o \
rc-dib0700-nec.o \
rc-dib0700-rc5.o \
+ rc-digitalnow-tinytwin.o \
+ rc-digittrade.o \
rc-dm1105-nec.o \
rc-dntv-live-dvb-t.o \
rc-dntv-live-dvbt-pro.o \
@@ -38,8 +44,12 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-kaiomy.o \
rc-kworld-315u.o \
rc-kworld-plus-tv-analog.o \
+ rc-leadtek-y04g0051.o \
rc-lirc.o \
+ rc-lme2510.o \
rc-manli.o \
+ rc-msi-digivox-ii.o \
+ rc-msi-digivox-iii.o \
rc-msi-tvanywhere.o \
rc-msi-tvanywhere-plus.o \
rc-nebula.o \
@@ -58,14 +68,18 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-purpletv.o \
rc-pv951.o \
rc-rc5-hauppauge-new.o \
- rc-rc5-streamzap.o \
rc-rc5-tv.o \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
+ rc-streamzap.o \
rc-tbs-nec.o \
rc-terratec-cinergy-xs.o \
+ rc-terratec-slim.o \
rc-tevii-nec.o \
+ rc-total-media-in-hand.o \
+ rc-trekstor.o \
rc-tt-1500.o \
+ rc-twinhan1027.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
rc-winfast.o \
diff --git a/drivers/media/IR/keymaps/rc-alink-dtu-m.c b/drivers/media/IR/keymaps/rc-alink-dtu-m.c
new file mode 100644
index 000000000000..ddfee7f8093d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-alink-dtu-m.c
@@ -0,0 +1,68 @@
+/*
+ * A-Link DTU(m) remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* A-Link DTU(m) slim remote, 6 rows, 3 columns. */
+static struct ir_scancode alink_dtu_m[] = {
+ { 0x0800, KEY_VOLUMEUP },
+ { 0x0801, KEY_1 },
+ { 0x0802, KEY_3 },
+ { 0x0803, KEY_7 },
+ { 0x0804, KEY_9 },
+ { 0x0805, KEY_NEW }, /* symbol: PIP */
+ { 0x0806, KEY_0 },
+ { 0x0807, KEY_CHANNEL }, /* JUMP */
+ { 0x080d, KEY_5 },
+ { 0x080f, KEY_2 },
+ { 0x0812, KEY_POWER2 },
+ { 0x0814, KEY_CHANNELUP },
+ { 0x0816, KEY_VOLUMEDOWN },
+ { 0x0818, KEY_6 },
+ { 0x081a, KEY_MUTE },
+ { 0x081b, KEY_8 },
+ { 0x081c, KEY_4 },
+ { 0x081d, KEY_CHANNELDOWN },
+};
+
+static struct rc_keymap alink_dtu_m_map = {
+ .map = {
+ .scan = alink_dtu_m,
+ .size = ARRAY_SIZE(alink_dtu_m),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_ALINK_DTU_M,
+ }
+};
+
+static int __init init_rc_map_alink_dtu_m(void)
+{
+ return ir_register_map(&alink_dtu_m_map);
+}
+
+static void __exit exit_rc_map_alink_dtu_m(void)
+{
+ ir_unregister_map(&alink_dtu_m_map);
+}
+
+module_init(init_rc_map_alink_dtu_m)
+module_exit(exit_rc_map_alink_dtu_m)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-anysee.c b/drivers/media/IR/keymaps/rc-anysee.c
new file mode 100644
index 000000000000..30d70498cfed
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-anysee.c
@@ -0,0 +1,93 @@
+/*
+ * Anysee remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode anysee[] = {
+ { 0x0800, KEY_0 },
+ { 0x0801, KEY_1 },
+ { 0x0802, KEY_2 },
+ { 0x0803, KEY_3 },
+ { 0x0804, KEY_4 },
+ { 0x0805, KEY_5 },
+ { 0x0806, KEY_6 },
+ { 0x0807, KEY_7 },
+ { 0x0808, KEY_8 },
+ { 0x0809, KEY_9 },
+ { 0x080a, KEY_POWER2 }, /* [red power button] */
+ { 0x080b, KEY_VIDEO }, /* [*] MODE */
+ { 0x080c, KEY_CHANNEL }, /* [symbol counterclockwise arrow] */
+ { 0x080d, KEY_NEXT }, /* [>>|] */
+ { 0x080e, KEY_MENU }, /* MENU */
+ { 0x080f, KEY_EPG }, /* [EPG] */
+ { 0x0810, KEY_CLEAR }, /* EXIT */
+ { 0x0811, KEY_CHANNELUP },
+ { 0x0812, KEY_VOLUMEDOWN },
+ { 0x0813, KEY_VOLUMEUP },
+ { 0x0814, KEY_CHANNELDOWN },
+ { 0x0815, KEY_OK },
+ { 0x0816, KEY_RADIO }, /* [symbol TV/radio] */
+ { 0x0817, KEY_INFO }, /* [i] */
+ { 0x0818, KEY_PREVIOUS }, /* [|<<] */
+ { 0x0819, KEY_FAVORITES }, /* FAV. */
+ { 0x081a, KEY_SUBTITLE }, /* Subtitle */
+ { 0x081b, KEY_CAMERA }, /* [symbol camera] */
+ { 0x081c, KEY_YELLOW },
+ { 0x081d, KEY_RED },
+ { 0x081e, KEY_LANGUAGE }, /* [symbol Second Audio Program] */
+ { 0x081f, KEY_GREEN },
+ { 0x0820, KEY_SLEEP }, /* Sleep */
+ { 0x0821, KEY_SCREEN }, /* 16:9 / 4:3 */
+ { 0x0822, KEY_ZOOM }, /* SIZE */
+ { 0x0824, KEY_FN }, /* [F1] */
+ { 0x0825, KEY_FN }, /* [F2] */
+ { 0x0842, KEY_MUTE }, /* symbol mute */
+ { 0x0844, KEY_BLUE },
+ { 0x0847, KEY_TEXT }, /* TEXT */
+ { 0x0848, KEY_STOP },
+ { 0x0849, KEY_RECORD },
+ { 0x0850, KEY_PLAY },
+ { 0x0851, KEY_PAUSE },
+};
+
+static struct rc_keymap anysee_map = {
+ .map = {
+ .scan = anysee,
+ .size = ARRAY_SIZE(anysee),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_ANYSEE,
+ }
+};
+
+static int __init init_rc_map_anysee(void)
+{
+ return ir_register_map(&anysee_map);
+}
+
+static void __exit exit_rc_map_anysee(void)
+{
+ ir_unregister_map(&anysee_map);
+}
+
+module_init(init_rc_map_anysee)
+module_exit(exit_rc_map_anysee)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-asus-pc39.c b/drivers/media/IR/keymaps/rc-asus-pc39.c
index 2aa068cd6c75..2996e0a3b8d5 100644
--- a/drivers/media/IR/keymaps/rc-asus-pc39.c
+++ b/drivers/media/IR/keymaps/rc-asus-pc39.c
@@ -20,56 +20,56 @@
static struct ir_scancode asus_pc39[] = {
/* Keys 0 to 9 */
- { 0x15, KEY_0 },
- { 0x29, KEY_1 },
- { 0x2d, KEY_2 },
- { 0x2b, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0d, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x31, KEY_7 },
- { 0x35, KEY_8 },
- { 0x33, KEY_9 },
+ { 0x082a, KEY_0 },
+ { 0x0816, KEY_1 },
+ { 0x0812, KEY_2 },
+ { 0x0814, KEY_3 },
+ { 0x0836, KEY_4 },
+ { 0x0832, KEY_5 },
+ { 0x0834, KEY_6 },
+ { 0x080e, KEY_7 },
+ { 0x080a, KEY_8 },
+ { 0x080c, KEY_9 },
- { 0x3e, KEY_RADIO }, /* radio */
- { 0x03, KEY_MENU }, /* dvd/menu */
- { 0x2a, KEY_VOLUMEUP },
- { 0x19, KEY_VOLUMEDOWN },
- { 0x37, KEY_UP },
- { 0x3b, KEY_DOWN },
- { 0x27, KEY_LEFT },
- { 0x2f, KEY_RIGHT },
- { 0x25, KEY_VIDEO }, /* video */
- { 0x39, KEY_AUDIO }, /* music */
+ { 0x0801, KEY_RADIO }, /* radio */
+ { 0x083c, KEY_MENU }, /* dvd/menu */
+ { 0x0815, KEY_VOLUMEUP },
+ { 0x0826, KEY_VOLUMEDOWN },
+ { 0x0808, KEY_UP },
+ { 0x0804, KEY_DOWN },
+ { 0x0818, KEY_LEFT },
+ { 0x0810, KEY_RIGHT },
+ { 0x081a, KEY_VIDEO }, /* video */
+ { 0x0806, KEY_AUDIO }, /* music */
- { 0x21, KEY_TV }, /* tv */
- { 0x1d, KEY_EXIT }, /* back */
- { 0x0a, KEY_CHANNELUP }, /* channel / program + */
- { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1a, KEY_ENTER }, /* enter */
+ { 0x081e, KEY_TV }, /* tv */
+ { 0x0822, KEY_EXIT }, /* back */
+ { 0x0835, KEY_CHANNELUP }, /* channel / program + */
+ { 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x0825, KEY_ENTER }, /* enter */
- { 0x06, KEY_PAUSE }, /* play/pause */
- { 0x1e, KEY_PREVIOUS }, /* rew */
- { 0x26, KEY_NEXT }, /* forward */
- { 0x0e, KEY_REWIND }, /* backward << */
- { 0x3a, KEY_FASTFORWARD }, /* forward >> */
- { 0x36, KEY_STOP },
- { 0x2e, KEY_RECORD }, /* recording */
- { 0x16, KEY_POWER }, /* the button that reads "close" */
+ { 0x0839, KEY_PAUSE }, /* play/pause */
+ { 0x0821, KEY_PREVIOUS }, /* rew */
+ { 0x0819, KEY_NEXT }, /* forward */
+ { 0x0831, KEY_REWIND }, /* backward << */
+ { 0x0805, KEY_FASTFORWARD }, /* forward >> */
+ { 0x0809, KEY_STOP },
+ { 0x0811, KEY_RECORD }, /* recording */
+ { 0x0829, KEY_POWER }, /* the button that reads "close" */
- { 0x11, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MACRO }, /* recall */
- { 0x23, KEY_HOME }, /* home */
- { 0x05, KEY_PVR }, /* picture */
- { 0x3d, KEY_MUTE }, /* mute */
- { 0x01, KEY_DVD }, /* dvd */
+ { 0x082e, KEY_ZOOM }, /* full screen */
+ { 0x082c, KEY_MACRO }, /* recall */
+ { 0x081c, KEY_HOME }, /* home */
+ { 0x083a, KEY_PVR }, /* picture */
+ { 0x0802, KEY_MUTE }, /* mute */
+ { 0x083e, KEY_DVD }, /* dvd */
};
static struct rc_keymap asus_pc39_map = {
.map = {
.scan = asus_pc39,
.size = ARRAY_SIZE(asus_pc39),
- .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .ir_type = IR_TYPE_RC5,
.name = RC_MAP_ASUS_PC39,
}
};
diff --git a/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c b/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c
new file mode 100644
index 000000000000..9ee60906c861
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-rm-ks.c
@@ -0,0 +1,79 @@
+/*
+ * AverMedia RM-KS remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Initial keytable is from Jose Alberto Reguero <jareguero@telefonica.net>
+ and Felipe Morales Moreno <felipe.morales.moreno@gmail.com> */
+/* FIXME: mappings may not be 100% correct */
+static struct ir_scancode avermedia_rm_ks[] = {
+ { 0x0501, KEY_POWER2 },
+ { 0x0502, KEY_CHANNELUP },
+ { 0x0503, KEY_CHANNELDOWN },
+ { 0x0504, KEY_VOLUMEUP },
+ { 0x0505, KEY_VOLUMEDOWN },
+ { 0x0506, KEY_MUTE },
+ { 0x0507, KEY_RIGHT },
+ { 0x0508, KEY_PROG1 },
+ { 0x0509, KEY_1 },
+ { 0x050a, KEY_2 },
+ { 0x050b, KEY_3 },
+ { 0x050c, KEY_4 },
+ { 0x050d, KEY_5 },
+ { 0x050e, KEY_6 },
+ { 0x050f, KEY_7 },
+ { 0x0510, KEY_8 },
+ { 0x0511, KEY_9 },
+ { 0x0512, KEY_0 },
+ { 0x0513, KEY_AUDIO },
+ { 0x0515, KEY_EPG },
+ { 0x0516, KEY_PLAY },
+ { 0x0517, KEY_RECORD },
+ { 0x0518, KEY_STOP },
+ { 0x051c, KEY_BACK },
+ { 0x051d, KEY_FORWARD },
+ { 0x054d, KEY_LEFT },
+ { 0x0556, KEY_ZOOM },
+};
+
+static struct rc_keymap avermedia_rm_ks_map = {
+ .map = {
+ .scan = avermedia_rm_ks,
+ .size = ARRAY_SIZE(avermedia_rm_ks),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_RM_KS,
+ }
+};
+
+static int __init init_rc_map_avermedia_rm_ks(void)
+{
+ return ir_register_map(&avermedia_rm_ks_map);
+}
+
+static void __exit exit_rc_map_avermedia_rm_ks(void)
+{
+ ir_unregister_map(&avermedia_rm_ks_map);
+}
+
+module_init(init_rc_map_avermedia_rm_ks)
+module_exit(exit_rc_map_avermedia_rm_ks)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c
new file mode 100644
index 000000000000..e0876147d471
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-azurewave-ad-tu700.c
@@ -0,0 +1,102 @@
+/*
+ * TwinHan AzureWave AD-TU700(704J) remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode azurewave_ad_tu700[] = {
+ { 0x0000, KEY_TAB }, /* Tab */
+ { 0x0001, KEY_2 },
+ { 0x0002, KEY_CHANNELDOWN },
+ { 0x0003, KEY_1 },
+ { 0x0004, KEY_MENU }, /* Record List */
+ { 0x0005, KEY_CHANNELUP },
+ { 0x0006, KEY_3 },
+ { 0x0007, KEY_SLEEP }, /* Hibernate */
+ { 0x0008, KEY_VIDEO }, /* A/V */
+ { 0x0009, KEY_4 },
+ { 0x000a, KEY_VOLUMEDOWN },
+ { 0x000c, KEY_CANCEL }, /* Cancel */
+ { 0x000d, KEY_7 },
+ { 0x000e, KEY_AGAIN }, /* Recall */
+ { 0x000f, KEY_TEXT }, /* Teletext */
+ { 0x0010, KEY_MUTE },
+ { 0x0011, KEY_RECORD },
+ { 0x0012, KEY_FASTFORWARD }, /* FF >> */
+ { 0x0013, KEY_BACK }, /* Back */
+ { 0x0014, KEY_PLAY },
+ { 0x0015, KEY_0 },
+ { 0x0016, KEY_POWER2 }, /* [red power button] */
+ { 0x0017, KEY_FAVORITES }, /* Favorite List */
+ { 0x0018, KEY_RED },
+ { 0x0019, KEY_8 },
+ { 0x001a, KEY_STOP },
+ { 0x001b, KEY_9 },
+ { 0x001c, KEY_EPG }, /* Info/EPG */
+ { 0x001d, KEY_5 },
+ { 0x001e, KEY_VOLUMEUP },
+ { 0x001f, KEY_6 },
+ { 0x0040, KEY_REWIND }, /* FR << */
+ { 0x0041, KEY_PREVIOUS }, /* Replay */
+ { 0x0042, KEY_NEXT }, /* Skip */
+ { 0x0043, KEY_SUBTITLE }, /* Subtitle / CC */
+ { 0x0045, KEY_KPPLUS }, /* Zoom+ */
+ { 0x0046, KEY_KPMINUS }, /* Zoom- */
+ { 0x0047, KEY_NEW }, /* PIP */
+ { 0x0048, KEY_INFO }, /* Preview */
+ { 0x0049, KEY_MODE }, /* L/R */
+ { 0x004a, KEY_CLEAR }, /* Clear */
+ { 0x004b, KEY_UP }, /* up arrow */
+ { 0x004c, KEY_PAUSE },
+ { 0x004d, KEY_ZOOM }, /* Full Screen */
+ { 0x004e, KEY_LEFT }, /* left arrow */
+ { 0x004f, KEY_OK }, /* Enter / ok */
+ { 0x0050, KEY_LANGUAGE }, /* SAP */
+ { 0x0051, KEY_DOWN }, /* down arrow */
+ { 0x0052, KEY_RIGHT }, /* right arrow */
+ { 0x0053, KEY_GREEN },
+ { 0x0054, KEY_CAMERA }, /* Capture */
+ { 0x005e, KEY_YELLOW },
+ { 0x005f, KEY_BLUE },
+};
+
+static struct rc_keymap azurewave_ad_tu700_map = {
+ .map = {
+ .scan = azurewave_ad_tu700,
+ .size = ARRAY_SIZE(azurewave_ad_tu700),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AZUREWAVE_AD_TU700,
+ }
+};
+
+static int __init init_rc_map_azurewave_ad_tu700(void)
+{
+ return ir_register_map(&azurewave_ad_tu700_map);
+}
+
+static void __exit exit_rc_map_azurewave_ad_tu700(void)
+{
+ ir_unregister_map(&azurewave_ad_tu700_map);
+}
+
+module_init(init_rc_map_azurewave_ad_tu700)
+module_exit(exit_rc_map_azurewave_ad_tu700)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c
new file mode 100644
index 000000000000..63e469e2dd21
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-digitalnow-tinytwin.c
@@ -0,0 +1,98 @@
+/*
+ * DigitalNow TinyTwin remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode digitalnow_tinytwin[] = {
+ { 0x0000, KEY_MUTE }, /* [symbol speaker] */
+ { 0x0001, KEY_VOLUMEUP },
+ { 0x0002, KEY_POWER2 }, /* TV [power button] */
+ { 0x0003, KEY_2 },
+ { 0x0004, KEY_3 },
+ { 0x0005, KEY_4 },
+ { 0x0006, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0008, KEY_8 },
+ { 0x0009, KEY_NUMERIC_STAR }, /* [*] */
+ { 0x000a, KEY_0 },
+ { 0x000b, KEY_NUMERIC_POUND }, /* [#] */
+ { 0x000c, KEY_RIGHT }, /* [right arrow] */
+ { 0x000d, KEY_HOMEPAGE }, /* [symbol home] Start */
+ { 0x000e, KEY_RED }, /* [red] Videos */
+ { 0x0010, KEY_POWER }, /* PC [power button] */
+ { 0x0011, KEY_YELLOW }, /* [yellow] Pictures */
+ { 0x0012, KEY_DOWN }, /* [down arrow] */
+ { 0x0013, KEY_GREEN }, /* [green] Music */
+ { 0x0014, KEY_CYCLEWINDOWS }, /* BACK */
+ { 0x0015, KEY_FAVORITES }, /* MORE */
+ { 0x0016, KEY_UP }, /* [up arrow] */
+ { 0x0017, KEY_LEFT }, /* [left arrow] */
+ { 0x0018, KEY_OK }, /* OK */
+ { 0x0019, KEY_BLUE }, /* [blue] MyTV */
+ { 0x001a, KEY_REWIND }, /* REW [<<] */
+ { 0x001b, KEY_PLAY }, /* PLAY */
+ { 0x001c, KEY_5 },
+ { 0x001d, KEY_9 },
+ { 0x001e, KEY_VOLUMEDOWN },
+ { 0x001f, KEY_1 },
+ { 0x0040, KEY_STOP }, /* STOP */
+ { 0x0042, KEY_PAUSE }, /* PAUSE */
+ { 0x0043, KEY_SCREEN }, /* Aspect */
+ { 0x0044, KEY_FORWARD }, /* FWD [>>] */
+ { 0x0045, KEY_NEXT }, /* SKIP */
+ { 0x0048, KEY_RECORD }, /* RECORD */
+ { 0x0049, KEY_VIDEO }, /* RTV */
+ { 0x004a, KEY_EPG }, /* Guide */
+ { 0x004b, KEY_CHANNELUP },
+ { 0x004c, KEY_HELP }, /* Help */
+ { 0x004d, KEY_RADIO }, /* Radio */
+ { 0x004f, KEY_CHANNELDOWN },
+ { 0x0050, KEY_DVD }, /* DVD */
+ { 0x0051, KEY_AUDIO }, /* Audio */
+ { 0x0052, KEY_TITLE }, /* Title */
+ { 0x0053, KEY_NEW }, /* [symbol PIP?] */
+ { 0x0057, KEY_MENU }, /* Mouse */
+ { 0x005a, KEY_PREVIOUS }, /* REPLAY */
+};
+
+static struct rc_keymap digitalnow_tinytwin_map = {
+ .map = {
+ .scan = digitalnow_tinytwin,
+ .size = ARRAY_SIZE(digitalnow_tinytwin),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_DIGITALNOW_TINYTWIN,
+ }
+};
+
+static int __init init_rc_map_digitalnow_tinytwin(void)
+{
+ return ir_register_map(&digitalnow_tinytwin_map);
+}
+
+static void __exit exit_rc_map_digitalnow_tinytwin(void)
+{
+ ir_unregister_map(&digitalnow_tinytwin_map);
+}
+
+module_init(init_rc_map_digitalnow_tinytwin)
+module_exit(exit_rc_map_digitalnow_tinytwin)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-digittrade.c b/drivers/media/IR/keymaps/rc-digittrade.c
new file mode 100644
index 000000000000..5dece78e19c5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-digittrade.c
@@ -0,0 +1,82 @@
+/*
+ * Digittrade DVB-T USB Stick remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Digittrade DVB-T USB Stick remote controller. */
+/* Imported from af9015.h.
+ Initial keytable was from Alain Kalker <miki@dds.nl> */
+
+/* Digittrade DVB-T USB Stick */
+static struct ir_scancode digittrade[] = {
+ { 0x0000, KEY_9 },
+ { 0x0001, KEY_EPG }, /* EPG */
+ { 0x0002, KEY_VOLUMEDOWN }, /* Vol Dn */
+ { 0x0003, KEY_TEXT }, /* TELETEXT */
+ { 0x0004, KEY_8 },
+ { 0x0005, KEY_MUTE }, /* MUTE */
+ { 0x0006, KEY_POWER2 }, /* POWER */
+ { 0x0009, KEY_ZOOM }, /* FULLSCREEN */
+ { 0x000a, KEY_RECORD }, /* RECORD */
+ { 0x000d, KEY_SUBTITLE }, /* SUBTITLE */
+ { 0x000e, KEY_STOP }, /* STOP */
+ { 0x0010, KEY_OK }, /* RETURN */
+ { 0x0011, KEY_2 },
+ { 0x0012, KEY_4 },
+ { 0x0015, KEY_3 },
+ { 0x0016, KEY_5 },
+ { 0x0017, KEY_CHANNELDOWN }, /* Ch Dn */
+ { 0x0019, KEY_CHANNELUP }, /* CH Up */
+ { 0x001a, KEY_PAUSE }, /* PAUSE */
+ { 0x001b, KEY_1 },
+ { 0x001d, KEY_AUDIO }, /* DUAL SOUND */
+ { 0x001e, KEY_PLAY }, /* PLAY */
+ { 0x001f, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x0040, KEY_VOLUMEUP }, /* Vol Up */
+ { 0x0048, KEY_7 },
+ { 0x004c, KEY_6 },
+ { 0x004d, KEY_PLAYPAUSE }, /* TIMESHIFT */
+ { 0x0054, KEY_0 },
+};
+
+static struct rc_keymap digittrade_map = {
+ .map = {
+ .scan = digittrade,
+ .size = ARRAY_SIZE(digittrade),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_DIGITTRADE,
+ }
+};
+
+static int __init init_rc_map_digittrade(void)
+{
+ return ir_register_map(&digittrade_map);
+}
+
+static void __exit exit_rc_map_digittrade(void)
+{
+ ir_unregister_map(&digittrade_map);
+}
+
+module_init(init_rc_map_digittrade)
+module_exit(exit_rc_map_digittrade)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c b/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c
new file mode 100644
index 000000000000..7521315fd876
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-leadtek-y04g0051.c
@@ -0,0 +1,99 @@
+/*
+ * LeadTek Y04G0051 remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode leadtek_y04g0051[] = {
+ { 0x0300, KEY_POWER2 },
+ { 0x0303, KEY_SCREEN },
+ { 0x0304, KEY_RIGHT },
+ { 0x0305, KEY_1 },
+ { 0x0306, KEY_2 },
+ { 0x0307, KEY_3 },
+ { 0x0308, KEY_LEFT },
+ { 0x0309, KEY_4 },
+ { 0x030a, KEY_5 },
+ { 0x030b, KEY_6 },
+ { 0x030c, KEY_UP },
+ { 0x030d, KEY_7 },
+ { 0x030e, KEY_8 },
+ { 0x030f, KEY_9 },
+ { 0x0310, KEY_DOWN },
+ { 0x0311, KEY_AGAIN },
+ { 0x0312, KEY_0 },
+ { 0x0313, KEY_OK }, /* 1st ok */
+ { 0x0314, KEY_MUTE },
+ { 0x0316, KEY_OK }, /* 2nd ok */
+ { 0x031e, KEY_VIDEO }, /* 2nd video */
+ { 0x031b, KEY_AUDIO },
+ { 0x031f, KEY_TEXT },
+ { 0x0340, KEY_SLEEP },
+ { 0x0341, KEY_DOT },
+ { 0x0342, KEY_REWIND },
+ { 0x0343, KEY_PLAY },
+ { 0x0344, KEY_FASTFORWARD },
+ { 0x0345, KEY_TIME },
+ { 0x0346, KEY_STOP }, /* 2nd stop */
+ { 0x0347, KEY_RECORD },
+ { 0x0348, KEY_CAMERA },
+ { 0x0349, KEY_ESC },
+ { 0x034a, KEY_NEW },
+ { 0x034b, KEY_RED },
+ { 0x034c, KEY_GREEN },
+ { 0x034d, KEY_YELLOW },
+ { 0x034e, KEY_BLUE },
+ { 0x034f, KEY_MENU },
+ { 0x0350, KEY_STOP }, /* 1st stop */
+ { 0x0351, KEY_CHANNEL },
+ { 0x0352, KEY_VIDEO }, /* 1st video */
+ { 0x0353, KEY_EPG },
+ { 0x0354, KEY_PREVIOUS },
+ { 0x0355, KEY_NEXT },
+ { 0x0356, KEY_TV },
+ { 0x035a, KEY_VOLUMEDOWN },
+ { 0x035b, KEY_CHANNELUP },
+ { 0x035e, KEY_VOLUMEUP },
+ { 0x035f, KEY_CHANNELDOWN },
+};
+
+static struct rc_keymap leadtek_y04g0051_map = {
+ .map = {
+ .scan = leadtek_y04g0051,
+ .size = ARRAY_SIZE(leadtek_y04g0051),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_LEADTEK_Y04G0051,
+ }
+};
+
+static int __init init_rc_map_leadtek_y04g0051(void)
+{
+ return ir_register_map(&leadtek_y04g0051_map);
+}
+
+static void __exit exit_rc_map_leadtek_y04g0051(void)
+{
+ ir_unregister_map(&leadtek_y04g0051_map);
+}
+
+module_init(init_rc_map_leadtek_y04g0051)
+module_exit(exit_rc_map_leadtek_y04g0051)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-lme2510.c b/drivers/media/IR/keymaps/rc-lme2510.c
new file mode 100644
index 000000000000..40dcf0b4e21a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-lme2510.c
@@ -0,0 +1,68 @@
+/* LME2510 remote control
+ *
+ *
+ * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+
+static struct ir_scancode lme2510_rc[] = {
+ { 0xba45, KEY_0 },
+ { 0xa05f, KEY_1 },
+ { 0xaf50, KEY_2 },
+ { 0xa25d, KEY_3 },
+ { 0xbe41, KEY_4 },
+ { 0xf50a, KEY_5 },
+ { 0xbd42, KEY_6 },
+ { 0xb847, KEY_7 },
+ { 0xb649, KEY_8 },
+ { 0xfa05, KEY_9 },
+ { 0xbc43, KEY_POWER },
+ { 0xb946, KEY_SUBTITLE },
+ { 0xf906, KEY_PAUSE },
+ { 0xfc03, KEY_MEDIA_REPEAT},
+ { 0xfd02, KEY_PAUSE },
+ { 0xa15e, KEY_VOLUMEUP },
+ { 0xa35c, KEY_VOLUMEDOWN },
+ { 0xf609, KEY_CHANNELUP },
+ { 0xe51a, KEY_CHANNELDOWN },
+ { 0xe11e, KEY_PLAY },
+ { 0xe41b, KEY_ZOOM },
+ { 0xa659, KEY_MUTE },
+ { 0xa55a, KEY_TV },
+ { 0xe718, KEY_RECORD },
+ { 0xf807, KEY_EPG },
+ { 0xfe01, KEY_STOP },
+
+};
+
+static struct rc_keymap lme2510_map = {
+ .map = {
+ .scan = lme2510_rc,
+ .size = ARRAY_SIZE(lme2510_rc),
+ .ir_type = IR_TYPE_UNKNOWN,
+ .name = RC_MAP_LME2510,
+ }
+};
+
+static int __init init_rc_lme2510_map(void)
+{
+ return ir_register_map(&lme2510_map);
+}
+
+static void __exit exit_rc_lme2510_map(void)
+{
+ ir_unregister_map(&lme2510_map);
+}
+
+module_init(init_rc_lme2510_map)
+module_exit(exit_rc_lme2510_map)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
diff --git a/drivers/media/IR/keymaps/rc-msi-digivox-ii.c b/drivers/media/IR/keymaps/rc-msi-digivox-ii.c
new file mode 100644
index 000000000000..67237fbf9e4b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-digivox-ii.c
@@ -0,0 +1,67 @@
+/*
+ * MSI DIGIVOX mini II remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode msi_digivox_ii[] = {
+ { 0x0002, KEY_2 },
+ { 0x0003, KEY_UP }, /* up */
+ { 0x0004, KEY_3 },
+ { 0x0005, KEY_CHANNELDOWN },
+ { 0x0008, KEY_5 },
+ { 0x0009, KEY_0 },
+ { 0x000b, KEY_8 },
+ { 0x000d, KEY_DOWN }, /* down */
+ { 0x0010, KEY_9 },
+ { 0x0011, KEY_7 },
+ { 0x0014, KEY_VOLUMEUP },
+ { 0x0015, KEY_CHANNELUP },
+ { 0x0016, KEY_OK },
+ { 0x0017, KEY_POWER2 },
+ { 0x001a, KEY_1 },
+ { 0x001c, KEY_4 },
+ { 0x001d, KEY_6 },
+ { 0x001f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap msi_digivox_ii_map = {
+ .map = {
+ .scan = msi_digivox_ii,
+ .size = ARRAY_SIZE(msi_digivox_ii),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_MSI_DIGIVOX_II,
+ }
+};
+
+static int __init init_rc_map_msi_digivox_ii(void)
+{
+ return ir_register_map(&msi_digivox_ii_map);
+}
+
+static void __exit exit_rc_map_msi_digivox_ii(void)
+{
+ ir_unregister_map(&msi_digivox_ii_map);
+}
+
+module_init(init_rc_map_msi_digivox_ii)
+module_exit(exit_rc_map_msi_digivox_ii)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-msi-digivox-iii.c b/drivers/media/IR/keymaps/rc-msi-digivox-iii.c
new file mode 100644
index 000000000000..882056e52ef9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-digivox-iii.c
@@ -0,0 +1,85 @@
+/*
+ * MSI DIGIVOX mini III remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* MSI DIGIVOX mini III */
+/* Uses NEC extended 0x61d6. */
+/* This remote seems to be the same as rc-kworld-315u.c. Anyhow, add a new
+   keymap since rc-kworld-315u.c lacks the NEC extended address byte. */
+static struct ir_scancode msi_digivox_iii[] = {
+ { 0x61d601, KEY_VIDEO }, /* Source */
+ { 0x61d602, KEY_3 },
+ { 0x61d603, KEY_POWER }, /* ShutDown */
+ { 0x61d604, KEY_1 },
+ { 0x61d605, KEY_5 },
+ { 0x61d606, KEY_6 },
+ { 0x61d607, KEY_CHANNELDOWN }, /* CH- */
+ { 0x61d608, KEY_2 },
+ { 0x61d609, KEY_CHANNELUP }, /* CH+ */
+ { 0x61d60a, KEY_9 },
+ { 0x61d60b, KEY_ZOOM }, /* Zoom */
+ { 0x61d60c, KEY_7 },
+ { 0x61d60d, KEY_8 },
+ { 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
+ { 0x61d60f, KEY_4 },
+ { 0x61d610, KEY_ESC }, /* [back up arrow] */
+ { 0x61d611, KEY_0 },
+ { 0x61d612, KEY_OK }, /* [enter arrow] */
+ { 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
+ { 0x61d614, KEY_RECORD }, /* Rec */
+ { 0x61d615, KEY_STOP }, /* Stop */
+ { 0x61d616, KEY_PLAY }, /* Play */
+ { 0x61d617, KEY_MUTE }, /* Mute */
+ { 0x61d618, KEY_UP },
+ { 0x61d619, KEY_DOWN },
+ { 0x61d61a, KEY_LEFT },
+ { 0x61d61b, KEY_RIGHT },
+ { 0x61d61c, KEY_RED },
+ { 0x61d61d, KEY_GREEN },
+ { 0x61d61e, KEY_YELLOW },
+ { 0x61d61f, KEY_BLUE },
+ { 0x61d643, KEY_POWER2 }, /* [red power button] */
+};
+
+static struct rc_keymap msi_digivox_iii_map = {
+ .map = {
+ .scan = msi_digivox_iii,
+ .size = ARRAY_SIZE(msi_digivox_iii),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_MSI_DIGIVOX_III,
+ }
+};
+
+static int __init init_rc_map_msi_digivox_iii(void)
+{
+ return ir_register_map(&msi_digivox_iii_map);
+}
+
+static void __exit exit_rc_map_msi_digivox_iii(void)
+{
+ ir_unregister_map(&msi_digivox_iii_map);
+}
+
+module_init(init_rc_map_msi_digivox_iii)
+module_exit(exit_rc_map_msi_digivox_iii)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-streamzap.c b/drivers/media/IR/keymaps/rc-rc5-streamzap.c
deleted file mode 100644
index 4c19c58b46d8..000000000000
--- a/drivers/media/IR/keymaps/rc-rc5-streamzap.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/* rc-rc5-streamzap.c - Keytable for Streamzap PC Remote, for use
- * with the Streamzap PC Remote IR Receiver.
- *
- * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <media/rc-map.h>
-
-static struct ir_scancode rc5_streamzap[] = {
-/*
- * FIXME: The Streamzap remote isn't actually true RC-5, it has an extra
- * bit in it, which presently throws the in-kernel RC-5 decoder for a loop.
- * We either have to enhance the decoder to support it, add a new decoder,
- * or just rely on lirc userspace decoding.
- */
- { 0x00, KEY_NUMERIC_0 },
- { 0x01, KEY_NUMERIC_1 },
- { 0x02, KEY_NUMERIC_2 },
- { 0x03, KEY_NUMERIC_3 },
- { 0x04, KEY_NUMERIC_4 },
- { 0x05, KEY_NUMERIC_5 },
- { 0x06, KEY_NUMERIC_6 },
- { 0x07, KEY_NUMERIC_7 },
- { 0x08, KEY_NUMERIC_8 },
- { 0x0a, KEY_POWER },
- { 0x0b, KEY_MUTE },
- { 0x0c, KEY_CHANNELUP },
- { 0x0d, KEY_VOLUMEUP },
- { 0x0e, KEY_CHANNELDOWN },
- { 0x0f, KEY_VOLUMEDOWN },
- { 0x10, KEY_UP },
- { 0x11, KEY_LEFT },
- { 0x12, KEY_OK },
- { 0x13, KEY_RIGHT },
- { 0x14, KEY_DOWN },
- { 0x15, KEY_MENU },
- { 0x16, KEY_EXIT },
- { 0x17, KEY_PLAY },
- { 0x18, KEY_PAUSE },
- { 0x19, KEY_STOP },
- { 0x1a, KEY_BACK },
- { 0x1b, KEY_FORWARD },
- { 0x1c, KEY_RECORD },
- { 0x1d, KEY_REWIND },
- { 0x1e, KEY_FASTFORWARD },
- { 0x20, KEY_RED },
- { 0x21, KEY_GREEN },
- { 0x22, KEY_YELLOW },
- { 0x23, KEY_BLUE },
-
-};
-
-static struct rc_keymap rc5_streamzap_map = {
- .map = {
- .scan = rc5_streamzap,
- .size = ARRAY_SIZE(rc5_streamzap),
- .ir_type = IR_TYPE_RC5,
- .name = RC_MAP_RC5_STREAMZAP,
- }
-};
-
-static int __init init_rc_map_rc5_streamzap(void)
-{
- return ir_register_map(&rc5_streamzap_map);
-}
-
-static void __exit exit_rc_map_rc5_streamzap(void)
-{
- ir_unregister_map(&rc5_streamzap_map);
-}
-
-module_init(init_rc_map_rc5_streamzap)
-module_exit(exit_rc_map_rc5_streamzap)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index 39557ad401b6..1b7adabbcee9 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -12,76 +12,78 @@
#include <media/rc-map.h>
static struct ir_scancode rc6_mce[] = {
- { 0x800f0415, KEY_REWIND },
- { 0x800f0414, KEY_FASTFORWARD },
- { 0x800f041b, KEY_PREVIOUS },
- { 0x800f041a, KEY_NEXT },
+ { 0x800f0400, KEY_NUMERIC_0 },
+ { 0x800f0401, KEY_NUMERIC_1 },
+ { 0x800f0402, KEY_NUMERIC_2 },
+ { 0x800f0403, KEY_NUMERIC_3 },
+ { 0x800f0404, KEY_NUMERIC_4 },
+ { 0x800f0405, KEY_NUMERIC_5 },
+ { 0x800f0406, KEY_NUMERIC_6 },
+ { 0x800f0407, KEY_NUMERIC_7 },
+ { 0x800f0408, KEY_NUMERIC_8 },
+ { 0x800f0409, KEY_NUMERIC_9 },
+
+ { 0x800f040a, KEY_DELETE },
+ { 0x800f040b, KEY_ENTER },
+ { 0x800f040c, KEY_POWER },
+ { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
+ { 0x800f040e, KEY_MUTE },
+ { 0x800f040f, KEY_INFO },
+
+ { 0x800f0410, KEY_VOLUMEUP },
+ { 0x800f0411, KEY_VOLUMEDOWN },
+ { 0x800f0412, KEY_CHANNELUP },
+ { 0x800f0413, KEY_CHANNELDOWN },
+
+ { 0x800f0414, KEY_FASTFORWARD },
+ { 0x800f0415, KEY_REWIND },
{ 0x800f0416, KEY_PLAY },
+ { 0x800f0417, KEY_RECORD },
{ 0x800f0418, KEY_PAUSE },
{ 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f0419, KEY_STOP },
- { 0x800f0417, KEY_RECORD },
+ { 0x800f041a, KEY_NEXT },
+ { 0x800f041b, KEY_PREVIOUS },
+ { 0x800f041c, KEY_NUMERIC_POUND },
+ { 0x800f041d, KEY_NUMERIC_STAR },
{ 0x800f041e, KEY_UP },
{ 0x800f041f, KEY_DOWN },
{ 0x800f0420, KEY_LEFT },
{ 0x800f0421, KEY_RIGHT },
- { 0x800f040b, KEY_ENTER },
{ 0x800f0422, KEY_OK },
{ 0x800f0423, KEY_EXIT },
- { 0x800f040a, KEY_DELETE },
+ { 0x800f0424, KEY_DVD },
+ { 0x800f0425, KEY_TUNER }, /* LiveTV */
+ { 0x800f0426, KEY_EPG }, /* Guide */
+ { 0x800f0427, KEY_ZOOM }, /* Aspect */
- { 0x800f040e, KEY_MUTE },
- { 0x800f0410, KEY_VOLUMEUP },
- { 0x800f0411, KEY_VOLUMEDOWN },
- { 0x800f0412, KEY_CHANNELUP },
- { 0x800f0413, KEY_CHANNELDOWN },
{ 0x800f043a, KEY_BRIGHTNESSUP },
- { 0x800f0480, KEY_BRIGHTNESSDOWN },
-
- { 0x800f0401, KEY_NUMERIC_1 },
- { 0x800f0402, KEY_NUMERIC_2 },
- { 0x800f0403, KEY_NUMERIC_3 },
- { 0x800f0404, KEY_NUMERIC_4 },
- { 0x800f0405, KEY_NUMERIC_5 },
- { 0x800f0406, KEY_NUMERIC_6 },
- { 0x800f0407, KEY_NUMERIC_7 },
- { 0x800f0408, KEY_NUMERIC_8 },
- { 0x800f0409, KEY_NUMERIC_9 },
- { 0x800f0400, KEY_NUMERIC_0 },
-
- { 0x800f041d, KEY_NUMERIC_STAR },
- { 0x800f041c, KEY_NUMERIC_POUND },
{ 0x800f0446, KEY_TV },
- { 0x800f0447, KEY_AUDIO }, /* My Music */
- { 0x800f0448, KEY_PVR }, /* RecordedTV */
+ { 0x800f0447, KEY_AUDIO }, /* My Music */
+ { 0x800f0448, KEY_PVR }, /* RecordedTV */
{ 0x800f0449, KEY_CAMERA },
{ 0x800f044a, KEY_VIDEO },
- { 0x800f0424, KEY_DVD },
- { 0x800f0425, KEY_TUNER }, /* LiveTV */
- { 0x800f0450, KEY_RADIO },
-
{ 0x800f044c, KEY_LANGUAGE },
- { 0x800f0427, KEY_ZOOM }, /* Aspect */
+ { 0x800f044d, KEY_TITLE },
+ { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
+ { 0x800f0450, KEY_RADIO },
+
+ { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
{ 0x800f045b, KEY_RED },
{ 0x800f045c, KEY_GREEN },
{ 0x800f045d, KEY_YELLOW },
{ 0x800f045e, KEY_BLUE },
- { 0x800f040f, KEY_INFO },
- { 0x800f0426, KEY_EPG }, /* Guide */
- { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
- { 0x800f044d, KEY_TITLE },
-
- { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
-
- { 0x800f040c, KEY_POWER },
- { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
+ { 0x800f046e, KEY_PLAYPAUSE },
+ { 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */
+ { 0x800f0480, KEY_BRIGHTNESSDOWN },
+ { 0x800f0481, KEY_PLAYPAUSE },
};
static struct rc_keymap rc6_mce_map = {
diff --git a/drivers/media/IR/keymaps/rc-streamzap.c b/drivers/media/IR/keymaps/rc-streamzap.c
new file mode 100644
index 000000000000..df32013a321c
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-streamzap.c
@@ -0,0 +1,82 @@
+/* rc-streamzap.c - Keytable for Streamzap PC Remote, for use
+ * with the Streamzap PC Remote IR Receiver.
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode streamzap[] = {
+/*
+ * The Streamzap remote is almost, but not quite, RC-5, as it has an extra
+ * bit in it, which throws the in-kernel RC-5 decoder for a loop. Currently,
+ * an additional RC-5-sz decoder is being deployed to support it, but it
+ * may be possible to merge it back with the standard RC-5 decoder.
+ */
+ { 0x28c0, KEY_NUMERIC_0 },
+ { 0x28c1, KEY_NUMERIC_1 },
+ { 0x28c2, KEY_NUMERIC_2 },
+ { 0x28c3, KEY_NUMERIC_3 },
+ { 0x28c4, KEY_NUMERIC_4 },
+ { 0x28c5, KEY_NUMERIC_5 },
+ { 0x28c6, KEY_NUMERIC_6 },
+ { 0x28c7, KEY_NUMERIC_7 },
+ { 0x28c8, KEY_NUMERIC_8 },
+ { 0x28c9, KEY_NUMERIC_9 },
+ { 0x28ca, KEY_POWER },
+ { 0x28cb, KEY_MUTE },
+ { 0x28cc, KEY_CHANNELUP },
+ { 0x28cd, KEY_VOLUMEUP },
+ { 0x28ce, KEY_CHANNELDOWN },
+ { 0x28cf, KEY_VOLUMEDOWN },
+ { 0x28d0, KEY_UP },
+ { 0x28d1, KEY_LEFT },
+ { 0x28d2, KEY_OK },
+ { 0x28d3, KEY_RIGHT },
+ { 0x28d4, KEY_DOWN },
+ { 0x28d5, KEY_MENU },
+ { 0x28d6, KEY_EXIT },
+ { 0x28d7, KEY_PLAY },
+ { 0x28d8, KEY_PAUSE },
+ { 0x28d9, KEY_STOP },
+ { 0x28da, KEY_BACK },
+ { 0x28db, KEY_FORWARD },
+ { 0x28dc, KEY_RECORD },
+ { 0x28dd, KEY_REWIND },
+ { 0x28de, KEY_FASTFORWARD },
+ { 0x28e0, KEY_RED },
+ { 0x28e1, KEY_GREEN },
+ { 0x28e2, KEY_YELLOW },
+ { 0x28e3, KEY_BLUE },
+
+};
+
+static struct rc_keymap streamzap_map = {
+ .map = {
+ .scan = streamzap,
+ .size = ARRAY_SIZE(streamzap),
+ .ir_type = IR_TYPE_RC5_SZ,
+ .name = RC_MAP_STREAMZAP,
+ }
+};
+
+static int __init init_rc_map_streamzap(void)
+{
+ return ir_register_map(&streamzap_map);
+}
+
+static void __exit exit_rc_map_streamzap(void)
+{
+ ir_unregister_map(&streamzap_map);
+}
+
+module_init(init_rc_map_streamzap)
+module_exit(exit_rc_map_streamzap)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-terratec-slim.c b/drivers/media/IR/keymaps/rc-terratec-slim.c
new file mode 100644
index 000000000000..10dee4c1deff
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-terratec-slim.c
@@ -0,0 +1,79 @@
+/*
+ * TerraTec remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* TerraTec slim remote, 7 rows, 4 columns. */
+/* Uses NEC extended 0x02bd. */
+static struct ir_scancode terratec_slim[] = {
+ { 0x02bd00, KEY_1 },
+ { 0x02bd01, KEY_2 },
+ { 0x02bd02, KEY_3 },
+ { 0x02bd03, KEY_4 },
+ { 0x02bd04, KEY_5 },
+ { 0x02bd05, KEY_6 },
+ { 0x02bd06, KEY_7 },
+ { 0x02bd07, KEY_8 },
+ { 0x02bd08, KEY_9 },
+ { 0x02bd09, KEY_0 },
+ { 0x02bd0a, KEY_MUTE },
+ { 0x02bd0b, KEY_NEW }, /* symbol: PIP */
+ { 0x02bd0e, KEY_VOLUMEDOWN },
+ { 0x02bd0f, KEY_PLAYPAUSE },
+ { 0x02bd10, KEY_RIGHT },
+ { 0x02bd11, KEY_LEFT },
+ { 0x02bd12, KEY_UP },
+ { 0x02bd13, KEY_DOWN },
+ { 0x02bd15, KEY_OK },
+ { 0x02bd16, KEY_STOP },
+ { 0x02bd17, KEY_CAMERA }, /* snapshot */
+ { 0x02bd18, KEY_CHANNELUP },
+ { 0x02bd19, KEY_RECORD },
+ { 0x02bd1a, KEY_CHANNELDOWN },
+ { 0x02bd1c, KEY_ESC },
+ { 0x02bd1f, KEY_VOLUMEUP },
+ { 0x02bd44, KEY_EPG },
+ { 0x02bd45, KEY_POWER2 }, /* [red power button] */
+};
+
+static struct rc_keymap terratec_slim_map = {
+ .map = {
+ .scan = terratec_slim,
+ .size = ARRAY_SIZE(terratec_slim),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TERRATEC_SLIM,
+ }
+};
+
+static int __init init_rc_map_terratec_slim(void)
+{
+ return ir_register_map(&terratec_slim_map);
+}
+
+static void __exit exit_rc_map_terratec_slim(void)
+{
+ ir_unregister_map(&terratec_slim_map);
+}
+
+module_init(init_rc_map_terratec_slim)
+module_exit(exit_rc_map_terratec_slim)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-total-media-in-hand.c b/drivers/media/IR/keymaps/rc-total-media-in-hand.c
new file mode 100644
index 000000000000..fd1985763781
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-total-media-in-hand.c
@@ -0,0 +1,85 @@
+/*
+ * Total Media In Hand remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* Uses NEC extended 0x02bd */
+static struct ir_scancode total_media_in_hand[] = {
+ { 0x02bd00, KEY_1 },
+ { 0x02bd01, KEY_2 },
+ { 0x02bd02, KEY_3 },
+ { 0x02bd03, KEY_4 },
+ { 0x02bd04, KEY_5 },
+ { 0x02bd05, KEY_6 },
+ { 0x02bd06, KEY_7 },
+ { 0x02bd07, KEY_8 },
+ { 0x02bd08, KEY_9 },
+ { 0x02bd09, KEY_0 },
+ { 0x02bd0a, KEY_MUTE },
+ { 0x02bd0b, KEY_CYCLEWINDOWS }, /* yellow, [min / max] */
+ { 0x02bd0c, KEY_VIDEO }, /* TV / AV */
+ { 0x02bd0e, KEY_VOLUMEDOWN },
+ { 0x02bd0f, KEY_TIME }, /* TimeShift */
+ { 0x02bd10, KEY_RIGHT }, /* right arrow */
+ { 0x02bd11, KEY_LEFT }, /* left arrow */
+ { 0x02bd12, KEY_UP }, /* up arrow */
+ { 0x02bd13, KEY_DOWN }, /* down arrow */
+ { 0x02bd14, KEY_POWER2 }, /* [red] */
+ { 0x02bd15, KEY_OK }, /* OK */
+ { 0x02bd16, KEY_STOP },
+ { 0x02bd17, KEY_CAMERA }, /* Snapshot */
+ { 0x02bd18, KEY_CHANNELUP },
+ { 0x02bd19, KEY_RECORD },
+ { 0x02bd1a, KEY_CHANNELDOWN },
+ { 0x02bd1c, KEY_ESC }, /* Esc */
+ { 0x02bd1e, KEY_PLAY },
+ { 0x02bd1f, KEY_VOLUMEUP },
+ { 0x02bd40, KEY_PAUSE },
+ { 0x02bd41, KEY_FASTFORWARD }, /* FF >> */
+ { 0x02bd42, KEY_REWIND }, /* FR << */
+ { 0x02bd43, KEY_ZOOM }, /* [window + mouse pointer] */
+ { 0x02bd44, KEY_SHUFFLE }, /* Shuffle */
+ { 0x02bd45, KEY_INFO }, /* [red (I)] */
+};
+
+static struct rc_keymap total_media_in_hand_map = {
+ .map = {
+ .scan = total_media_in_hand,
+ .size = ARRAY_SIZE(total_media_in_hand),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TOTAL_MEDIA_IN_HAND,
+ }
+};
+
+static int __init init_rc_map_total_media_in_hand(void)
+{
+ return ir_register_map(&total_media_in_hand_map);
+}
+
+static void __exit exit_rc_map_total_media_in_hand(void)
+{
+ ir_unregister_map(&total_media_in_hand_map);
+}
+
+module_init(init_rc_map_total_media_in_hand)
+module_exit(exit_rc_map_total_media_in_hand)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-trekstor.c b/drivers/media/IR/keymaps/rc-trekstor.c
new file mode 100644
index 000000000000..91092caca452
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-trekstor.c
@@ -0,0 +1,80 @@
+/*
+ * TrekStor remote controller keytable
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <media/rc-map.h>
+
+/* TrekStor DVB-T USB Stick remote controller. */
+/* Imported from af9015.h.
+ Initial keytable was from Marc Schneider <macke@macke.org> */
+static struct ir_scancode trekstor[] = {
+ { 0x0084, KEY_0 },
+ { 0x0085, KEY_MUTE }, /* Mute */
+ { 0x0086, KEY_HOMEPAGE }, /* Home */
+ { 0x0087, KEY_UP }, /* Up */
+ { 0x0088, KEY_OK }, /* OK */
+ { 0x0089, KEY_RIGHT }, /* Right */
+ { 0x008a, KEY_FASTFORWARD }, /* Fast forward */
+ { 0x008b, KEY_VOLUMEUP }, /* Volume + */
+ { 0x008c, KEY_DOWN }, /* Down */
+ { 0x008d, KEY_PLAY }, /* Play/Pause */
+ { 0x008e, KEY_STOP }, /* Stop */
+ { 0x008f, KEY_EPG }, /* Info/EPG */
+ { 0x0090, KEY_7 },
+ { 0x0091, KEY_4 },
+ { 0x0092, KEY_1 },
+ { 0x0093, KEY_CHANNELDOWN }, /* Channel - */
+ { 0x0094, KEY_8 },
+ { 0x0095, KEY_5 },
+ { 0x0096, KEY_2 },
+ { 0x0097, KEY_CHANNELUP }, /* Channel + */
+ { 0x0098, KEY_9 },
+ { 0x0099, KEY_6 },
+ { 0x009a, KEY_3 },
+ { 0x009b, KEY_VOLUMEDOWN }, /* Volume - */
+ { 0x009c, KEY_TV }, /* TV */
+ { 0x009d, KEY_RECORD }, /* Record */
+ { 0x009e, KEY_REWIND }, /* Rewind */
+ { 0x009f, KEY_LEFT }, /* Left */
+};
+
+static struct rc_keymap trekstor_map = {
+ .map = {
+ .scan = trekstor,
+ .size = ARRAY_SIZE(trekstor),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_TREKSTOR,
+ }
+};
+
+static int __init init_rc_map_trekstor(void)
+{
+ return ir_register_map(&trekstor_map);
+}
+
+static void __exit exit_rc_map_trekstor(void)
+{
+ ir_unregister_map(&trekstor_map);
+}
+
+module_init(init_rc_map_trekstor)
+module_exit(exit_rc_map_trekstor)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/IR/keymaps/rc-twinhan1027.c b/drivers/media/IR/keymaps/rc-twinhan1027.c
new file mode 100644
index 000000000000..0b5d356c2d84
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-twinhan1027.c
@@ -0,0 +1,87 @@
+#include <media/rc-map.h>
+
+static struct ir_scancode twinhan_vp1027[] = {
+ { 0x16, KEY_POWER2 },
+ { 0x17, KEY_FAVORITES },
+ { 0x0f, KEY_TEXT },
+ { 0x48, KEY_INFO},
+ { 0x1c, KEY_EPG },
+ { 0x04, KEY_LIST },
+
+ { 0x03, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x1d, KEY_5 },
+ { 0x1f, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x19, KEY_8 },
+ { 0x1b, KEY_9 },
+ { 0x15, KEY_0 },
+
+ { 0x0c, KEY_CANCEL },
+ { 0x4a, KEY_CLEAR },
+ { 0x13, KEY_BACKSPACE },
+ { 0x00, KEY_TAB },
+
+ { 0x4b, KEY_UP },
+ { 0x51, KEY_DOWN },
+ { 0x4e, KEY_LEFT },
+ { 0x52, KEY_RIGHT },
+ { 0x4f, KEY_ENTER },
+
+ { 0x1e, KEY_VOLUMEUP },
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x05, KEY_CHANNELUP },
+ { 0x11, KEY_RECORD },
+
+ { 0x14, KEY_PLAY },
+ { 0x4c, KEY_PAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x40, KEY_REWIND },
+ { 0x12, KEY_FASTFORWARD },
+ { 0x41, KEY_PREVIOUSSONG },
+ { 0x42, KEY_NEXTSONG },
+ { 0x54, KEY_SAVE },
+ { 0x50, KEY_LANGUAGE },
+ { 0x47, KEY_MEDIA },
+ { 0x4d, KEY_SCREEN },
+ { 0x43, KEY_SUBTITLE },
+ { 0x10, KEY_MUTE },
+ { 0x49, KEY_AUDIO },
+ { 0x07, KEY_SLEEP },
+ { 0x08, KEY_VIDEO },
+ { 0x0e, KEY_AGAIN },
+ { 0x45, KEY_EQUAL },
+ { 0x46, KEY_MINUS },
+ { 0x18, KEY_RED },
+ { 0x53, KEY_GREEN },
+ { 0x5e, KEY_YELLOW },
+ { 0x5f, KEY_BLUE },
+};
+
+static struct rc_keymap twinhan_vp1027_map = {
+ .map = {
+ .scan = twinhan_vp1027,
+ .size = ARRAY_SIZE(twinhan_vp1027),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TWINHAN_VP1027_DVBS,
+ }
+};
+
+static int __init init_rc_map_twinhan_vp1027(void)
+{
+ return ir_register_map(&twinhan_vp1027_map);
+}
+
+static void __exit exit_rc_map_twinhan_vp1027(void)
+{
+ ir_unregister_map(&twinhan_vp1027_map);
+}
+
+module_init(init_rc_map_twinhan_vp1027)
+module_exit(exit_rc_map_twinhan_vp1027)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c
index 0acf6396e068..8418b14ee4d2 100644
--- a/drivers/media/IR/lirc_dev.c
+++ b/drivers/media/IR/lirc_dev.c
@@ -27,7 +27,6 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/completion.h>
-#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/unistd.h>
@@ -58,13 +57,12 @@ struct irctl {
struct task_struct *task;
long jiffies_to_wait;
-
- struct cdev cdev;
};
static DEFINE_MUTEX(lirc_dev_lock);
static struct irctl *irctls[MAX_IRCTL_DEVICES];
+static struct cdev cdevs[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
@@ -72,15 +70,13 @@ static struct class *lirc_class;
/* helper function
* initializes the irctl structure
*/
-static void init_irctl(struct irctl *ir)
+static void lirc_irctl_init(struct irctl *ir)
{
- dev_dbg(ir->d.dev, LOGHEAD "initializing irctl\n",
- ir->d.name, ir->d.minor);
mutex_init(&ir->irctl_lock);
ir->d.minor = NOPLUG;
}
-static void cleanup(struct irctl *ir)
+static void lirc_irctl_cleanup(struct irctl *ir)
{
dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor);
@@ -97,7 +93,7 @@ static void cleanup(struct irctl *ir)
* reads key codes from driver and puts them into buffer
* returns 0 on success
*/
-static int add_to_buf(struct irctl *ir)
+static int lirc_add_to_buf(struct irctl *ir)
{
if (ir->d.add_to_buf) {
int res = -ENODATA;
@@ -140,7 +136,7 @@ static int lirc_thread(void *irctl)
}
if (kthread_should_stop())
break;
- if (!add_to_buf(ir))
+ if (!lirc_add_to_buf(ir))
wake_up_interruptible(&ir->buf->wait_poll);
} else {
set_current_state(TASK_INTERRUPTIBLE);
@@ -155,12 +151,15 @@ static int lirc_thread(void *irctl)
}
-static struct file_operations fops = {
+static struct file_operations lirc_dev_fops = {
.owner = THIS_MODULE,
.read = lirc_dev_fop_read,
.write = lirc_dev_fop_write,
.poll = lirc_dev_fop_poll,
.unlocked_ioctl = lirc_dev_fop_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_dev_fop_ioctl,
+#endif
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = noop_llseek,
@@ -170,19 +169,20 @@ static int lirc_cdev_add(struct irctl *ir)
{
int retval;
struct lirc_driver *d = &ir->d;
+ struct cdev *cdev = &cdevs[d->minor];
if (d->fops) {
- cdev_init(&ir->cdev, d->fops);
- ir->cdev.owner = d->owner;
+ cdev_init(cdev, d->fops);
+ cdev->owner = d->owner;
} else {
- cdev_init(&ir->cdev, &fops);
- ir->cdev.owner = THIS_MODULE;
+ cdev_init(cdev, &lirc_dev_fops);
+ cdev->owner = THIS_MODULE;
}
- kobject_set_name(&ir->cdev.kobj, "lirc%d", d->minor);
+ kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
- retval = cdev_add(&ir->cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
+ retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
if (retval)
- kobject_put(&ir->cdev.kobj);
+ kobject_put(&cdev->kobj);
return retval;
}
@@ -203,6 +203,12 @@ int lirc_register_driver(struct lirc_driver *d)
goto out;
}
+ if (!d->dev) {
+ printk(KERN_ERR "%s: dev pointer not filled in!\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
if (MAX_IRCTL_DEVICES <= d->minor) {
dev_err(d->dev, "lirc_dev: lirc_register_driver: "
"\"minor\" must be between 0 and %d (%d)!\n",
@@ -278,7 +284,7 @@ int lirc_register_driver(struct lirc_driver *d)
err = -ENOMEM;
goto out_lock;
}
- init_irctl(ir);
+ lirc_irctl_init(ir);
irctls[minor] = ir;
d->minor = minor;
@@ -317,7 +323,6 @@ int lirc_register_driver(struct lirc_driver *d)
d->features = LIRC_CAN_REC_LIRCCODE;
ir->d = *d;
- ir->d.minor = minor;
device_create(lirc_class, ir->d.dev,
MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL,
@@ -358,21 +363,28 @@ EXPORT_SYMBOL(lirc_register_driver);
int lirc_unregister_driver(int minor)
{
struct irctl *ir;
+ struct cdev *cdev;
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- printk(KERN_ERR "lirc_dev: lirc_unregister_driver: "
- "\"minor (%d)\" must be between 0 and %d!\n",
- minor, MAX_IRCTL_DEVICES-1);
+ printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
+ "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
return -EBADRQC;
}
ir = irctls[minor];
+ if (!ir) {
+ printk(KERN_ERR "lirc_dev: %s: failed to get irctl struct "
+ "for minor %d!\n", __func__, minor);
+ return -ENOENT;
+ }
+
+ cdev = &cdevs[minor];
mutex_lock(&lirc_dev_lock);
if (ir->d.minor != minor) {
- printk(KERN_ERR "lirc_dev: lirc_unregister_driver: "
- "minor (%d) device not registered!", minor);
+ printk(KERN_ERR "lirc_dev: %s: minor (%d) device not "
+ "registered!\n", __func__, minor);
mutex_unlock(&lirc_dev_lock);
return -ENOENT;
}
@@ -391,12 +403,11 @@ int lirc_unregister_driver(int minor)
wake_up_interruptible(&ir->buf->wait_poll);
mutex_lock(&ir->irctl_lock);
ir->d.set_use_dec(ir->d.data);
- module_put(ir->d.owner);
+ module_put(cdev->owner);
mutex_unlock(&ir->irctl_lock);
- cdev_del(&ir->cdev);
} else {
- cleanup(ir);
- cdev_del(&ir->cdev);
+ lirc_irctl_cleanup(ir);
+ cdev_del(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -410,6 +421,7 @@ EXPORT_SYMBOL(lirc_unregister_driver);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
struct irctl *ir;
+ struct cdev *cdev;
int retval = 0;
if (iminor(inode) >= MAX_IRCTL_DEVICES) {
@@ -426,7 +438,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
retval = -ENODEV;
goto error;
}
- file->private_data = ir;
dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor);
@@ -440,13 +451,14 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- if (try_module_get(ir->d.owner)) {
- ++ir->open;
+ cdev = &cdevs[iminor(inode)];
+ if (try_module_get(cdev->owner)) {
+ ir->open++;
retval = ir->d.set_use_inc(ir->d.data);
if (retval) {
- module_put(ir->d.owner);
- --ir->open;
+ module_put(cdev->owner);
+ ir->open--;
} else {
lirc_buffer_clear(ir->buf);
}
@@ -470,17 +482,24 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
+ struct cdev *cdev = &cdevs[iminor(inode)];
+
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -EINVAL;
+ }
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
- --ir->open;
+ ir->open--;
if (ir->attached) {
ir->d.set_use_dec(ir->d.data);
- module_put(ir->d.owner);
+ module_put(cdev->owner);
} else {
- cleanup(ir);
+ lirc_irctl_cleanup(ir);
+ cdev_del(cdev);
irctls[ir->d.minor] = NULL;
kfree(ir);
}
@@ -496,6 +515,11 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
unsigned int ret;
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return POLLERR;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor);
if (!ir->attached) {
@@ -522,9 +546,14 @@ EXPORT_SYMBOL(lirc_dev_fop_poll);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- unsigned long mode;
+ __u32 mode;
int result = 0;
- struct irctl *ir = file->private_data;
+ struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+
+ if (!ir) {
+ printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
+ return -ENODEV;
+ }
dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n",
ir->d.name, ir->d.minor, cmd);
@@ -539,7 +568,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (unsigned long *)arg);
+ result = put_user(ir->d.features, (__u32 *)arg);
break;
case LIRC_GET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -549,7 +578,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(LIRC_REC2MODE
(ir->d.features & LIRC_CAN_REC_MASK),
- (unsigned long *)arg);
+ (__u32 *)arg);
break;
case LIRC_SET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -557,7 +586,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = get_user(mode, (unsigned long *)arg);
+ result = get_user(mode, (__u32 *)arg);
if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
result = -EINVAL;
/*
@@ -566,7 +595,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (unsigned long *)arg);
+ result = put_user(ir->d.code_length, (__u32 *)arg);
break;
case LIRC_GET_MIN_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -575,7 +604,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.min_timeout, (unsigned long *)arg);
+ result = put_user(ir->d.min_timeout, (__u32 *)arg);
break;
case LIRC_GET_MAX_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -584,7 +613,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.max_timeout, (unsigned long *)arg);
+ result = put_user(ir->d.max_timeout, (__u32 *)arg);
break;
default:
result = -EINVAL;
@@ -605,12 +634,21 @@ ssize_t lirc_dev_fop_read(struct file *file,
loff_t *ppos)
{
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
- unsigned char buf[ir->chunk_size];
+ unsigned char *buf;
int ret = 0, written = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -ENODEV;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
+ buf = kzalloc(ir->chunk_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (mutex_lock_interruptible(&ir->irctl_lock))
return -ERESTARTSYS;
if (!ir->attached) {
@@ -682,6 +720,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
mutex_unlock(&ir->irctl_lock);
out_unlocked:
+ kfree(buf);
dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n",
ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret);
@@ -710,6 +749,11 @@ ssize_t lirc_dev_fop_write(struct file *file, const char *buffer,
{
struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+ if (!ir) {
+ printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ return -ENODEV;
+ }
+
dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor);
if (!ir->attached)
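The ioctl hunks above narrow the user-copied argument from unsigned long to __u32, matching the 32-bit value userspace actually passes for these LIRC ioctls. A minimal userspace sketch, assuming the lirc.h ioctl definitions shipped with the driver (LIRC_GET_FEATURES) and a /dev/lirc0 node:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "lirc.h"	/* LIRC_GET_FEATURES and friends */

	int main(void)
	{
		uint32_t features = 0;	/* 32 bits wide, as the driver now expects */
		int fd = open("/dev/lirc0", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, LIRC_GET_FEATURES, &features) == 0)
			printf("features: 0x%08x\n", features);
		close(fd);
		return 0;
	}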
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index bc620e10ef77..9dce684fd231 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -46,24 +46,58 @@
"device driver"
#define DRIVER_NAME "mceusb"
-#define USB_BUFLEN 32 /* USB reception buffer length */
-#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
-#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
+#define USB_BUFLEN 32 /* USB reception buffer length */
+#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
+#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
/* MCE constants */
-#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
-#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
-#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
-#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
-#define MCE_PACKET_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
-#define MCE_CONTROL_HEADER 0x9F /* MCE status header */
-#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
-#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
-#define MCE_DEFAULT_TX_MASK 0x03 /* Val opts: TX1=0x01, TX2=0x02, ALL=0x03 */
-#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
-#define MCE_PULSE_MASK 0x7F /* Pulse mask */
-#define MCE_MAX_PULSE_LENGTH 0x7F /* Longest transmittable pulse symbol */
-#define MCE_PACKET_LENGTH_MASK 0x1F /* Packet length mask */
+#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
+#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
+#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
+#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
+#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
+#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
+#define MCE_TX_HEADER_LENGTH 3 /* # of bytes in the initializing tx header */
+#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
+#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
+#define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */
+#define MCE_PULSE_MASK 0x7f /* Pulse mask */
+#define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */
+
+#define MCE_HW_CMD_HEADER 0xff /* MCE hardware command header */
+#define MCE_COMMAND_HEADER 0x9f /* MCE command header */
+#define MCE_COMMAND_MASK 0xe0 /* Mask out command bits */
+#define MCE_COMMAND_NULL 0x00 /* These show up in various places... */
+/* if buf[i] & MCE_COMMAND_MASK == 0x80 and buf[i] != MCE_COMMAND_HEADER,
+ * then we're looking at a raw IR data sample */
+#define MCE_COMMAND_IRDATA 0x80
+#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
+
+/* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */
+#define MCE_CMD_PING 0x03 /* Ping device */
+#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
+#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
+#define MCE_CMD_S_CARRIER 0x06 /* Set TX carrier frequency */
+#define MCE_CMD_G_CARRIER 0x07 /* Get TX carrier frequency */
+#define MCE_CMD_S_TXMASK 0x08 /* Set TX port bitmask */
+#define MCE_CMD_UNKNOWN3 0x09 /* Unknown */
+#define MCE_CMD_UNKNOWN4 0x0a /* Unknown */
+#define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */
+#define MCE_CMD_S_TIMEOUT 0x0c /* Set RX timeout value */
+#define MCE_CMD_G_TIMEOUT 0x0d /* Get RX timeout value */
+#define MCE_CMD_UNKNOWN5 0x0e /* Unknown */
+#define MCE_CMD_UNKNOWN6 0x0f /* Unknown */
+#define MCE_CMD_G_RXPORTSTS 0x11 /* Get RX port status */
+#define MCE_CMD_G_TXMASK 0x13 /* Get TX port bitmask */
+#define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */
+#define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */
+#define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */
+#define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */
+#define MCE_CMD_UNKNOWN7 0x18 /* Unknown */
+#define MCE_CMD_UNKNOWN8 0x19 /* Unknown */
+#define MCE_CMD_UNKNOWN9 0x1b /* Unknown */
+#define MCE_CMD_DEVICE_RESET 0xaa /* Reset the hardware */
+#define MCE_RSP_CMD_INVALID 0xfe /* Invalid command issued */
/* module parameters */
@@ -104,14 +138,64 @@ static int debug;
#define VENDOR_NORTHSTAR 0x04eb
#define VENDOR_REALTEK 0x0bda
#define VENDOR_TIVO 0x105a
+#define VENDOR_CONEXANT 0x0572
+
+enum mceusb_model_type {
+ MCE_GEN2 = 0, /* Most boards */
+ MCE_GEN1,
+ MCE_GEN3,
+ MCE_GEN2_TX_INV,
+ POLARIS_EVK,
+};
+
+struct mceusb_model {
+ u32 mce_gen1:1;
+ u32 mce_gen2:1;
+ u32 mce_gen3:1;
+ u32 tx_mask_inverted:1;
+ u32 is_polaris:1;
+
+ const char *rc_map; /* Allows specifying a per-board map */
+ const char *name; /* per-board name */
+};
+
+static const struct mceusb_model mceusb_model[] = {
+ [MCE_GEN1] = {
+ .mce_gen1 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [MCE_GEN2] = {
+ .mce_gen2 = 1,
+ },
+ [MCE_GEN2_TX_INV] = {
+ .mce_gen2 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [MCE_GEN3] = {
+ .mce_gen3 = 1,
+ .tx_mask_inverted = 1,
+ },
+ [POLARIS_EVK] = {
+ .is_polaris = 1,
+ /*
+ * In fact, the EVK is shipped without
+ * remotes, but we should have something
+ * handy to allow testing it
+ */
+ .rc_map = RC_MAP_RC5_HAUPPAUGE_NEW,
+ .name = "cx231xx MCE IR",
+ },
+};
static struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
+ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
+ .driver_info = MCE_GEN1 },
/* Philips Infrared Transceiver - Sahara branded */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0608) },
/* Philips Infrared Transceiver - HP branded */
- { USB_DEVICE(VENDOR_PHILIPS, 0x060c) },
+ { USB_DEVICE(VENDOR_PHILIPS, 0x060c),
+ .driver_info = MCE_GEN2_TX_INV },
/* Philips SRM5100 */
{ USB_DEVICE(VENDOR_PHILIPS, 0x060d) },
/* Philips Infrared Transceiver - Omaura */
@@ -127,11 +211,14 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Realtek MCE IR Receiver */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161) },
/* SMK/Toshiba G83C0004D410 */
- { USB_DEVICE(VENDOR_SMK, 0x031d) },
+ { USB_DEVICE(VENDOR_SMK, 0x031d),
+ .driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver (Sony VAIO) */
- { USB_DEVICE(VENDOR_SMK, 0x0322) },
+ { USB_DEVICE(VENDOR_SMK, 0x0322),
+ .driver_info = MCE_GEN2_TX_INV },
/* bundled with Hauppauge PVR-150 */
- { USB_DEVICE(VENDOR_SMK, 0x0334) },
+ { USB_DEVICE(VENDOR_SMK, 0x0334),
+ .driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
/* Tatung eHome Infrared Transceiver */
@@ -145,17 +232,23 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Mitsumi */
{ USB_DEVICE(VENDOR_MITSUMI, 0x2501) },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0001) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0001),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed HP eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0006) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0006),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0007) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0007),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0008),
+ .driver_info = MCE_GEN3 },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x000a) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x000a),
+ .driver_info = MCE_GEN2_TX_INV },
/* Topseed eHome Infrared Transceiver */
- { USB_DEVICE(VENDOR_TOPSEED, 0x0011) },
+ { USB_DEVICE(VENDOR_TOPSEED, 0x0011),
+ .driver_info = MCE_GEN2_TX_INV },
/* Ricavision internal Infrared Transceiver */
{ USB_DEVICE(VENDOR_RICAVISION, 0x0010) },
/* Itron ione Libra Q-11 */
@@ -185,7 +278,8 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
{ USB_DEVICE(VENDOR_FINTEK, 0x0702) },
/* Pinnacle Remote Kit */
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
+ { USB_DEVICE(VENDOR_PINNACLE, 0x0225),
+ .driver_info = MCE_GEN3 },
/* Elitegroup Computer Systems IR */
{ USB_DEVICE(VENDOR_ECS, 0x0f38) },
/* Wistron Corp. eHome Infrared Receiver */
@@ -198,37 +292,13 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
/* TiVo PC IR Receiver */
{ USB_DEVICE(VENDOR_TIVO, 0x2000) },
+ /* Conexant SDK */
+ { USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
+ .driver_info = POLARIS_EVK },
/* Terminating entry */
{ }
};
-static struct usb_device_id gen3_list[] = {
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
- {}
-};
-
-static struct usb_device_id microsoft_gen1_list[] = {
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
- {}
-};
-
-static struct usb_device_id std_tx_mask_list[] = {
- { USB_DEVICE(VENDOR_MICROSOFT, 0x006d) },
- { USB_DEVICE(VENDOR_PHILIPS, 0x060c) },
- { USB_DEVICE(VENDOR_SMK, 0x031d) },
- { USB_DEVICE(VENDOR_SMK, 0x0322) },
- { USB_DEVICE(VENDOR_SMK, 0x0334) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0001) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0006) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0007) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0008) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x000a) },
- { USB_DEVICE(VENDOR_TOPSEED, 0x0011) },
- { USB_DEVICE(VENDOR_PINNACLE, 0x0225) },
- {}
-};
-
/* data structure for each usb transceiver */
struct mceusb_dev {
/* ir-core bits */
@@ -248,8 +318,15 @@ struct mceusb_dev {
/* buffers and dma */
unsigned char *buf_in;
unsigned int len_in;
- u8 cmd; /* MCE command type */
- u8 rem; /* Remaining IR data bytes in packet */
+
+ enum {
+ CMD_HEADER = 0,
+ SUBCMD,
+ CMD_DATA,
+ PARSE_IRDATA,
+ } parser_state;
+ u8 cmd, rem; /* Remaining IR data bytes in packet */
+
dma_addr_t dma_in;
dma_addr_t dma_out;
@@ -257,7 +334,6 @@ struct mceusb_dev {
u32 connected:1;
u32 tx_mask_inverted:1;
u32 microsoft_gen1:1;
- u32 reserved:29;
} flags;
/* transmit support */
@@ -267,6 +343,7 @@ struct mceusb_dev {
char name[128];
char phys[64];
+ enum mceusb_model_type model;
};
/*
@@ -291,43 +368,81 @@ struct mceusb_dev {
* - SET_RX_TIMEOUT sets the receiver timeout
* - SET_RX_SENSOR sets which receiver sensor to use
*/
-static char DEVICE_RESET[] = {0x00, 0xff, 0xaa};
-static char GET_REVISION[] = {0xff, 0x0b};
-static char GET_UNKNOWN[] = {0xff, 0x18};
-static char GET_UNKNOWN2[] = {0x9f, 0x05};
-static char GET_CARRIER_FREQ[] = {0x9f, 0x07};
-static char GET_RX_TIMEOUT[] = {0x9f, 0x0d};
-static char GET_TX_BITMASK[] = {0x9f, 0x13};
-static char GET_RX_SENSOR[] = {0x9f, 0x15};
+static char DEVICE_RESET[] = {MCE_COMMAND_NULL, MCE_HW_CMD_HEADER,
+ MCE_CMD_DEVICE_RESET};
+static char GET_REVISION[] = {MCE_HW_CMD_HEADER, MCE_CMD_G_REVISION};
+static char GET_UNKNOWN[] = {MCE_HW_CMD_HEADER, MCE_CMD_UNKNOWN7};
+static char GET_UNKNOWN2[] = {MCE_COMMAND_HEADER, MCE_CMD_UNKNOWN2};
+static char GET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER, MCE_CMD_G_CARRIER};
+static char GET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TIMEOUT};
+static char GET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_G_TXMASK};
+static char GET_RX_SENSOR[] = {MCE_COMMAND_HEADER, MCE_CMD_G_RXSENSOR};
/* sub in desired values in lower byte or bytes for full command */
/* FIXME: make use of these for transmit.
-static char SET_CARRIER_FREQ[] = {0x9f, 0x06, 0x00, 0x00};
-static char SET_TX_BITMASK[] = {0x9f, 0x08, 0x00};
-static char SET_RX_TIMEOUT[] = {0x9f, 0x0c, 0x00, 0x00};
-static char SET_RX_SENSOR[] = {0x9f, 0x14, 0x00};
+static char SET_CARRIER_FREQ[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_CARRIER, 0x00, 0x00};
+static char SET_TX_BITMASK[] = {MCE_COMMAND_HEADER, MCE_CMD_S_TXMASK, 0x00};
+static char SET_RX_TIMEOUT[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_TIMEOUT, 0x00, 0x00};
+static char SET_RX_SENSOR[] = {MCE_COMMAND_HEADER,
+ MCE_CMD_S_RXSENSOR, 0x00};
*/
+static int mceusb_cmdsize(u8 cmd, u8 subcmd)
+{
+ int datasize = 0;
+
+ switch (cmd) {
+ case MCE_COMMAND_NULL:
+ if (subcmd == MCE_HW_CMD_HEADER)
+ datasize = 1;
+ break;
+ case MCE_HW_CMD_HEADER:
+ switch (subcmd) {
+ case MCE_CMD_G_REVISION:
+ datasize = 2;
+ break;
+ }
+ case MCE_COMMAND_HEADER:
+ switch (subcmd) {
+ case MCE_CMD_UNKNOWN:
+ case MCE_CMD_S_CARRIER:
+ case MCE_CMD_S_TIMEOUT:
+ case MCE_CMD_G_RXSENSOR:
+ datasize = 2;
+ break;
+ case MCE_CMD_S_TXMASK:
+ case MCE_CMD_S_RXSENSOR:
+ datasize = 1;
+ break;
+ }
+ }
+ return datasize;
+}
+
static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
- int len, bool out)
+ int offset, int len, bool out)
{
char codes[USB_BUFLEN * 3 + 1];
char inout[9];
- int i;
u8 cmd, subcmd, data1, data2;
struct device *dev = ir->dev;
- int idx = 0;
+ int i, start, skip = 0;
+
+ if (!debug)
+ return;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1 && !out)
- idx = 2;
+ skip = 2;
- if (len <= idx)
+ if (len <= skip)
return;
for (i = 0; i < len && i < USB_BUFLEN; i++)
- snprintf(codes + i * 3, 4, "%02x ", buf[i] & 0xFF);
+ snprintf(codes + i * 3, 4, "%02x ", buf[i + offset] & 0xff);
- dev_info(dev, "%sx data: %s (length=%d)\n",
+ dev_info(dev, "%sx data: %s(length=%d)\n",
(out ? "t" : "r"), codes, len);
if (out)
@@ -335,91 +450,93 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
else
strcpy(inout, "Got\0");
- cmd = buf[idx] & 0xff;
- subcmd = buf[idx + 1] & 0xff;
- data1 = buf[idx + 2] & 0xff;
- data2 = buf[idx + 3] & 0xff;
+ start = offset + skip;
+ cmd = buf[start] & 0xff;
+ subcmd = buf[start + 1] & 0xff;
+ data1 = buf[start + 2] & 0xff;
+ data2 = buf[start + 3] & 0xff;
switch (cmd) {
- case 0x00:
- if (subcmd == 0xff && data1 == 0xaa)
+ case MCE_COMMAND_NULL:
+ if ((subcmd == MCE_HW_CMD_HEADER) &&
+ (data1 == MCE_CMD_DEVICE_RESET))
dev_info(dev, "Device reset requested\n");
else
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
- case 0xff:
+ case MCE_HW_CMD_HEADER:
switch (subcmd) {
- case 0x0b:
+ case MCE_CMD_G_REVISION:
if (len == 2)
dev_info(dev, "Get hw/sw rev?\n");
else
dev_info(dev, "hw/sw rev 0x%02x 0x%02x "
"0x%02x 0x%02x\n", data1, data2,
- buf[idx + 4], buf[idx + 5]);
+ buf[start + 4], buf[start + 5]);
break;
- case 0xaa:
+ case MCE_CMD_DEVICE_RESET:
dev_info(dev, "Device reset requested\n");
break;
- case 0xfe:
+ case MCE_RSP_CMD_INVALID:
dev_info(dev, "Previous command not supported\n");
break;
- case 0x18:
- case 0x1b:
+ case MCE_CMD_UNKNOWN7:
+ case MCE_CMD_UNKNOWN9:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
break;
}
break;
- case 0x9f:
+ case MCE_COMMAND_HEADER:
switch (subcmd) {
- case 0x03:
+ case MCE_CMD_PING:
dev_info(dev, "Ping\n");
break;
- case 0x04:
+ case MCE_CMD_UNKNOWN:
dev_info(dev, "Resp to 9f 05 of 0x%02x 0x%02x\n",
data1, data2);
break;
- case 0x06:
+ case MCE_CMD_S_CARRIER:
dev_info(dev, "%s carrier mode and freq of "
"0x%02x 0x%02x\n", inout, data1, data2);
break;
- case 0x07:
+ case MCE_CMD_G_CARRIER:
dev_info(dev, "Get carrier mode and freq\n");
break;
- case 0x08:
+ case MCE_CMD_S_TXMASK:
dev_info(dev, "%s transmit blaster mask of 0x%02x\n",
inout, data1);
break;
- case 0x0c:
+ case MCE_CMD_S_TIMEOUT:
/* value is in units of 50us, so x*50/100 or x/2 ms */
dev_info(dev, "%s receive timeout of %d ms\n",
inout, ((data1 << 8) | data2) / 2);
break;
- case 0x0d:
+ case MCE_CMD_G_TIMEOUT:
dev_info(dev, "Get receive timeout\n");
break;
- case 0x13:
+ case MCE_CMD_G_TXMASK:
dev_info(dev, "Get transmit blaster mask\n");
break;
- case 0x14:
+ case MCE_CMD_S_RXSENSOR:
dev_info(dev, "%s %s-range receive sensor in use\n",
inout, data1 == 0x02 ? "short" : "long");
break;
- case 0x15:
+ case MCE_CMD_G_RXSENSOR:
if (len == 2)
dev_info(dev, "Get receive sensor\n");
else
dev_info(dev, "Received pulse count is %d\n",
((data1 << 8) | data2));
break;
- case 0xfe:
+ case MCE_RSP_CMD_INVALID:
dev_info(dev, "Error! Hardware is likely wedged...\n");
break;
- case 0x05:
- case 0x09:
- case 0x0f:
+ case MCE_CMD_UNKNOWN2:
+ case MCE_CMD_UNKNOWN3:
+ case MCE_CMD_UNKNOWN5:
default:
dev_info(dev, "Unknown command 0x%02x 0x%02x\n",
cmd, subcmd);
@@ -429,6 +546,12 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
default:
break;
}
+
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_info(dev, "End of raw IR data\n");
+ else if ((cmd != MCE_COMMAND_HEADER) &&
+ ((cmd & MCE_COMMAND_MASK) == MCE_COMMAND_IRDATA))
+ dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
}
static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
@@ -446,9 +569,7 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
urb->status, len);
- if (debug)
- mceusb_dev_printdata(ir, urb->transfer_buffer,
- len, true);
+ mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
}
@@ -536,8 +657,8 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
return -ENOMEM;
/* MCE tx init header */
- cmdbuf[cmdcount++] = MCE_CONTROL_HEADER;
- cmdbuf[cmdcount++] = 0x08;
+ cmdbuf[cmdcount++] = MCE_COMMAND_HEADER;
+ cmdbuf[cmdcount++] = MCE_CMD_S_TXMASK;
cmdbuf[cmdcount++] = ir->tx_mask;
/* Generate mce packet data */
@@ -551,7 +672,7 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
if ((cmdcount < MCE_CMDBUF_SIZE) &&
(cmdcount - MCE_TX_HEADER_LENGTH) %
MCE_CODE_LENGTH == 0)
- cmdbuf[cmdcount++] = MCE_PACKET_HEADER;
+ cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
/* Insert mce packet data */
if (cmdcount < MCE_CMDBUF_SIZE)
@@ -570,7 +691,8 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
/* Fix packet length in last header */
cmdbuf[cmdcount - (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH] =
- 0x80 + (cmdcount - MCE_TX_HEADER_LENGTH) % MCE_CODE_LENGTH - 1;
+ MCE_COMMAND_IRDATA + (cmdcount - MCE_TX_HEADER_LENGTH) %
+ MCE_CODE_LENGTH - 1;
/* Check if we have room for the empty packet at the end */
if (cmdcount >= MCE_CMDBUF_SIZE) {
@@ -579,7 +701,7 @@ static int mceusb_tx_ir(void *priv, int *txbuf, u32 n)
}
/* All mce commands end with an empty packet (0x80) */
- cmdbuf[cmdcount++] = 0x80;
+ cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
/* Transmit the command to the mce device */
mce_async_out(ir, cmdbuf, cmdcount);
@@ -608,7 +730,8 @@ static int mceusb_set_tx_mask(void *priv, u32 mask)
struct mceusb_dev *ir = priv;
if (ir->flags.tx_mask_inverted)
- ir->tx_mask = (mask != 0x03 ? mask ^ 0x03 : mask) << 1;
+ ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
+ mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
else
ir->tx_mask = mask;
@@ -621,7 +744,8 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
struct mceusb_dev *ir = priv;
int clk = 10000000;
int prescaler = 0, divisor = 0;
- unsigned char cmdbuf[4] = { 0x9f, 0x06, 0x00, 0x00 };
+ unsigned char cmdbuf[4] = { MCE_COMMAND_HEADER,
+ MCE_CMD_S_CARRIER, 0x00, 0x00 };
/* Carrier has changed */
if (ir->carrier != carrier) {
@@ -629,7 +753,7 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
if (carrier == 0) {
ir->carrier = carrier;
cmdbuf[2] = 0x01;
- cmdbuf[3] = 0x80;
+ cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
@@ -638,7 +762,7 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
for (prescaler = 0; prescaler < 4; ++prescaler) {
divisor = (clk >> (2 * prescaler)) / carrier;
- if (divisor <= 0xFF) {
+ if (divisor <= 0xff) {
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
@@ -660,47 +784,36 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- int i, start_index = 0;
- u8 hdr = MCE_CONTROL_HEADER;
+ DEFINE_IR_RAW_EVENT(rawir);
+ int i = 0;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1)
- start_index = 2;
-
- for (i = start_index; i < buf_len;) {
- if (ir->rem == 0) {
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
- hdr = ir->buf_in[i];
- ir->rem = (hdr & MCE_PACKET_LENGTH_MASK);
- ir->cmd = (hdr & ~MCE_PACKET_LENGTH_MASK);
- dev_dbg(ir->dev, "New data. rem: 0x%02x, cmd: 0x%02x\n",
- ir->rem, ir->cmd);
- i++;
- }
-
- /* don't process MCE commands */
- if (hdr == MCE_CONTROL_HEADER || hdr == 0xff) {
- ir->rem = 0;
- return;
- }
-
- for (; (ir->rem > 0) && (i < buf_len); i++) {
+ i = 2;
+
+ for (; i < buf_len; i++) {
+ switch (ir->parser_state) {
+ case SUBCMD:
+ ir->rem = mceusb_cmdsize(ir->cmd, ir->buf_in[i]);
+ mceusb_dev_printdata(ir, ir->buf_in, i - 1,
+ ir->rem + 2, false);
+ ir->parser_state = CMD_DATA;
+ break;
+ case PARSE_IRDATA:
ir->rem--;
-
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* MCE_TIME_UNIT * 1000;
if ((ir->buf_in[i] & MCE_PULSE_MASK) == 0x7f) {
- if (ir->rawir.pulse == rawir.pulse)
+ if (ir->rawir.pulse == rawir.pulse) {
ir->rawir.duration += rawir.duration;
- else {
+ } else {
ir->rawir.duration = rawir.duration;
ir->rawir.pulse = rawir.pulse;
}
- continue;
+ if (ir->rem)
+ break;
}
rawir.duration += ir->rawir.duration;
ir->rawir.duration = 0;
@@ -711,14 +824,40 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration);
ir_raw_event_store(ir->idev, &rawir);
+ break;
+ case CMD_DATA:
+ ir->rem--;
+ break;
+ case CMD_HEADER:
+ /* decode mce packets of the form (84),AA,BB,CC,DD */
+ /* IR data packets can span USB messages - rem */
+ ir->cmd = ir->buf_in[i];
+ if ((ir->cmd == MCE_COMMAND_HEADER) ||
+ ((ir->cmd & MCE_COMMAND_MASK) !=
+ MCE_COMMAND_IRDATA)) {
+ ir->parser_state = SUBCMD;
+ continue;
+ }
+ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
+ mceusb_dev_printdata(ir, ir->buf_in, i, ir->rem + 1, false);
+ if (ir->rem) {
+ ir->parser_state = PARSE_IRDATA;
+ break;
+ }
+ /*
+ * a packet with len=0 (e.g. 0x80) means end of
+ * data. We could use it to do the call to
+ * ir_raw_event_handle(). For now, we don't need to
+ * use it.
+ */
+ break;
}
- if (ir->buf_in[i] == 0x80 || ir->buf_in[i] == 0x9f)
- ir->rem = 0;
-
- dev_dbg(ir->dev, "calling ir_raw_event_handle\n");
- ir_raw_event_handle(ir->idev);
+ if (ir->parser_state != CMD_HEADER && !ir->rem)
+ ir->parser_state = CMD_HEADER;
}
+ dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ ir_raw_event_handle(ir->idev);
}
static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
@@ -737,9 +876,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
buf_len = urb->actual_length;
- if (debug)
- mceusb_dev_printdata(ir, urb->transfer_buffer, buf_len, false);
-
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
dev_dbg(ir->dev, "setup answer received %d bytes\n",
@@ -760,6 +896,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
case -EPIPE:
default:
+ dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
@@ -865,6 +1002,8 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
struct input_dev *idev;
struct ir_dev_props *props;
struct device *dev = ir->dev;
+ const char *rc_map = RC_MAP_RC6_MCE;
+ const char *name = "Media Center Ed. eHome Infrared Remote Transceiver";
int ret = -ENODEV;
idev = input_allocate_device();
@@ -880,8 +1019,11 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
goto props_alloc_failed;
}
- snprintf(ir->name, sizeof(ir->name), "Media Center Ed. eHome "
- "Infrared Remote Transceiver (%04x:%04x)",
+ if (mceusb_model[ir->model].name)
+ name = mceusb_model[ir->model].name;
+
+ snprintf(ir->name, sizeof(ir->name), "%s (%04x:%04x)",
+ name,
le16_to_cpu(ir->usbdev->descriptor.idVendor),
le16_to_cpu(ir->usbdev->descriptor.idProduct));
@@ -899,7 +1041,10 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
ir->props = props;
- ret = ir_input_register(idev, RC_MAP_RC6_MCE, props, DRIVER_NAME);
+ if (mceusb_model[ir->model].rc_map)
+ rc_map = mceusb_model[ir->model].rc_map;
+
+ ret = ir_input_register(idev, rc_map, props, DRIVER_NAME);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
goto irdev_failed;
@@ -926,17 +1071,26 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
struct mceusb_dev *ir = NULL;
int pipe, maxp, i;
char buf[63], name[128] = "";
+ enum mceusb_model_type model = id->driver_info;
bool is_gen3;
bool is_microsoft_gen1;
bool tx_mask_inverted;
+ bool is_polaris;
dev_dbg(&intf->dev, ": %s called\n", __func__);
idesc = intf->cur_altsetting;
- is_gen3 = usb_match_id(intf, gen3_list) ? 1 : 0;
- is_microsoft_gen1 = usb_match_id(intf, microsoft_gen1_list) ? 1 : 0;
- tx_mask_inverted = usb_match_id(intf, std_tx_mask_list) ? 0 : 1;
+ is_gen3 = mceusb_model[model].mce_gen3;
+ is_microsoft_gen1 = mceusb_model[model].mce_gen1;
+ tx_mask_inverted = mceusb_model[model].tx_mask_inverted;
+ is_polaris = mceusb_model[model].is_polaris;
+
+ if (is_polaris) {
+ /* Interface 0 is IR */
+ if (idesc->desc.bInterfaceNumber)
+ return -ENODEV;
+ }
/* step through the endpoints to find first bulk in and out endpoint */
for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
@@ -997,6 +1151,9 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ir->len_in = maxp;
ir->flags.microsoft_gen1 = is_microsoft_gen1;
ir->flags.tx_mask_inverted = tx_mask_inverted;
+ ir->model = model;
+
+ init_ir_raw_event(&ir->rawir);
/* Saving usb interface data for use by the transmitter routine */
ir->usb_ep_in = ep_in;
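Note on the new receive parser: instead of peeking at a single header byte, mceusb_process_ir_data() now steps through CMD_HEADER, SUBCMD, CMD_DATA and PARSE_IRDATA states, using mceusb_cmdsize() to know how many payload bytes follow a command. A simplified, standalone sketch of just the raw IR data case, built from the constants defined in this patch (decode_irdata_packet is a hypothetical helper, not part of the driver):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MCE_COMMAND_HEADER	0x9f	/* MCE command header */
	#define MCE_COMMAND_MASK	0xe0	/* Mask out command bits */
	#define MCE_COMMAND_IRDATA	0x80	/* hdr & mask == 0x80 -> raw IR data */
	#define MCE_PACKET_LENGTH_MASK	0x1f	/* Packet length mask */
	#define MCE_PULSE_BIT		0x80	/* MSB set == PULSE, else SPACE */
	#define MCE_PULSE_MASK		0x7f	/* Pulse/space length mask */
	#define MCE_TIME_UNIT		50	/* Approx 50us resolution */

	/* Decode one IR data packet of the form (0x80 + N), sample1, ..., sampleN */
	static void decode_irdata_packet(const uint8_t *buf, int len)
	{
		uint8_t hdr = buf[0];
		int i, rem;

		/* 0x9f command packets and 0xff hardware replies are not IR data */
		if (hdr == MCE_COMMAND_HEADER ||
		    (hdr & MCE_COMMAND_MASK) != MCE_COMMAND_IRDATA)
			return;

		rem = hdr & MCE_PACKET_LENGTH_MASK;	/* e.g. 0x84 -> 4 samples */
		for (i = 1; i <= rem && i < len; i++) {
			bool pulse = buf[i] & MCE_PULSE_BIT;
			unsigned int us = (buf[i] & MCE_PULSE_MASK) * MCE_TIME_UNIT;

			printf("%s %u us\n", pulse ? "pulse" : "space", us);
		}
	}

	int main(void)
	{
		/* 0x84 header followed by four pulse/space samples */
		const uint8_t pkt[] = { 0x84, 0xa3, 0x23, 0x8a, 0x0a };

		decode_irdata_packet(pkt, sizeof(pkt));
		return 0;
	}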
diff --git a/drivers/media/IR/nuvoton-cir.c b/drivers/media/IR/nuvoton-cir.c
new file mode 100644
index 000000000000..301be53aee85
--- /dev/null
+++ b/drivers/media/IR/nuvoton-cir.c
@@ -0,0 +1,1246 @@
+/*
+ * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
+ *
+ * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
+ * Copyright (C) 2009 Nuvoton PS Team
+ *
+ * Special thanks to Nuvoton for providing hardware, spec sheets and
+ * sample code upon which portions of this driver are based. Indirect
+ * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
+ * modeled after.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <media/ir-core.h>
+#include <linux/pci_ids.h>
+
+#include "nuvoton-cir.h"
+
+static char *chip_id = "w836x7hg";
+
+/* write val to config reg */
+static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ outb(reg, nvt->cr_efir);
+ outb(val, nvt->cr_efdr);
+}
+
+/* read val from config reg */
+static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
+{
+ outb(reg, nvt->cr_efir);
+ return inb(nvt->cr_efdr);
+}
+
+/* update config register bit without changing other bits */
+static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ u8 tmp = nvt_cr_read(nvt, reg) | val;
+ nvt_cr_write(nvt, tmp, reg);
+}
+
+/* clear config register bit without changing other bits */
+static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
+{
+ u8 tmp = nvt_cr_read(nvt, reg) & ~val;
+ nvt_cr_write(nvt, tmp, reg);
+}
+
+/* enter extended function mode */
+static inline void nvt_efm_enable(struct nvt_dev *nvt)
+{
+ /* Enabling Extended Function Mode explicitly requires writing 2x */
+ outb(EFER_EFM_ENABLE, nvt->cr_efir);
+ outb(EFER_EFM_ENABLE, nvt->cr_efir);
+}
+
+/* exit extended function mode */
+static inline void nvt_efm_disable(struct nvt_dev *nvt)
+{
+ outb(EFER_EFM_DISABLE, nvt->cr_efir);
+}
+
+/*
+ * When you want to address a specific logical device, write its logical
+ * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
+ * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
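+ *
+ * e.g. nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR) followed by
+ * nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN), as done in
+ * nvt_cir_ldev_init() below.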
+ */
+static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+ outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
+ outb(ldev, nvt->cr_efdr);
+}
+
+/* write val to cir config register */
+static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
+{
+ outb(val, nvt->cir_addr + offset);
+}
+
+/* read val from cir config register */
+static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
+{
+ u8 val;
+
+ val = inb(nvt->cir_addr + offset);
+
+ return val;
+}
+
+/* write val to cir wake register */
+static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
+ u8 val, u8 offset)
+{
+ outb(val, nvt->cir_wake_addr + offset);
+}
+
+/* read val from cir wake config register */
+static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
+{
+ u8 val;
+
+ val = inb(nvt->cir_wake_addr + offset);
+
+ return val;
+}
+
+#define pr_reg(text, ...) \
+ printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+/* dump current cir register contents */
+static void cir_dump_regs(struct nvt_dev *nvt)
+{
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+
+ pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
+ pr_reg(" * CR CIR ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+
+ nvt_efm_disable(nvt);
+
+ pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
+ pr_reg(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
+ pr_reg(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
+ pr_reg(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
+ pr_reg(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
+ pr_reg(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
+ pr_reg(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
+ pr_reg(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
+ pr_reg(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
+ pr_reg(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
+ pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
+ pr_reg(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
+ pr_reg(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
+ pr_reg(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
+ pr_reg(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
+ pr_reg(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
+ pr_reg(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
+}
+
+/* dump current cir wake register contents */
+static void cir_wake_dump_regs(struct nvt_dev *nvt)
+{
+ u8 i, fifo_len;
+
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+
+ pr_reg("%s: Dump CIR WAKE logical device registers:\n",
+ NVT_DRIVER_NAME);
+ pr_reg(" * CR CIR WAKE ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_reg(" * CR CIR WAKE BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
+ pr_reg(" * CR CIR WAKE IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+
+ nvt_efm_disable(nvt);
+
+ pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
+ pr_reg(" * IRCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
+ pr_reg(" * IRSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
+ pr_reg(" * IREN: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
+ pr_reg(" * FIFO CMP DEEP: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
+ pr_reg(" * FIFO CMP TOL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
+ pr_reg(" * FIFO COUNT: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
+ pr_reg(" * SLCH: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
+ pr_reg(" * SLCL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
+ pr_reg(" * FIFOCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
+ pr_reg(" * SRXFSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
+ pr_reg(" * SAMPLE RX FIFO: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
+ pr_reg(" * WR FIFO DATA: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
+ pr_reg(" * RD FIFO ONLY: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ pr_reg(" * RD FIFO ONLY IDX: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
+ pr_reg(" * FIFO IGNORE: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
+ pr_reg(" * IRFSM: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
+
+ fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
+ pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
+ pr_reg("* Contents = ");
+ for (i = 0; i < fifo_len; i++)
+ printk(KERN_CONT "%02x ",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ printk(KERN_CONT "\n");
+}
+
+/* detect hardware features */
+static int nvt_hw_detect(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ u8 chip_major, chip_minor;
+ int ret = 0;
+
+ nvt_efm_enable(nvt);
+
+ /* Check if we're wired for the alternate EFER setup */
+ chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
+ if (chip_major == 0xff) {
+ nvt->cr_efir = CR_EFIR2;
+ nvt->cr_efdr = CR_EFDR2;
+ nvt_efm_enable(nvt);
+ chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
+ }
+
+ chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
+ nvt_dbg("%s: chip id: 0x%02x 0x%02x", chip_id, chip_major, chip_minor);
+
+ if (chip_major != CHIP_ID_HIGH &&
+ (chip_minor != CHIP_ID_LOW || chip_minor != CHIP_ID_LOW2))
+ ret = -ENODEV;
+
+ nvt_efm_disable(nvt);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->chip_major = chip_major;
+ nvt->chip_minor = chip_minor;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return ret;
+}
+
+static void nvt_cir_ldev_init(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ /* output pin selection (Pin95=CIRRX, Pin96=CIRTX1, WB enabled) */
+ val = nvt_cr_read(nvt, CR_OUTPUT_PIN_SEL);
+ val &= OUTPUT_PIN_SEL_MASK;
+ val |= (OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB);
+ nvt_cr_write(nvt, val, CR_OUTPUT_PIN_SEL);
+
+ /* Select CIR logical device and enable */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+
+ nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
+
+ nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
+ nvt->cir_addr, nvt->cir_irq);
+}
+
+static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
+{
+ /* Select ACPI logical device, enable it and CIR Wake */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ /* Enable CIR Wake via PSOUT# (Pin60) */
+ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
+
+ /* enable cir interrupt of mouse/keyboard IRQ event */
+ nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
+
+ /* enable pme interrupt of cir wakeup event */
+ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
+
+ /* Select CIR Wake logical device and enable */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+
+ nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
+
+ nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
+ nvt->cir_wake_addr, nvt->cir_wake_irq);
+}
+
+/* clear out the hardware's cir rx fifo */
+static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
+ nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
+}
+
+/* clear out the hardware's cir wake rx fifo */
+static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
+ nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
+ CIR_WAKE_FIFOCON);
+}
+
+/* clear out the hardware's cir tx fifo */
+static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
+{
+ u8 val;
+
+ val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
+ nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
+}
+
+/* enable RX Trigger Level Reach and Packet End interrupts */
+static void nvt_set_cir_iren(struct nvt_dev *nvt)
+{
+ u8 iren;
+
+ iren = CIR_IREN_RTR | CIR_IREN_PE;
+ nvt_cir_reg_write(nvt, iren, CIR_IREN);
+}
+
+static void nvt_cir_regs_init(struct nvt_dev *nvt)
+{
+ /* set sample limit count (PE interrupt raised when reached) */
+ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
+ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);
+
+ /* set fifo irq trigger levels */
+ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
+ CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);
+
+ /*
+ * Enable TX and RX, specify carrier on = low, off = high, and set
+ * sample period (currently 50us)
+ */
+ nvt_cir_reg_write(nvt,
+ CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+ CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_IRCON);
+
+ /* clear hardware rx and tx fifos */
+ nvt_clear_cir_fifo(nvt);
+ nvt_clear_tx_fifo(nvt);
+
+ /* clear any and all stray interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* and finally, enable interrupts */
+ nvt_set_cir_iren(nvt);
+}
+
+static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
+{
+ /* set number of bytes needed for wake key comparison (default 67) */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+
+ /* set tolerance/variance allowed per byte during wake compare */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
+ CIR_WAKE_FIFO_CMP_TOL);
+
+ /* set sample limit count (PE interrupt raised when reached) */
+ nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
+ nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);
+
+ /* set cir wake fifo rx trigger level (currently 67) */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
+ CIR_WAKE_FIFOCON);
+
+ /*
+ * Enable TX and RX, specify carrier on = low, off = high, and set
+ * sample period (currently 50us)
+ */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
+ CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
+ CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_WAKE_IRCON);
+
+ /* clear cir wake rx fifo */
+ nvt_clear_cir_wake_fifo(nvt);
+
+ /* clear any and all stray interrupts */
+ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+}
+
+static void nvt_enable_wake(struct nvt_dev *nvt)
+{
+ nvt_efm_enable(nvt);
+
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
+ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
+ nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
+ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
+
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
+ CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
+ CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_WAKE_IRCON);
+ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+}
+
+/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
+static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
+{
+ u32 count, carrier, duration = 0;
+ int i;
+
+ count = nvt_cir_reg_read(nvt, CIR_FCCL) |
+ nvt_cir_reg_read(nvt, CIR_FCCH) << 8;
+
+ for (i = 0; i < nvt->pkts; i++) {
+ if (nvt->buf[i] & BUF_PULSE_BIT)
+ duration += nvt->buf[i] & BUF_LEN_MASK;
+ }
+
+ duration *= SAMPLE_PERIOD;
+
+ if (!count || !duration) {
+ nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
+ count, duration);
+ return 0;
+ }
+
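+ /* e.g. 380 carrier cycles counted over 10000 us of pulses -> 38000 Hz */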
+ carrier = (count * 1000000) / duration;
+
+ if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
+ nvt_dbg("WTF? Carrier frequency out of range!");
+
+ nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
+ carrier, count, duration);
+
+ return carrier;
+}
+
+/*
+ * set carrier frequency
+ *
+ * set carrier on 2 registers: CP & CC
+ * always set CP as 0x81
+ * set CC by SPEC, CC = 3MHz/carrier - 1
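+ * e.g. for a 38 kHz carrier, CC = 3000000 / 38000 - 1 = 77 (0x4d)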
+ */
+static int nvt_set_tx_carrier(void *data, u32 carrier)
+{
+ struct nvt_dev *nvt = data;
+ u16 val;
+
+ nvt_cir_reg_write(nvt, 1, CIR_CP);
+ val = 3000000 / (carrier) - 1;
+ nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
+
+ nvt_dbg("cp: 0x%x cc: 0x%x\n",
+ nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
+
+ return 0;
+}
+
+/*
+ * nvt_tx_ir
+ *
+ * 1) clean TX fifo first (handled by AP)
+ * 2) copy data from user space
+ * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
+ * 4) send 9 packets to TX FIFO to open TTR
+ * in interrupt_handler:
+ * 5) send all data out
+ * go back to write():
+ * 6) disable TX interrupts, re-enable RX interrupts
+ *
+ * The key problem with this function is that user space data may be larger
+ * than the driver's data buffer length. So nvt_tx_ir() will only copy
+ * TX_BUF_LEN bytes into buf, and keep the current copy position in
+ * cur_buf_num. The driver's buffer count may also be larger than TXFCONT
+ * (0xff), so the interrupt handler has to keep setting TXFCONT to 0xff
+ * until buf_count is less than 0xff.
+ */
+static int nvt_tx_ir(void *priv, int *txbuf, u32 n)
+{
+ struct nvt_dev *nvt = priv;
+ unsigned long flags;
+ size_t cur_count;
+ unsigned int i;
+ u8 iren;
+ int ret;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+
+ if (n >= TX_BUF_LEN) {
+ nvt->tx.buf_count = cur_count = TX_BUF_LEN;
+ ret = TX_BUF_LEN;
+ } else {
+ nvt->tx.buf_count = cur_count = n;
+ ret = n;
+ }
+
+ memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
+
+ nvt->tx.cur_buf_num = 0;
+
+ /* save currently enabled interrupts */
+ iren = nvt_cir_reg_read(nvt, CIR_IREN);
+
+ /* now disable all interrupts, save TFU & TTR */
+ nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);
+
+ nvt->tx.tx_state = ST_TX_REPLY;
+
+ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
+ CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
+
+ /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
+ for (i = 0; i < 9; i++)
+ nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
+
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ /* restore enabled interrupts to prior state */
+ nvt_cir_reg_write(nvt, iren, CIR_IREN);
+
+ return ret;
+}
+
+/* dump contents of the last rx buffer we got from the hw rx fifo */
+static void nvt_dump_rx_buf(struct nvt_dev *nvt)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
+ for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
+ printk(KERN_CONT "0x%02x ", nvt->buf[i]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * Process raw data in rx driver buffer, store it in raw IR event kfifo,
+ * trigger decode when appropriate.
+ *
+ * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
+ * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
+ * (default 50us) intervals for that pulse/space. A discrete signal is
+ * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
+ * to signal more IR coming (repeats) or end of IR, respectively. We store
+ * sample data in the raw event kfifo until we see 0x7<something> (except f)
+ * or 0x80, at which time, we trigger a decode operation.
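+ *
+ * e.g. a sample byte of 0xa3 has the pulse bit set and a count of 0x23
+ * (35), i.e. a pulse of roughly 35 * 50us = 1750us.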
+ */
+static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
+{
+ DEFINE_IR_RAW_EVENT(rawir);
+ unsigned int count;
+ u32 carrier;
+ u8 sample;
+ int i;
+
+ nvt_dbg_verbose("%s firing", __func__);
+
+ if (debug)
+ nvt_dump_rx_buf(nvt);
+
+ if (nvt->carrier_detect_enabled)
+ carrier = nvt_rx_carrier_detect(nvt);
+
+ count = nvt->pkts;
+ nvt_dbg_verbose("Processing buffer of len %d", count);
+
+ for (i = 0; i < count; i++) {
+ nvt->pkts--;
+ sample = nvt->buf[i];
+
+ rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
+ rawir.duration = (sample & BUF_LEN_MASK)
+ * SAMPLE_PERIOD * 1000;
+
+ if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
+ if (nvt->rawir.pulse == rawir.pulse)
+ nvt->rawir.duration += rawir.duration;
+ else {
+ nvt->rawir.duration = rawir.duration;
+ nvt->rawir.pulse = rawir.pulse;
+ }
+ continue;
+ }
+
+ rawir.duration += nvt->rawir.duration;
+
+ init_ir_raw_event(&nvt->rawir);
+ nvt->rawir.duration = 0;
+ nvt->rawir.pulse = rawir.pulse;
+
+ if (sample == BUF_PULSE_BIT)
+ rawir.pulse = false;
+
+ if (rawir.duration) {
+ nvt_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space",
+ rawir.duration);
+
+ ir_raw_event_store(nvt->rdev, &rawir);
+ }
+
+ /*
+ * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
+ * indicates end of IR signal, but new data incoming. In both
+ * cases, it means we're ready to call ir_raw_event_handle
+ */
+ if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
+ (sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
+ ir_raw_event_handle(nvt->rdev);
+ }
+
+ if (nvt->pkts) {
+ nvt_dbg("Odd, pkts should be 0 now... (it's %u)", nvt->pkts);
+ nvt->pkts = 0;
+ }
+
+ nvt_dbg_verbose("%s done", __func__);
+}
+
+static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
+{
+ nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");
+
+ nvt->pkts = 0;
+ nvt_clear_cir_fifo(nvt);
+ ir_raw_event_reset(nvt->rdev);
+}
+
+/* copy data from hardware rx fifo into driver buffer */
+static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ u8 fifocount, val;
+ unsigned int b_idx;
+ bool overrun = false;
+ int i;
+
+ /* Get count of how many bytes to read from RX FIFO */
+ fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
+ /* if we get 0xff, probably means the logical dev is disabled */
+ if (fifocount == 0xff)
+ return;
+ /* watch out for a fifo overrun condition */
+ else if (fifocount > RX_BUF_LEN) {
+ overrun = true;
+ fifocount = RX_BUF_LEN;
+ }
+
+ nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ b_idx = nvt->pkts;
+
+ /* This should never happen, but let's check anyway... */
+ if (b_idx + fifocount > RX_BUF_LEN) {
+ nvt_process_rx_ir_data(nvt);
+ b_idx = 0;
+ }
+
+ /* Read fifocount bytes from CIR Sample RX FIFO register */
+ for (i = 0; i < fifocount; i++) {
+ val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
+ nvt->buf[b_idx + i] = val;
+ }
+
+ nvt->pkts += fifocount;
+ nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
+
+ nvt_process_rx_ir_data(nvt);
+
+ if (overrun)
+ nvt_handle_rx_fifo_overrun(nvt);
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+}
+
+static void nvt_cir_log_irqs(u8 status, u8 iren)
+{
+ nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
+ status, iren,
+ status & CIR_IRSTS_RDR ? " RDR" : "",
+ status & CIR_IRSTS_RTR ? " RTR" : "",
+ status & CIR_IRSTS_PE ? " PE" : "",
+ status & CIR_IRSTS_RFO ? " RFO" : "",
+ status & CIR_IRSTS_TE ? " TE" : "",
+ status & CIR_IRSTS_TTR ? " TTR" : "",
+ status & CIR_IRSTS_TFU ? " TFU" : "",
+ status & CIR_IRSTS_GH ? " GH" : "",
+ status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
+ CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
+ CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
+}
+
+static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
+{
+ unsigned long flags;
+ bool tx_inactive;
+ u8 tx_state;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ tx_state = nvt->tx.tx_state;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ tx_inactive = (tx_state == ST_TX_NONE);
+
+ return tx_inactive;
+}
+
+/* interrupt service routine for incoming and outgoing CIR data */
+static irqreturn_t nvt_cir_isr(int irq, void *data)
+{
+ struct nvt_dev *nvt = data;
+ u8 status, iren, cur_state;
+ unsigned long flags;
+
+ nvt_dbg_verbose("%s firing", __func__);
+
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_efm_disable(nvt);
+
+ /*
+ * Get IR Status register contents. Write 1 to ack/clear
+ *
+ * bit: reg name - description
+ * 7: CIR_IRSTS_RDR - RX Data Ready
+ * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
+ * 5: CIR_IRSTS_PE - Packet End
+ * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
+ * 3: CIR_IRSTS_TE - TX FIFO Empty
+ * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
+ * 1: CIR_IRSTS_TFU - TX FIFO Underrun
+ * 0: CIR_IRSTS_GH - Min Length Detected
+ */
+ status = nvt_cir_reg_read(nvt, CIR_IRSTS);
+ if (!status) {
+ nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ /* ack/clear all irq flags we've got */
+ nvt_cir_reg_write(nvt, status, CIR_IRSTS);
+ nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
+
+ /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
+ iren = nvt_cir_reg_read(nvt, CIR_IREN);
+ if (!iren) {
+ nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
+ return IRQ_RETVAL(IRQ_NONE);
+ }
+
+ if (debug)
+ nvt_cir_log_irqs(status, iren);
+
+ if (status & CIR_IRSTS_RTR) {
+ /* FIXME: add code for study/learn mode */
+ /* We only do rx if not tx'ing */
+ if (nvt_cir_tx_inactive(nvt))
+ nvt_get_rx_ir_data(nvt);
+ }
+
+ if (status & CIR_IRSTS_PE) {
+ if (nvt_cir_tx_inactive(nvt))
+ nvt_get_rx_ir_data(nvt);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ cur_state = nvt->study_state;
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ if (cur_state == ST_STUDY_NONE)
+ nvt_clear_cir_fifo(nvt);
+ }
+
+ if (status & CIR_IRSTS_TE)
+ nvt_clear_tx_fifo(nvt);
+
+ if (status & CIR_IRSTS_TTR) {
+ unsigned int pos, count;
+ u8 tmp;
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+
+ pos = nvt->tx.cur_buf_num;
+ count = nvt->tx.buf_count;
+
+ /* Write data into the hardware tx fifo while pos < count */
+ if (pos < count) {
+ nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
+ nvt->tx.cur_buf_num++;
+ /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
+ } else {
+ tmp = nvt_cir_reg_read(nvt, CIR_IREN);
+ nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
+ }
+
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ }
+
+ if (status & CIR_IRSTS_TFU) {
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ if (nvt->tx.tx_state == ST_TX_REPLY) {
+ nvt->tx.tx_state = ST_TX_REQUEST;
+ wake_up(&nvt->tx.queue);
+ }
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ }
+
+ nvt_dbg_verbose("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+/* Interrupt service routine for CIR Wake */
+static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
+{
+ u8 status, iren, val;
+ struct nvt_dev *nvt = data;
+ unsigned long flags;
+
+ nvt_dbg_wake("%s firing", __func__);
+
+ status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
+ if (!status)
+ return IRQ_RETVAL(IRQ_NONE);
+
+ if (status & CIR_WAKE_IRSTS_IR_PENDING)
+ nvt_clear_cir_wake_fifo(nvt);
+
+ nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
+
+ /* Interrupt may be shared with CIR, bail if Wake not enabled */
+ iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
+ if (!iren) {
+ nvt_dbg_wake("%s exiting, wake not enabled", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+ }
+
+ if ((status & CIR_WAKE_IRSTS_PE) &&
+ (nvt->wake_state == ST_WAKE_START)) {
+ while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
+ val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+ nvt_dbg("setting wake up key: 0x%x", val);
+ }
+
+ nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->wake_state = ST_WAKE_FINISH;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ }
+
+ nvt_dbg_wake("%s done", __func__);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void nvt_enable_cir(struct nvt_dev *nvt)
+{
+ /* set function enable flags */
+ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+ CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_IRCON);
+
+ nvt_efm_enable(nvt);
+
+ /* enable the CIR logical device */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ /* clear all pending interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* enable interrupts */
+ nvt_set_cir_iren(nvt);
+}
+
+static void nvt_disable_cir(struct nvt_dev *nvt)
+{
+ /* disable CIR interrupts */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+
+ /* clear any and all pending interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* clear all function enable flags */
+ nvt_cir_reg_write(nvt, 0, CIR_IRCON);
+
+ /* clear hardware rx and tx fifos */
+ nvt_clear_cir_fifo(nvt);
+ nvt_clear_tx_fifo(nvt);
+
+ nvt_efm_enable(nvt);
+
+ /* disable the CIR logical device */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+}
+
+static int nvt_open(void *data)
+{
+ struct nvt_dev *nvt = (struct nvt_dev *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->in_use = true;
+ nvt_enable_cir(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return 0;
+}
+
+static void nvt_close(void *data)
+{
+ struct nvt_dev *nvt = (struct nvt_dev *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->in_use = false;
+ nvt_disable_cir(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+}
+
+/* Allocate memory, probe hardware, and initialize everything */
+static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+{
+ struct nvt_dev *nvt = NULL;
+ struct input_dev *rdev = NULL;
+ struct ir_dev_props *props = NULL;
+ int ret = -ENOMEM;
+
+ nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
+ if (!nvt)
+ return ret;
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props)
+ goto failure;
+
+ /* input device for IR remote (and tx) */
+ rdev = input_allocate_device();
+ if (!rdev)
+ goto failure;
+
+ ret = -ENODEV;
+ /* validate pnp resources */
+ if (!pnp_port_valid(pdev, 0) ||
+ pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
+ dev_err(&pdev->dev, "IR PNP Port not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_irq_valid(pdev, 0)) {
+ dev_err(&pdev->dev, "PNP IRQ not valid!\n");
+ goto failure;
+ }
+
+ if (!pnp_port_valid(pdev, 1) ||
+ pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
+ dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
+ goto failure;
+ }
+
+ nvt->cir_addr = pnp_port_start(pdev, 0);
+ nvt->cir_irq = pnp_irq(pdev, 0);
+
+ nvt->cir_wake_addr = pnp_port_start(pdev, 1);
+ /* irq is always shared between cir and cir wake */
+ nvt->cir_wake_irq = nvt->cir_irq;
+
+ nvt->cr_efir = CR_EFIR;
+ nvt->cr_efdr = CR_EFDR;
+
+ spin_lock_init(&nvt->nvt_lock);
+ spin_lock_init(&nvt->tx.lock);
+ init_ir_raw_event(&nvt->rawir);
+
+ ret = -EBUSY;
+ /* now claim resources */
+ if (!request_region(nvt->cir_addr,
+ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
+ NVT_DRIVER_NAME, (void *)nvt))
+ goto failure;
+
+ if (!request_region(nvt->cir_wake_addr,
+ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+ goto failure;
+
+ if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
+ NVT_DRIVER_NAME, (void *)nvt))
+ goto failure;
+
+ pnp_set_drvdata(pdev, nvt);
+ nvt->pdev = pdev;
+
+ init_waitqueue_head(&nvt->tx.queue);
+
+ ret = nvt_hw_detect(nvt);
+ if (ret)
+ goto failure;
+
+ /* Initialize CIR & CIR Wake Logical Devices */
+ nvt_efm_enable(nvt);
+ nvt_cir_ldev_init(nvt);
+ nvt_cir_wake_ldev_init(nvt);
+ nvt_efm_disable(nvt);
+
+ /* Initialize CIR & CIR Wake Config Registers */
+ nvt_cir_regs_init(nvt);
+ nvt_cir_wake_regs_init(nvt);
+
+ /* Set up ir-core props */
+ props->priv = nvt;
+ props->driver_type = RC_DRIVER_IR_RAW;
+ props->allowed_protos = IR_TYPE_ALL;
+ props->open = nvt_open;
+ props->close = nvt_close;
+#if 0
+ props->min_timeout = XYZ;
+ props->max_timeout = XYZ;
+ props->timeout = XYZ;
+ /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+ props->rx_resolution = XYZ;
+
+ /* tx bits */
+ props->tx_resolution = XYZ;
+#endif
+ props->tx_ir = nvt_tx_ir;
+ props->s_tx_carrier = nvt_set_tx_carrier;
+
+ rdev->name = "Nuvoton w836x7hg Infrared Remote Transceiver";
+ rdev->id.bustype = BUS_HOST;
+ rdev->id.vendor = PCI_VENDOR_ID_WINBOND2;
+ rdev->id.product = nvt->chip_major;
+ rdev->id.version = nvt->chip_minor;
+
+ nvt->props = props;
+ nvt->rdev = rdev;
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, 1);
+
+ ret = ir_input_register(rdev, RC_MAP_RC6_MCE, props, NVT_DRIVER_NAME);
+ if (ret)
+ goto failure;
+
+ nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+ if (debug) {
+ cir_dump_regs(nvt);
+ cir_wake_dump_regs(nvt);
+ }
+
+ return 0;
+
+failure:
+ if (nvt->cir_irq)
+ free_irq(nvt->cir_irq, nvt);
+ if (nvt->cir_addr)
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
+
+ if (nvt->cir_wake_irq)
+ free_irq(nvt->cir_wake_irq, nvt);
+ if (nvt->cir_wake_addr)
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+
+ input_free_device(rdev);
+ kfree(props);
+ kfree(nvt);
+
+ return ret;
+}
+
+static void __devexit nvt_remove(struct pnp_dev *pdev)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ /* disable CIR */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+ nvt_disable_cir(nvt);
+ /* enable CIR Wake (for IR power-on) */
+ nvt_enable_wake(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ /* free resources */
+ free_irq(nvt->cir_irq, nvt);
+ free_irq(nvt->cir_wake_irq, nvt);
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+
+ ir_input_unregister(nvt->rdev);
+
+ kfree(nvt->props);
+ kfree(nvt);
+}
+
+static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ unsigned long flags;
+
+ nvt_dbg("%s called", __func__);
+
+ /* zero out misc state tracking */
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+ nvt->study_state = ST_STUDY_NONE;
+ nvt->wake_state = ST_WAKE_NONE;
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ spin_lock_irqsave(&nvt->tx.lock, flags);
+ nvt->tx.tx_state = ST_TX_NONE;
+ spin_unlock_irqrestore(&nvt->tx.lock, flags);
+
+ /* disable all CIR interrupts */
+ nvt_cir_reg_write(nvt, 0, CIR_IREN);
+
+ nvt_efm_enable(nvt);
+
+ /* disable cir logical dev */
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ /* make sure wake is enabled */
+ nvt_enable_wake(nvt);
+
+ return 0;
+}
+
+static int nvt_resume(struct pnp_dev *pdev)
+{
+ int ret = 0;
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+
+ nvt_dbg("%s called", __func__);
+
+ /* open interrupt */
+ nvt_set_cir_iren(nvt);
+
+ /* Enable CIR logical device */
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+
+ nvt_efm_disable(nvt);
+
+ nvt_cir_regs_init(nvt);
+ nvt_cir_wake_regs_init(nvt);
+
+ return ret;
+}
+
+static void nvt_shutdown(struct pnp_dev *pdev)
+{
+ struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ nvt_enable_wake(nvt);
+}
+
+static const struct pnp_device_id nvt_ids[] = {
+ { "WEC0530", 0 }, /* CIR */
+ { "NTN0530", 0 }, /* CIR for new chip's pnp id*/
+ { "", 0 },
+};
+
+static struct pnp_driver nvt_driver = {
+ .name = NVT_DRIVER_NAME,
+ .id_table = nvt_ids,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+ .probe = nvt_probe,
+ .remove = __devexit_p(nvt_remove),
+ .suspend = nvt_suspend,
+ .resume = nvt_resume,
+ .shutdown = nvt_shutdown,
+};
+
+int nvt_init(void)
+{
+ return pnp_register_driver(&nvt_driver);
+}
+
+void nvt_exit(void)
+{
+ pnp_unregister_driver(&nvt_driver);
+}
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging output");
+
+MODULE_DEVICE_TABLE(pnp, nvt_ids);
+MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
+
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(nvt_init);
+module_exit(nvt_exit);
diff --git a/drivers/media/IR/nuvoton-cir.h b/drivers/media/IR/nuvoton-cir.h
new file mode 100644
index 000000000000..62dc53017c8e
--- /dev/null
+++ b/drivers/media/IR/nuvoton-cir.h
@@ -0,0 +1,408 @@
+/*
+ * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
+ *
+ * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
+ * Copyright (C) 2009 Nuvoton PS Team
+ *
+ * Special thanks to Nuvoton for providing hardware, spec sheets and
+ * sample code upon which portions of this driver are based. Indirect
+ * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
+ * modeled after.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+
+/* driver name to register with the PnP subsystem */
+#define NVT_DRIVER_NAME "nuvoton-cir"
+
+/* debugging module parameter */
+static int debug;
+
+
+#define nvt_pr(level, text, ...) \
+ printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+#define nvt_dbg(text, ...) \
+ if (debug) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define nvt_dbg_verbose(text, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define nvt_dbg_wake(text, ...) \
+ if (debug > 2) \
+ printk(KERN_DEBUG \
+ KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+
+/*
+ * Original lirc driver said min value of 76, and recommended value of 256
+ * for the buffer length, but then used 2048. Never mind that the size of the
+ * RX FIFO is 32 bytes... So I'm using 32 for RX and 256 for TX atm, but I'm
+ * not sure if maybe that TX value is off by a factor of 8 (bits vs. bytes),
+ * and I don't have TX-capable hardware to test/debug on...
+ */
+#define TX_BUF_LEN 256
+#define RX_BUF_LEN 32
+
+struct nvt_dev {
+ struct pnp_dev *pdev;
+ struct input_dev *rdev;
+ struct ir_dev_props *props;
+ struct ir_raw_event rawir;
+
+ spinlock_t nvt_lock;
+ bool in_use;
+
+ /* for rx */
+ u8 buf[RX_BUF_LEN];
+ unsigned int pkts;
+
+ struct {
+ spinlock_t lock;
+ u8 buf[TX_BUF_LEN];
+ unsigned int buf_count;
+ unsigned int cur_buf_num;
+ wait_queue_head_t queue;
+ u8 tx_state;
+ } tx;
+
+ /* EFER Config register index/data pair */
+ u8 cr_efir;
+ u8 cr_efdr;
+
+ /* hardware I/O settings */
+ unsigned long cir_addr;
+ unsigned long cir_wake_addr;
+ int cir_irq;
+ int cir_wake_irq;
+
+ /* hardware id */
+ u8 chip_major;
+ u8 chip_minor;
+
+ /* hardware features */
+ bool hw_learning_capable;
+ bool hw_tx_capable;
+
+ /* rx settings */
+ bool learning_enabled;
+ bool carrier_detect_enabled;
+
+ /* track cir wake state */
+ u8 wake_state;
+ /* for study */
+ u8 study_state;
+ /* carrier period = 1 / frequency */
+ u32 carrier;
+};
+
+/* study states */
+#define ST_STUDY_NONE 0x0
+#define ST_STUDY_START 0x1
+#define ST_STUDY_CARRIER 0x2
+#define ST_STUDY_ALL_RECV 0x4
+
+/* wake states */
+#define ST_WAKE_NONE 0x0
+#define ST_WAKE_START 0x1
+#define ST_WAKE_FINISH 0x2
+
+/* receive states */
+#define ST_RX_WAIT_7F 0x1
+#define ST_RX_WAIT_HEAD 0x2
+#define ST_RX_WAIT_SILENT_END 0x4
+
+/* send states */
+#define ST_TX_NONE 0x0
+#define ST_TX_REQUEST 0x2
+#define ST_TX_REPLY 0x4
+
+/* buffer packet constants */
+#define BUF_PULSE_BIT 0x80
+#define BUF_LEN_MASK 0x7f
+#define BUF_REPEAT_BYTE 0x70
+#define BUF_REPEAT_MASK 0xf0
+
+/* CIR settings */
+
+/* total length of CIR and CIR WAKE */
+#define CIR_IOREG_LENGTH 0x0f
+
+/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */
+#define CIR_RX_LIMIT_COUNT 0x7d0
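+/*
+ * Illustrative note (assumption, not from a datasheet): with the default
+ * 50us sample period, 0x7d0 (2000) samples is roughly 100ms of signal;
+ * the init code is expected to program SLCH = 0x07 and SLCL = 0xd0.
+ */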
+
+/* CIR Regs */
+#define CIR_IRCON 0x00
+#define CIR_IRSTS 0x01
+#define CIR_IREN 0x02
+#define CIR_RXFCONT 0x03
+#define CIR_CP 0x04
+#define CIR_CC 0x05
+#define CIR_SLCH 0x06
+#define CIR_SLCL 0x07
+#define CIR_FIFOCON 0x08
+#define CIR_IRFIFOSTS 0x09
+#define CIR_SRXFIFO 0x0a
+#define CIR_TXFCONT 0x0b
+#define CIR_STXFIFO 0x0c
+#define CIR_FCCH 0x0d
+#define CIR_FCCL 0x0e
+#define CIR_IRFSM 0x0f
+
+/* CIR IRCON settings */
+#define CIR_IRCON_RECV 0x80
+#define CIR_IRCON_WIREN 0x40
+#define CIR_IRCON_TXEN 0x20
+#define CIR_IRCON_RXEN 0x10
+#define CIR_IRCON_WRXINV 0x08
+#define CIR_IRCON_RXINV 0x04
+
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_1 0x00
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_25 0x01
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_50 0x02
+#define CIR_IRCON_SAMPLE_PERIOD_SEL_100 0x03
+
+/* FIXME: make this a runtime option */
+/* select sample period as 50us */
+#define CIR_IRCON_SAMPLE_PERIOD_SEL CIR_IRCON_SAMPLE_PERIOD_SEL_50
+
+/* CIR IRSTS settings */
+#define CIR_IRSTS_RDR 0x80
+#define CIR_IRSTS_RTR 0x40
+#define CIR_IRSTS_PE 0x20
+#define CIR_IRSTS_RFO 0x10
+#define CIR_IRSTS_TE 0x08
+#define CIR_IRSTS_TTR 0x04
+#define CIR_IRSTS_TFU 0x02
+#define CIR_IRSTS_GH 0x01
+
+/* CIR IREN settings */
+#define CIR_IREN_RDR 0x80
+#define CIR_IREN_RTR 0x40
+#define CIR_IREN_PE 0x20
+#define CIR_IREN_RFO 0x10
+#define CIR_IREN_TE 0x08
+#define CIR_IREN_TTR 0x04
+#define CIR_IREN_TFU 0x02
+#define CIR_IREN_GH 0x01
+
+/* CIR FIFOCON settings */
+#define CIR_FIFOCON_TXFIFOCLR 0x80
+
+#define CIR_FIFOCON_TX_TRIGGER_LEV_31 0x00
+#define CIR_FIFOCON_TX_TRIGGER_LEV_24 0x10
+#define CIR_FIFOCON_TX_TRIGGER_LEV_16 0x20
+#define CIR_FIFOCON_TX_TRIGGER_LEV_8 0x30
+
+/* FIXME: make this a runtime option */
+/* select TX trigger level as 16 */
+#define CIR_FIFOCON_TX_TRIGGER_LEV CIR_FIFOCON_TX_TRIGGER_LEV_16
+
+#define CIR_FIFOCON_RXFIFOCLR 0x08
+
+#define CIR_FIFOCON_RX_TRIGGER_LEV_1 0x00
+#define CIR_FIFOCON_RX_TRIGGER_LEV_8 0x01
+#define CIR_FIFOCON_RX_TRIGGER_LEV_16 0x02
+#define CIR_FIFOCON_RX_TRIGGER_LEV_24 0x03
+
+/* FIXME: make this a runtime option */
+/* select RX trigger level as 24 */
+#define CIR_FIFOCON_RX_TRIGGER_LEV CIR_FIFOCON_RX_TRIGGER_LEV_24
+
+/* CIR IRFIFOSTS settings */
+#define CIR_IRFIFOSTS_IR_PENDING 0x80
+#define CIR_IRFIFOSTS_RX_GS 0x40
+#define CIR_IRFIFOSTS_RX_FTA 0x20
+#define CIR_IRFIFOSTS_RX_EMPTY 0x10
+#define CIR_IRFIFOSTS_RX_FULL 0x08
+#define CIR_IRFIFOSTS_TX_FTA 0x04
+#define CIR_IRFIFOSTS_TX_EMPTY 0x02
+#define CIR_IRFIFOSTS_TX_FULL 0x01
+
+
+/* CIR WAKE UP Regs */
+#define CIR_WAKE_IRCON 0x00
+#define CIR_WAKE_IRSTS 0x01
+#define CIR_WAKE_IREN 0x02
+#define CIR_WAKE_FIFO_CMP_DEEP 0x03
+#define CIR_WAKE_FIFO_CMP_TOL 0x04
+#define CIR_WAKE_FIFO_COUNT 0x05
+#define CIR_WAKE_SLCH 0x06
+#define CIR_WAKE_SLCL 0x07
+#define CIR_WAKE_FIFOCON 0x08
+#define CIR_WAKE_SRXFSTS 0x09
+#define CIR_WAKE_SAMPLE_RX_FIFO 0x0a
+#define CIR_WAKE_WR_FIFO_DATA 0x0b
+#define CIR_WAKE_RD_FIFO_ONLY 0x0c
+#define CIR_WAKE_RD_FIFO_ONLY_IDX 0x0d
+#define CIR_WAKE_FIFO_IGNORE 0x0e
+#define CIR_WAKE_IRFSM 0x0f
+
+/* CIR WAKE UP IRCON settings */
+#define CIR_WAKE_IRCON_DEC_RST 0x80
+#define CIR_WAKE_IRCON_MODE1 0x40
+#define CIR_WAKE_IRCON_MODE0 0x20
+#define CIR_WAKE_IRCON_RXEN 0x10
+#define CIR_WAKE_IRCON_R 0x08
+#define CIR_WAKE_IRCON_RXINV 0x04
+
+/* FIXME/jarod: make this a runtime option */
+/* select the same sample period as the CIR registers */
+#define CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL CIR_IRCON_SAMPLE_PERIOD_SEL_50
+
+/* CIR WAKE IRSTS Bits */
+#define CIR_WAKE_IRSTS_RDR 0x80
+#define CIR_WAKE_IRSTS_RTR 0x40
+#define CIR_WAKE_IRSTS_PE 0x20
+#define CIR_WAKE_IRSTS_RFO 0x10
+#define CIR_WAKE_IRSTS_GH 0x08
+#define CIR_WAKE_IRSTS_IR_PENDING 0x01
+
+/* CIR WAKE UP IREN Bits */
+#define CIR_WAKE_IREN_RDR 0x80
+#define CIR_WAKE_IREN_RTR 0x40
+#define CIR_WAKE_IREN_PE 0x20
+#define CIR_WAKE_IREN_RFO 0x10
+#define CIR_WAKE_IREN_TE 0x08
+#define CIR_WAKE_IREN_TTR 0x04
+#define CIR_WAKE_IREN_TFU 0x02
+#define CIR_WAKE_IREN_GH 0x01
+
+/* CIR WAKE FIFOCON settings */
+#define CIR_WAKE_FIFOCON_RXFIFOCLR 0x08
+
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_67 0x00
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_66 0x01
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_65 0x02
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_64 0x03
+
+/* FIXME: make this a runtime option */
+/* select WAKE UP RX trigger level as 67 */
+#define CIR_WAKE_FIFOCON_RX_TRIGGER_LEV CIR_WAKE_FIFOCON_RX_TRIGGER_LEV_67
+
+/* CIR WAKE SRXFSTS settings */
+#define CIR_WAKE_IRFIFOSTS_RX_GS 0x80
+#define CIR_WAKE_IRFIFOSTS_RX_FTA 0x40
+#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20
+#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10
+
+/* CIR Wake FIFO buffer is 67 bytes long */
+#define CIR_WAKE_FIFO_LEN 67
+/* CIR Wake byte comparison tolerance */
+#define CIR_WAKE_CMP_TOLERANCE 5
+
+/*
+ * Extended Function Enable Registers:
+ * Extended Function Index Register
+ * Extended Function Data Register
+ */
+#define CR_EFIR 0x2e
+#define CR_EFDR 0x2f
+
+/* Possible alternate EFER I/O addresses, depending on how the chip is wired */
+#define CR_EFIR2 0x4e
+#define CR_EFDR2 0x4f
+
+/* Extended Function Mode enable/disable magic values */
+#define EFER_EFM_ENABLE 0x87
+#define EFER_EFM_DISABLE 0xaa
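+/*
+ * Rough sketch of the Super I/O config sequence these magic values imply;
+ * the real helpers live in the .c file and this is only an assumed outline:
+ *
+ *   outb(EFER_EFM_ENABLE, CR_EFIR);    unlock extended function mode
+ *   outb(EFER_EFM_ENABLE, CR_EFIR);    (the magic byte is written twice)
+ *   outb(CR_LOGICAL_DEV_SEL, CR_EFIR); select a logical device...
+ *   outb(LOGICAL_DEV_CIR, CR_EFDR);    ...such as CIR
+ *   outb(EFER_EFM_DISABLE, CR_EFIR);   lock config space again when done
+ */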
+
+/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
+#define CHIP_ID_HIGH 0xb4
+#define CHIP_ID_LOW 0x72
+#define CHIP_ID_LOW2 0x73
+
+/* Config regs we need to care about */
+#define CR_SOFTWARE_RESET 0x02
+#define CR_LOGICAL_DEV_SEL 0x07
+#define CR_CHIP_ID_HI 0x20
+#define CR_CHIP_ID_LO 0x21
+#define CR_DEV_POWER_DOWN 0x22 /* bit 2 is CIR power, default power on */
+#define CR_OUTPUT_PIN_SEL 0x27
+#define CR_LOGICAL_DEV_EN 0x30 /* valid for all logical devices */
+/* next three regs valid for both the CIR and CIR_WAKE logical devices */
+#define CR_CIR_BASE_ADDR_HI 0x60
+#define CR_CIR_BASE_ADDR_LO 0x61
+#define CR_CIR_IRQ_RSRC 0x70
+/* next three regs valid only for ACPI logical dev */
+#define CR_ACPI_CIR_WAKE 0xe0
+#define CR_ACPI_IRQ_EVENTS 0xf6
+#define CR_ACPI_IRQ_EVENTS2 0xf7
+
+/* Logical devices that we need to care about */
+#define LOGICAL_DEV_LPT 0x01
+#define LOGICAL_DEV_CIR 0x06
+#define LOGICAL_DEV_ACPI 0x0a
+#define LOGICAL_DEV_CIR_WAKE 0x0e
+
+#define LOGICAL_DEV_DISABLE 0x00
+#define LOGICAL_DEV_ENABLE 0x01
+
+#define CIR_WAKE_ENABLE_BIT 0x08
+#define CIR_INTR_MOUSE_IRQ_BIT 0x80
+#define PME_INTR_CIR_PASS_BIT 0x08
+
+#define OUTPUT_PIN_SEL_MASK 0xbc
+#define OUTPUT_ENABLE_CIR 0x01 /* Pin95=CIRRX, Pin96=CIRTX1 */
+#define OUTPUT_ENABLE_CIRWB 0x40 /* enable wide-band sensor */
+
+/* MCE CIR signal lengths, related to the sample period */
+
+/* MCE CIR controller signal length: about 43ms
+ * 43ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define CONTROLLER_BUF_LEN_MIN 830
+
+/* MCE CIR keyboard signal length: about 26ms
+ * 26ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define KEYBOARD_BUF_LEN_MAX 650
+#define KEYBOARD_BUF_LEN_MIN 610
+
+/* MCE CIR mouse signal length: about 24ms
+ * 24ms / 50us (sample period) * 0.85 (inaccuracy)
+ */
+#define MOUSE_BUF_LEN_MIN 565
+
+#define CIR_SAMPLE_PERIOD 50
+#define CIR_SAMPLE_LOW_INACCURACY 0.85
+
+/* maximum silence time that the driver will send to lirc */
+#define MAX_SILENCE_TIME 60000
+
+#if CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_100
+#define SAMPLE_PERIOD 100
+
+#elif CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_50
+#define SAMPLE_PERIOD 50
+
+#elif CIR_IRCON_SAMPLE_PERIOD_SEL == CIR_IRCON_SAMPLE_PERIOD_SEL_25
+#define SAMPLE_PERIOD 25
+
+#else
+#define SAMPLE_PERIOD 1
+#endif
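+/*
+ * Example (assumption about the rx path in the .c file): each FIFO sample
+ * byte uses bit 7 for pulse vs. space and the low 7 bits as a count of
+ * sample periods, e.g. 0xa8 -> pulse lasting
+ * (0xa8 & BUF_LEN_MASK) * SAMPLE_PERIOD = 40 * 50 = 2000us.
+ */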
+
+/* valid carrier frequency range (Hz), per the Vista MCE definition */
+#define MAX_CARRIER 60000
+#define MIN_CARRIER 30000
diff --git a/drivers/media/IR/streamzap.c b/drivers/media/IR/streamzap.c
index 058e29fd478c..548381c35bfd 100644
--- a/drivers/media/IR/streamzap.c
+++ b/drivers/media/IR/streamzap.c
@@ -38,7 +38,7 @@
#include <linux/input.h>
#include <media/ir-core.h>
-#define DRIVER_VERSION "1.60"
+#define DRIVER_VERSION "1.61"
#define DRIVER_NAME "streamzap"
#define DRIVER_DESC "Streamzap Remote Control driver"
@@ -61,14 +61,21 @@ static struct usb_device_id streamzap_table[] = {
MODULE_DEVICE_TABLE(usb, streamzap_table);
-#define STREAMZAP_PULSE_MASK 0xf0
-#define STREAMZAP_SPACE_MASK 0x0f
-#define STREAMZAP_TIMEOUT 0xff
-#define STREAMZAP_RESOLUTION 256
+#define SZ_PULSE_MASK 0xf0
+#define SZ_SPACE_MASK 0x0f
+#define SZ_TIMEOUT 0xff
+#define SZ_RESOLUTION 256
/* number of samples buffered */
#define SZ_BUF_LEN 128
+/* from ir-rc5-sz-decoder.c */
+#ifdef CONFIG_IR_RC5_SZ_DECODER_MODULE
+#define load_rc5_sz_decode() request_module("ir-rc5-sz-decoder")
+#else
+#define load_rc5_sz_decode() 0
+#endif
+
enum StreamzapDecoderState {
PulseSpace,
FullPulse,
@@ -81,7 +88,6 @@ struct streamzap_ir {
/* ir-core */
struct ir_dev_props *props;
- struct ir_raw_event rawir;
/* core device info */
struct device *dev;
@@ -98,17 +104,6 @@ struct streamzap_ir {
dma_addr_t dma_in;
unsigned int buf_in_len;
- /* timer used to support delay buffering */
- struct timer_list delay_timer;
- bool timer_running;
- spinlock_t timer_lock;
- struct timer_list flush_timer;
- bool flush;
-
- /* delay buffer */
- struct kfifo fifo;
- bool fifo_initialized;
-
/* track what state we're in */
enum StreamzapDecoderState decoder_state;
/* tracks whether we are currently receiving some signal */
@@ -118,7 +113,7 @@ struct streamzap_ir {
/* start time of signal; necessary for gap tracking */
struct timeval signal_last;
struct timeval signal_start;
- /* bool timeout_enabled; */
+ bool timeout_enabled;
char name[128];
char phys[64];
@@ -143,122 +138,16 @@ static struct usb_driver streamzap_driver = {
.id_table = streamzap_table,
};
-static void streamzap_stop_timer(struct streamzap_ir *sz)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (sz->timer_running) {
- sz->timer_running = false;
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- del_timer_sync(&sz->delay_timer);
- } else {
- spin_unlock_irqrestore(&sz->timer_lock, flags);
- }
-}
-
-static void streamzap_flush_timeout(unsigned long arg)
-{
- struct streamzap_ir *sz = (struct streamzap_ir *)arg;
-
- dev_info(sz->dev, "%s: callback firing\n", __func__);
-
- /* finally start accepting data */
- sz->flush = false;
-}
-
-static void streamzap_delay_timeout(unsigned long arg)
-{
- struct streamzap_ir *sz = (struct streamzap_ir *)arg;
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- unsigned long flags;
- int len, ret;
- static unsigned long delay;
- bool wake = false;
-
- /* deliver data every 10 ms */
- delay = msecs_to_jiffies(10);
-
- spin_lock_irqsave(&sz->timer_lock, flags);
-
- if (kfifo_len(&sz->fifo) > 0) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- }
-
- len = kfifo_len(&sz->fifo);
- if (len > 0) {
- while ((len < SZ_BUF_LEN / 2) &&
- (len < SZ_BUF_LEN * sizeof(int))) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- len = kfifo_len(&sz->fifo);
- }
- if (sz->timer_running)
- mod_timer(&sz->delay_timer, jiffies + delay);
-
- } else {
- sz->timer_running = false;
- }
-
- if (wake)
- ir_raw_event_handle(sz->idev);
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
-}
-
-static void streamzap_flush_delay_buffer(struct streamzap_ir *sz)
+static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- bool wake = false;
- int ret;
-
- while (kfifo_len(&sz->fifo) > 0) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- wake = true;
- }
-
- if (wake)
- ir_raw_event_handle(sz->idev);
-}
-
-static void sz_push(struct streamzap_ir *sz)
-{
- struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sz->timer_lock, flags);
- if (kfifo_len(&sz->fifo) >= sizeof(int) * SZ_BUF_LEN) {
- ret = kfifo_out(&sz->fifo, &rawir, sizeof(rawir));
- if (ret != sizeof(rawir))
- dev_err(sz->dev, "Problem w/kfifo_out...\n");
- ir_raw_event_store(sz->idev, &rawir);
- }
-
- kfifo_in(&sz->fifo, &sz->rawir, sizeof(rawir));
-
- if (!sz->timer_running) {
- sz->delay_timer.expires = jiffies + (HZ / 10);
- add_timer(&sz->delay_timer);
- sz->timer_running = true;
- }
-
- spin_unlock_irqrestore(&sz->timer_lock, flags);
+ ir_raw_event_store(sz->idev, &rawir);
}
static void sz_push_full_pulse(struct streamzap_ir *sz,
unsigned char value)
{
+ DEFINE_IR_RAW_EVENT(rawir);
+
if (sz->idle) {
long deltv;
@@ -266,57 +155,59 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
do_gettimeofday(&sz->signal_start);
deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec;
- sz->rawir.pulse = false;
+ rawir.pulse = false;
if (deltv > 15) {
/* really long time */
- sz->rawir.duration = IR_MAX_DURATION;
+ rawir.duration = IR_MAX_DURATION;
} else {
- sz->rawir.duration = (int)(deltv * 1000000 +
+ rawir.duration = (int)(deltv * 1000000 +
sz->signal_start.tv_usec -
sz->signal_last.tv_usec);
- sz->rawir.duration -= sz->sum;
- sz->rawir.duration *= 1000;
- sz->rawir.duration &= IR_MAX_DURATION;
+ rawir.duration -= sz->sum;
+ rawir.duration *= 1000;
+ rawir.duration &= IR_MAX_DURATION;
}
- dev_dbg(sz->dev, "ls %u\n", sz->rawir.duration);
- sz_push(sz);
+ dev_dbg(sz->dev, "ls %u\n", rawir.duration);
+ sz_push(sz, rawir);
- sz->idle = 0;
+ sz->idle = false;
sz->sum = 0;
}
- sz->rawir.pulse = true;
- sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
- sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
- sz->sum += sz->rawir.duration;
- sz->rawir.duration *= 1000;
- sz->rawir.duration &= IR_MAX_DURATION;
- dev_dbg(sz->dev, "p %u\n", sz->rawir.duration);
- sz_push(sz);
+ rawir.pulse = true;
+ rawir.duration = ((int) value) * SZ_RESOLUTION;
+ rawir.duration += SZ_RESOLUTION / 2;
+ sz->sum += rawir.duration;
+ rawir.duration *= 1000;
+ rawir.duration &= IR_MAX_DURATION;
+ dev_dbg(sz->dev, "p %u\n", rawir.duration);
+ sz_push(sz, rawir);
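+ /*
+ * e.g. (illustration): value 0x28 (40) yields 40 * SZ_RESOLUTION +
+ * SZ_RESOLUTION / 2 = 10368, scaled by the *1000 above (apparently
+ * microseconds to nanoseconds) before being pushed.
+ */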
}
static void sz_push_half_pulse(struct streamzap_ir *sz,
unsigned char value)
{
- sz_push_full_pulse(sz, (value & STREAMZAP_PULSE_MASK) >> 4);
+ sz_push_full_pulse(sz, (value & SZ_PULSE_MASK) >> 4);
}
static void sz_push_full_space(struct streamzap_ir *sz,
unsigned char value)
{
- sz->rawir.pulse = false;
- sz->rawir.duration = ((int) value) * STREAMZAP_RESOLUTION;
- sz->rawir.duration += STREAMZAP_RESOLUTION / 2;
- sz->sum += sz->rawir.duration;
- sz->rawir.duration *= 1000;
- dev_dbg(sz->dev, "s %u\n", sz->rawir.duration);
- sz_push(sz);
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ rawir.pulse = false;
+ rawir.duration = ((int) value) * SZ_RESOLUTION;
+ rawir.duration += SZ_RESOLUTION / 2;
+ sz->sum += rawir.duration;
+ rawir.duration *= 1000;
+ dev_dbg(sz->dev, "s %u\n", rawir.duration);
+ sz_push(sz, rawir);
}
static void sz_push_half_space(struct streamzap_ir *sz,
unsigned long value)
{
- sz_push_full_space(sz, value & STREAMZAP_SPACE_MASK);
+ sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
/**
@@ -330,10 +221,8 @@ static void streamzap_callback(struct urb *urb)
struct streamzap_ir *sz;
unsigned int i;
int len;
- #if 0
- static int timeout = (((STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION) &
+ static int timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
IR_MAX_DURATION) | 0x03000000);
- #endif
if (!urb)
return;
@@ -356,57 +245,53 @@ static void streamzap_callback(struct urb *urb)
}
dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
- if (!sz->flush) {
- for (i = 0; i < urb->actual_length; i++) {
- dev_dbg(sz->dev, "%d: %x\n", i,
- (unsigned char)sz->buf_in[i]);
- switch (sz->decoder_state) {
- case PulseSpace:
- if ((sz->buf_in[i] & STREAMZAP_PULSE_MASK) ==
- STREAMZAP_PULSE_MASK) {
- sz->decoder_state = FullPulse;
- continue;
- } else if ((sz->buf_in[i] & STREAMZAP_SPACE_MASK)
- == STREAMZAP_SPACE_MASK) {
- sz_push_half_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = FullSpace;
- continue;
- } else {
- sz_push_half_pulse(sz, sz->buf_in[i]);
- sz_push_half_space(sz, sz->buf_in[i]);
- }
- break;
- case FullPulse:
- sz_push_full_pulse(sz, sz->buf_in[i]);
- sz->decoder_state = IgnorePulse;
- break;
- case FullSpace:
- if (sz->buf_in[i] == STREAMZAP_TIMEOUT) {
- sz->idle = 1;
- streamzap_stop_timer(sz);
- #if 0
- if (sz->timeout_enabled) {
- sz->rawir.pulse = false;
- sz->rawir.duration = timeout;
- sz->rawir.duration *= 1000;
- sz_push(sz);
- }
- #endif
- streamzap_flush_delay_buffer(sz);
- } else
- sz_push_full_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
- case IgnorePulse:
- if ((sz->buf_in[i]&STREAMZAP_SPACE_MASK) ==
- STREAMZAP_SPACE_MASK) {
- sz->decoder_state = FullSpace;
- continue;
- }
+ for (i = 0; i < len; i++) {
+ dev_dbg(sz->dev, "sz idx %d: %x\n",
+ i, (unsigned char)sz->buf_in[i]);
+ switch (sz->decoder_state) {
+ case PulseSpace:
+ if ((sz->buf_in[i] & SZ_PULSE_MASK) ==
+ SZ_PULSE_MASK) {
+ sz->decoder_state = FullPulse;
+ continue;
+ } else if ((sz->buf_in[i] & SZ_SPACE_MASK)
+ == SZ_SPACE_MASK) {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = FullSpace;
+ continue;
+ } else {
+ sz_push_half_pulse(sz, sz->buf_in[i]);
sz_push_half_space(sz, sz->buf_in[i]);
- sz->decoder_state = PulseSpace;
- break;
}
+ break;
+ case FullPulse:
+ sz_push_full_pulse(sz, sz->buf_in[i]);
+ sz->decoder_state = IgnorePulse;
+ break;
+ case FullSpace:
+ if (sz->buf_in[i] == SZ_TIMEOUT) {
+ DEFINE_IR_RAW_EVENT(rawir);
+
+ rawir.pulse = false;
+ rawir.duration = timeout;
+ sz->idle = true;
+ if (sz->timeout_enabled)
+ sz_push(sz, rawir);
+ ir_raw_event_handle(sz->idev);
+ } else {
+ sz_push_full_space(sz, sz->buf_in[i]);
+ }
+ sz->decoder_state = PulseSpace;
+ break;
+ case IgnorePulse:
+ if ((sz->buf_in[i] & SZ_SPACE_MASK) ==
+ SZ_SPACE_MASK) {
+ sz->decoder_state = FullSpace;
+ continue;
+ }
+ sz_push_half_space(sz, sz->buf_in[i]);
+ sz->decoder_state = PulseSpace;
+ break;
}
}
@@ -446,12 +331,11 @@ static struct input_dev *streamzap_init_input_dev(struct streamzap_ir *sz)
props->priv = sz;
props->driver_type = RC_DRIVER_IR_RAW;
- /* FIXME: not sure about supported protocols, check on this */
- props->allowed_protos = IR_TYPE_RC5 | IR_TYPE_RC6;
+ props->allowed_protos = IR_TYPE_ALL;
sz->props = props;
- ret = ir_input_register(idev, RC_MAP_RC5_STREAMZAP, props, DRIVER_NAME);
+ ret = ir_input_register(idev, RC_MAP_STREAMZAP, props, DRIVER_NAME);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
goto irdev_failed;
@@ -467,29 +351,6 @@ idev_alloc_failed:
return NULL;
}
-static int streamzap_delay_buf_init(struct streamzap_ir *sz)
-{
- int ret;
-
- ret = kfifo_alloc(&sz->fifo, sizeof(int) * SZ_BUF_LEN,
- GFP_KERNEL);
- if (ret == 0)
- sz->fifo_initialized = 1;
-
- return ret;
-}
-
-static void streamzap_start_flush_timer(struct streamzap_ir *sz)
-{
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = true;
- add_timer(&sz->flush_timer);
-
- sz->urb_in->dev = sz->usbdev;
- if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
- dev_err(sz->dev, "urb submit failed\n");
-}
-
/**
* streamzap_probe
*
@@ -575,35 +436,21 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
- retval = streamzap_delay_buf_init(sz);
- if (retval) {
- dev_err(&intf->dev, "%s: delay buffer init failed\n", __func__);
- goto free_urb_in;
- }
-
sz->idev = streamzap_init_input_dev(sz);
if (!sz->idev)
goto input_dev_fail;
sz->idle = true;
sz->decoder_state = PulseSpace;
+ /* FIXME: don't yet have a way to set this */
+ sz->timeout_enabled = true;
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->timeout_enabled = false;
- sz->min_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
- sz->max_timeout = STREAMZAP_TIMEOUT * STREAMZAP_RESOLUTION * 1000;
+ sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+ sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
#endif
- init_timer(&sz->delay_timer);
- sz->delay_timer.function = streamzap_delay_timeout;
- sz->delay_timer.data = (unsigned long)sz;
- spin_lock_init(&sz->timer_lock);
-
- init_timer(&sz->flush_timer);
- sz->flush_timer.function = streamzap_flush_timeout;
- sz->flush_timer.data = (unsigned long)sz;
-
do_gettimeofday(&sz->signal_start);
/* Complete final initialisations */
@@ -615,16 +462,18 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
usb_set_intfdata(intf, sz);
- streamzap_start_flush_timer(sz);
+ if (usb_submit_urb(sz->urb_in, GFP_ATOMIC))
+ dev_err(sz->dev, "urb submit failed\n");
dev_info(sz->dev, "Registered %s on usb%d:%d\n", name,
usbdev->bus->busnum, usbdev->devnum);
+ /* Load the streamzap not-quite-rc5 decoder too */
+ load_rc5_sz_decode();
+
return 0;
input_dev_fail:
- kfifo_free(&sz->fifo);
-free_urb_in:
usb_free_urb(sz->urb_in);
free_buf_in:
usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in);
@@ -654,13 +503,6 @@ static void streamzap_disconnect(struct usb_interface *interface)
if (!sz)
return;
- if (sz->flush) {
- sz->flush = false;
- del_timer_sync(&sz->flush_timer);
- }
-
- streamzap_stop_timer(sz);
-
sz->usbdev = NULL;
ir_input_unregister(sz->idev);
usb_kill_urb(sz->urb_in);
@@ -674,13 +516,6 @@ static int streamzap_suspend(struct usb_interface *intf, pm_message_t message)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
- if (sz->flush) {
- sz->flush = false;
- del_timer_sync(&sz->flush_timer);
- }
-
- streamzap_stop_timer(sz);
-
usb_kill_urb(sz->urb_in);
return 0;
@@ -690,13 +525,6 @@ static int streamzap_resume(struct usb_interface *intf)
{
struct streamzap_ir *sz = usb_get_intfdata(intf);
- if (sz->fifo_initialized)
- kfifo_reset(&sz->fifo);
-
- sz->flush_timer.expires = jiffies + HZ;
- sz->flush = true;
- add_timer(&sz->flush_timer);
-
if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
dev_err(sz->dev, "Error sumbiting urb\n");
return -EIO;
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 4da2a54cb8bd..e3fedc60fe77 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -56,7 +56,7 @@ void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 48cb154c7a46..3d88542612ea 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -414,7 +414,6 @@ int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c
i2c_adapter->dev.parent = &dev->pci->dev;
i2c_adapter->algo = &saa7146_algo;
i2c_adapter->algo_data = NULL;
- i2c_adapter->id = I2C_HW_SAA7146;
i2c_adapter->timeout = SAA7146_I2C_TIMEOUT;
i2c_adapter->retries = SAA7146_I2C_RETRIES;
}
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 8224c301d050..2d4533ab22b7 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
sizeof(struct saa7146_buf),
- file);
+ file, NULL);
init_timer(&fh->vbi_read_timeout);
fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index a212a91a30f0..741c5732b430 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1386,7 +1386,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7146_buf),
- file);
+ file, NULL);
return 0;
}
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index b3ed5daaacf2..2385e6cca635 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -179,4 +179,11 @@ config MEDIA_TUNER_MAX2165
help
A driver for the silicon tuner MAX2165 from Maxim.
+config MEDIA_TUNER_TDA18218
+ tristate "NXP TDA18218 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ NXP TDA18218 silicon tuner driver.
+
endif # MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index a5438523f30d..96da03d349ca 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_MEDIA_TUNER_MXL5005S) += mxl5005s.o
obj-$(CONFIG_MEDIA_TUNER_MXL5007T) += mxl5007t.o
obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
new file mode 100644
index 000000000000..8da1fdeddaa7
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218.c
@@ -0,0 +1,334 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "tda18218.h"
+#include "tda18218_priv.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+
+/* write multiple registers */
+static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+{
+ int ret;
+ u8 buf[1+len], quotient, remainder, i, msg_len, msg_len_max;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_address,
+ .flags = 0,
+ .buf = buf,
+ }
+ };
+
+ msg_len_max = priv->cfg->i2c_wr_max - 1;
+ quotient = len / msg_len_max;
+ remainder = len % msg_len_max;
+ msg_len = msg_len_max;
+ for (i = 0; (i <= quotient && remainder); i++) {
+ if (i == quotient) /* set len of the last msg */
+ msg_len = remainder;
+
+ msg[0].len = msg_len + 1;
+ buf[0] = reg + i * msg_len_max;
+ memcpy(&buf[1], &val[i * msg_len_max], msg_len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret != 1)
+ break;
+ }
+
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed ret:%d reg:%02x len:%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
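+/*
+ * Worked example, assuming i2c_wr_max = 8 (7 data bytes per message):
+ * writing all 59 registers from reg 0x00 becomes 8 messages of 7 bytes
+ * (starting at regs 0x00, 0x07, ..., 0x31) plus a final 3-byte message at
+ * reg 0x38, each prefixed with its starting register address.
+ */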
+
+/* read multiple registers */
+static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
+{
+ int ret;
+ u8 buf[reg+len]; /* reads must always start from reg 0x00 */
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg->i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = "\x00",
+ }, {
+ .addr = priv->cfg->i2c_address,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ memcpy(val, &buf[reg], len);
+ ret = 0;
+ } else {
+ warn("i2c rd failed ret:%d reg:%02x len:%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int tda18218_wr_reg(struct tda18218_priv *priv, u8 reg, u8 val)
+{
+ return tda18218_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+
+static int tda18218_rd_reg(struct tda18218_priv *priv, u8 reg, u8 *val)
+{
+ return tda18218_rd_regs(priv, reg, val, 1);
+}
+
+static int tda18218_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 buf[3], i, BP_Filter, LP_Fc;
+ u32 LO_Frac;
+ /* TODO: find out correct AGC algorithm */
+ u8 agc[][2] = {
+ { R20_AGC11, 0x60 },
+ { R23_AGC21, 0x02 },
+ { R20_AGC11, 0xa0 },
+ { R23_AGC21, 0x09 },
+ { R20_AGC11, 0xe0 },
+ { R23_AGC21, 0x0c },
+ { R20_AGC11, 0x40 },
+ { R23_AGC21, 0x01 },
+ { R20_AGC11, 0x80 },
+ { R23_AGC21, 0x08 },
+ { R20_AGC11, 0xc0 },
+ { R23_AGC21, 0x0b },
+ { R24_AGC22, 0x1c },
+ { R24_AGC22, 0x0c },
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* low-pass filter cut-off frequency */
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ LP_Fc = 0;
+ LO_Frac = params->frequency + 4000000;
+ break;
+ case BANDWIDTH_7_MHZ:
+ LP_Fc = 1;
+ LO_Frac = params->frequency + 3500000;
+ break;
+ case BANDWIDTH_8_MHZ:
+ default:
+ LP_Fc = 2;
+ LO_Frac = params->frequency + 4000000;
+ break;
+ }
+
+ /* band-pass filter */
+ if (LO_Frac < 188000000)
+ BP_Filter = 3;
+ else if (LO_Frac < 253000000)
+ BP_Filter = 4;
+ else if (LO_Frac < 343000000)
+ BP_Filter = 5;
+ else
+ BP_Filter = 6;
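+ /*
+ * Worked example (frequencies assumed for illustration): an 8 MHz wide
+ * channel at 666 MHz gives LP_Fc = 2 and LO_Frac = 670 MHz, which falls
+ * in the >= 343 MHz range and therefore selects BP_Filter = 6.
+ */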
+
+ buf[0] = (priv->regs[R1A_IF1] & ~7) | BP_Filter; /* BP_Filter */
+ buf[1] = (priv->regs[R1B_IF2] & ~3) | LP_Fc; /* LP_Fc */
+ buf[2] = priv->regs[R1C_AGC2B];
+ ret = tda18218_wr_regs(priv, R1A_IF1, buf, 3);
+ if (ret)
+ goto error;
+
+ buf[0] = (LO_Frac / 1000) >> 12; /* LO_Frac_0 */
+ buf[1] = (LO_Frac / 1000) >> 4; /* LO_Frac_1 */
+ buf[2] = (LO_Frac / 1000) << 4 |
+ (priv->regs[R0C_MD5] & 0x0f); /* LO_Frac_2 */
+ ret = tda18218_wr_regs(priv, R0A_MD3, buf, 3);
+ if (ret)
+ goto error;
+
+ buf[0] = priv->regs[R0F_MD8] | (1 << 6); /* Freq_prog_Start */
+ ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
+ if (ret)
+ goto error;
+
+ buf[0] = priv->regs[R0F_MD8] & ~(1 << 6); /* Freq_prog_Start */
+ ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
+ if (ret)
+ goto error;
+
+ /* trigger AGC */
+ for (i = 0; i < ARRAY_SIZE(agc); i++) {
+ ret = tda18218_wr_reg(priv, agc[i][0], agc[i][1]);
+ if (ret)
+ goto error;
+ }
+
+error:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_sleep(struct dvb_frontend *fe)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* standby */
+ ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_init(struct dvb_frontend *fe)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ int ret;
+
+ /* TODO: calibrations */
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = tda18218_wr_regs(priv, R00_ID, priv->regs, TDA18218_NUM_REGS);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ return ret;
+}
+
+static int tda18218_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static const struct dvb_tuner_ops tda18218_tuner_ops = {
+ .info = {
+ .name = "NXP TDA18218",
+
+ .frequency_min = 174000000,
+ .frequency_max = 864000000,
+ .frequency_step = 1000,
+ },
+
+ .release = tda18218_release,
+ .init = tda18218_init,
+ .sleep = tda18218_sleep,
+
+ .set_params = tda18218_set_params,
+};
+
+struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg)
+{
+ struct tda18218_priv *priv = NULL;
+ u8 val;
+ int ret;
+ /* chip default register values */
+ static u8 def_regs[] = {
+ 0xc0, 0x88, 0x00, 0x8e, 0x03, 0x00, 0x00, 0xd0, 0x00, 0x40,
+ 0x00, 0x00, 0x07, 0xff, 0x84, 0x09, 0x00, 0x13, 0x00, 0x00,
+ 0x01, 0x84, 0x09, 0xf0, 0x19, 0x0a, 0x8e, 0x69, 0x98, 0x01,
+ 0x00, 0x58, 0x10, 0x40, 0x8c, 0x00, 0x0c, 0x48, 0x85, 0xc9,
+ 0xa7, 0x00, 0x00, 0x00, 0x30, 0x81, 0x80, 0x00, 0x39, 0x00,
+ 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xf6
+ };
+
+ priv = kzalloc(sizeof(struct tda18218_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+ fe->tuner_priv = priv;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* check if the tuner is there */
+ ret = tda18218_rd_reg(priv, R00_ID, &val);
+ dbg("%s: ret:%d chip ID:%02x", __func__, ret, val);
+ if (ret || val != def_regs[R00_ID]) {
+ kfree(priv);
+ return NULL;
+ }
+
+ info("NXP TDA18218HN successfully identified.");
+
+ memcpy(&fe->ops.tuner_ops, &tda18218_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ memcpy(priv->regs, def_regs, sizeof(def_regs));
+
+ /* override the default register values when loop-through is enabled */
+ if (priv->cfg->loop_through) {
+ priv->regs[R17_PD1] = 0xb0;
+ priv->regs[R18_PD2] = 0x59;
+ }
+
+ /* standby */
+ ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));
+ if (ret)
+ dbg("%s: failed ret:%d", __func__, ret);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return fe;
+}
+EXPORT_SYMBOL(tda18218_attach);
+
+MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/tda18218.h b/drivers/media/common/tuners/tda18218.h
new file mode 100644
index 000000000000..b4180d180029
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218.h
@@ -0,0 +1,45 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TDA18218_H
+#define TDA18218_H
+
+#include "dvb_frontend.h"
+
+struct tda18218_config {
+ u8 i2c_address;
+ u8 i2c_wr_max;
+ u8 loop_through:1;
+};
+
+#if defined(CONFIG_MEDIA_TUNER_TDA18218) || \
+ (defined(CONFIG_MEDIA_TUNER_TDA18218_MODULE) && defined(MODULE))
+extern struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg);
+#else
+static inline struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tda18218_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/common/tuners/tda18218_priv.h b/drivers/media/common/tuners/tda18218_priv.h
new file mode 100644
index 000000000000..904e5365c78c
--- /dev/null
+++ b/drivers/media/common/tuners/tda18218_priv.h
@@ -0,0 +1,106 @@
+/*
+ * NXP TDA18218HN silicon tuner driver
+ *
+ * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef TDA18218_PRIV_H
+#define TDA18218_PRIV_H
+
+#define LOG_PREFIX "tda18218"
+
+#undef dbg
+#define dbg(f, arg...) \
+ if (debug) \
+ printk(KERN_DEBUG LOG_PREFIX": " f "\n" , ## arg)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+#define R00_ID 0x00 /* ID byte */
+#define R01_R1 0x01 /* Read byte 1 */
+#define R02_R2 0x02 /* Read byte 2 */
+#define R03_R3 0x03 /* Read byte 3 */
+#define R04_R4 0x04 /* Read byte 4 */
+#define R05_R5 0x05 /* Read byte 5 */
+#define R06_R6 0x06 /* Read byte 6 */
+#define R07_MD1 0x07 /* Main divider byte 1 */
+#define R08_PSM1 0x08 /* PSM byte 1 */
+#define R09_MD2 0x09 /* Main divider byte 2 */
+#define R0A_MD3 0x0a /* Main divider byte 3 */
+#define R0B_MD4 0x0b /* Main divider byte 4 */
+#define R0C_MD5 0x0c /* Main divider byte 5 */
+#define R0D_MD6 0x0d /* Main divider byte 6 */
+#define R0E_MD7 0x0e /* Main divider byte 7 */
+#define R0F_MD8 0x0f /* Main divider byte 8 */
+#define R10_CD1 0x10 /* Call divider byte 1 */
+#define R11_CD2 0x11 /* Call divider byte 2 */
+#define R12_CD3 0x12 /* Call divider byte 3 */
+#define R13_CD4 0x13 /* Call divider byte 4 */
+#define R14_CD5 0x14 /* Call divider byte 5 */
+#define R15_CD6 0x15 /* Call divider byte 6 */
+#define R16_CD7 0x16 /* Call divider byte 7 */
+#define R17_PD1 0x17 /* Power-down byte 1 */
+#define R18_PD2 0x18 /* Power-down byte 2 */
+#define R19_XTOUT 0x19 /* XTOUT byte */
+#define R1A_IF1 0x1a /* IF byte 1 */
+#define R1B_IF2 0x1b /* IF byte 2 */
+#define R1C_AGC2B 0x1c /* AGC2b byte */
+#define R1D_PSM2 0x1d /* PSM byte 2 */
+#define R1E_PSM3 0x1e /* PSM byte 3 */
+#define R1F_PSM4 0x1f /* PSM byte 4 */
+#define R20_AGC11 0x20 /* AGC1 byte 1 */
+#define R21_AGC12 0x21 /* AGC1 byte 2 */
+#define R22_AGC13 0x22 /* AGC1 byte 3 */
+#define R23_AGC21 0x23 /* AGC2 byte 1 */
+#define R24_AGC22 0x24 /* AGC2 byte 2 */
+#define R25_AAGC 0x25 /* Analog AGC byte */
+#define R26_RC 0x26 /* RC byte */
+#define R27_RSSI 0x27 /* RSSI byte */
+#define R28_IRCAL1 0x28 /* IR CAL byte 1 */
+#define R29_IRCAL2 0x29 /* IR CAL byte 2 */
+#define R2A_IRCAL3 0x2a /* IR CAL byte 3 */
+#define R2B_IRCAL4 0x2b /* IR CAL byte 4 */
+#define R2C_RFCAL1 0x2c /* RF CAL byte 1 */
+#define R2D_RFCAL2 0x2d /* RF CAL byte 2 */
+#define R2E_RFCAL3 0x2e /* RF CAL byte 3 */
+#define R2F_RFCAL4 0x2f /* RF CAL byte 4 */
+#define R30_RFCAL5 0x30 /* RF CAL byte 5 */
+#define R31_RFCAL6 0x31 /* RF CAL byte 6 */
+#define R32_RFCAL7 0x32 /* RF CAL byte 7 */
+#define R33_RFCAL8 0x33 /* RF CAL byte 8 */
+#define R34_RFCAL9 0x34 /* RF CAL byte 9 */
+#define R35_RFCAL10 0x35 /* RF CAL byte 10 */
+#define R36_RFCALRAM1 0x36 /* RF CAL RAM byte 1 */
+#define R37_RFCALRAM2 0x37 /* RF CAL RAM byte 2 */
+#define R38_MARGIN 0x38 /* Margin byte */
+#define R39_FMAX1 0x39 /* Fmax byte 1 */
+#define R3A_FMAX2 0x3a /* Fmax byte 2 */
+
+#define TDA18218_NUM_REGS 59
+
+struct tda18218_priv {
+ struct tda18218_config *cfg;
+ struct i2c_adapter *i2c;
+
+ u8 regs[TDA18218_NUM_REGS];
+};
+
+#endif
diff --git a/drivers/media/common/tuners/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index e1f678281a58..5466d47db899 100644
--- a/drivers/media/common/tuners/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
@@ -193,25 +193,51 @@ int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
unsigned char *regs = priv->tda18271_regs;
unsigned char buf[TDA18271_NUM_REGS + 1];
struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0,
- .buf = buf, .len = len + 1 };
- int i, ret;
+ .buf = buf };
+ int i, ret = 1, max;
BUG_ON((len == 0) || (idx + len > sizeof(buf)));
- buf[0] = idx;
- for (i = 1; i <= len; i++)
- buf[i] = regs[idx - 1 + i];
+
+ switch (priv->small_i2c) {
+ case TDA18271_03_BYTE_CHUNK_INIT:
+ max = 3;
+ break;
+ case TDA18271_08_BYTE_CHUNK_INIT:
+ max = 8;
+ break;
+ case TDA18271_16_BYTE_CHUNK_INIT:
+ max = 16;
+ break;
+ case TDA18271_39_BYTE_CHUNK_INIT:
+ default:
+ max = 39;
+ }
tda18271_i2c_gate_ctrl(fe, 1);
+ while (len) {
+ if (max > len)
+ max = len;
+
+ buf[0] = idx;
+ for (i = 1; i <= max; i++)
+ buf[i] = regs[idx - 1 + i];
- /* write registers */
- ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
+ msg.len = max + 1;
+ /* write registers */
+ ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
+ if (ret != 1)
+ break;
+
+ idx += max;
+ len -= max;
+ }
tda18271_i2c_gate_ctrl(fe, 0);
if (ret != 1)
tda_err("ERROR: idx = 0x%x, len = %d, "
- "i2c_transfer returned: %d\n", idx, len, ret);
+ "i2c_transfer returned: %d\n", idx, max, ret);
return (ret == 1 ? 0 : ret);
}
@@ -326,24 +352,7 @@ int tda18271_init_regs(struct dvb_frontend *fe)
regs[R_EB22] = 0x48;
regs[R_EB23] = 0xb0;
- switch (priv->small_i2c) {
- case TDA18271_08_BYTE_CHUNK_INIT:
- tda18271_write_regs(fe, 0x00, 0x08);
- tda18271_write_regs(fe, 0x08, 0x08);
- tda18271_write_regs(fe, 0x10, 0x08);
- tda18271_write_regs(fe, 0x18, 0x08);
- tda18271_write_regs(fe, 0x20, 0x07);
- break;
- case TDA18271_16_BYTE_CHUNK_INIT:
- tda18271_write_regs(fe, 0x00, 0x10);
- tda18271_write_regs(fe, 0x10, 0x10);
- tda18271_write_regs(fe, 0x20, 0x07);
- break;
- case TDA18271_39_BYTE_CHUNK_INIT:
- default:
- tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS);
- break;
- }
+ tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS);
/* setup agc1 gain */
regs[R_EB17] = 0x00;
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 7955e49a3440..9ad4454a148d 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -1156,7 +1156,6 @@ static int tda18271_get_id(struct dvb_frontend *fe)
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
char *name;
- int ret = 0;
mutex_lock(&priv->lock);
tda18271_read_regs(fe);
@@ -1172,17 +1171,16 @@ static int tda18271_get_id(struct dvb_frontend *fe)
priv->id = TDA18271HDC2;
break;
default:
- name = "Unknown device";
- ret = -EINVAL;
- break;
+ tda_info("Unknown device (%i) detected @ %d-%04x, device not supported.\n",
+ regs[R_ID], i2c_adapter_id(priv->i2c_props.adap),
+ priv->i2c_props.addr);
+ return -EINVAL;
}
- tda_info("%s detected @ %d-%04x%s\n", name,
- i2c_adapter_id(priv->i2c_props.adap),
- priv->i2c_props.addr,
- (0 == ret) ? "" : ", device not supported.");
+ tda_info("%s detected @ %d-%04x\n", name,
+ i2c_adapter_id(priv->i2c_props.adap), priv->i2c_props.addr);
- return ret;
+ return 0;
}
static int tda18271_setup_configuration(struct dvb_frontend *fe,
diff --git a/drivers/media/common/tuners/tda18271.h b/drivers/media/common/tuners/tda18271.h
index d7fcc36dc6e6..3abb221f3d07 100644
--- a/drivers/media/common/tuners/tda18271.h
+++ b/drivers/media/common/tuners/tda18271.h
@@ -80,8 +80,9 @@ enum tda18271_output_options {
enum tda18271_small_i2c {
TDA18271_39_BYTE_CHUNK_INIT = 0,
- TDA18271_16_BYTE_CHUNK_INIT = 1,
- TDA18271_08_BYTE_CHUNK_INIT = 2,
+ TDA18271_16_BYTE_CHUNK_INIT = 16,
+ TDA18271_08_BYTE_CHUNK_INIT = 8,
+ TDA18271_03_BYTE_CHUNK_INIT = 3,
};
struct tda18271_config {
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index d2b2c12a5561..76ac5cd84af7 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -1042,7 +1042,7 @@ static const struct dvb_tuner_ops xc5000_tuner_ops = {
struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+ const struct xc5000_config *cfg)
{
struct xc5000_priv *priv = NULL;
int instance;
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index e6d7236c9ea1..3756e73649be 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -53,11 +53,11 @@ struct xc5000_config {
(defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
extern struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg);
+ const struct xc5000_config *cfg);
#else
static inline struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+ const struct xc5000_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index fd1df2352764..965d5eb33752 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -245,9 +245,6 @@ int flexcop_i2c_init(struct flexcop_device *fc)
i2c_set_adapdata(&fc->fc_i2c_adap[1].i2c_adap, &fc->fc_i2c_adap[1]);
i2c_set_adapdata(&fc->fc_i2c_adap[2].i2c_adap, &fc->fc_i2c_adap[2]);
- fc->fc_i2c_adap[0].i2c_adap.class =
- fc->fc_i2c_adap[1].i2c_adap.class =
- fc->fc_i2c_adap[2].i2c_adap.class = I2C_CLASS_TV_DIGITAL;
fc->fc_i2c_adap[0].i2c_adap.algo =
fc->fc_i2c_adap[1].i2c_adap.algo =
fc->fc_i2c_adap[2].i2c_adap.algo = &flexcop_algo;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index bca07c0bcd01..5d404f1bf036 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -862,7 +862,6 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
i2c_set_adapdata(&dev->i2c_adap, dev);
strcpy(dev->i2c_adap.name, DRIVER_NAME);
dev->i2c_adap.owner = THIS_MODULE;
- dev->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
dev->i2c_adap.dev.parent = &pdev->dev;
dev->i2c_adap.algo = &dm1105_algo;
dev->i2c_adap.algo_data = dev;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 970c9b8882d4..1589d5a5cb46 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -702,7 +702,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
kthread_stop(fepriv->thread);
- init_MUTEX (&fepriv->sem);
+ sema_init(&fepriv->sem, 1);
fepriv->state = FESTATE_IDLE;
/* paranoia check in case a signal arrived */
@@ -2062,7 +2062,7 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
}
fepriv = fe->frontend_priv;
- init_MUTEX (&fepriv->sem);
+ sema_init(&fepriv->sem, 1);
init_waitqueue_head (&fepriv->wait_queue);
init_waitqueue_head (&fepriv->events.wait_queue);
mutex_init(&fepriv->events.mtx);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index bf0e6bed28dd..f9f19be77181 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -260,7 +260,7 @@ struct dvb_frontend_ops {
int (*init)(struct dvb_frontend* fe);
int (*sleep)(struct dvb_frontend* fe);
- int (*write)(struct dvb_frontend* fe, u8* buf, int len);
+ int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
/* if this is set, it overrides the default swzigzag */
int (*tune)(struct dvb_frontend* fe,
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index fdc19bba2128..2525d3b3c88d 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -314,6 +314,8 @@ config DVB_USB_AF9015
select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TDA18218 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Afatech AF9015 based DVB-T USB2.0 receiver
@@ -346,3 +348,13 @@ config DVB_USB_AZ6027
select DVB_STB6100 if !DVB_FE_CUSTOMISE
help
Say Y here to support the AZ6027 device
+
+config DVB_USB_LME2510
+ tristate "LME DM04/QQBOX DVB-S USB2.0 support"
+ depends on DVB_USB
+ select DVB_TDA10086 if !DVB_FE_CUSTOMISE
+ select DVB_TDA826X if !DVB_FE_CUSTOMISE
+ select DVB_STV0288 if !DVB_FE_CUSTOMISE
+ select DVB_IX2505V if !DVB_FE_CUSTOMISE
+ help
+ Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 receiver.
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 1a192453b0e7..5b1d12f2d591 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -88,6 +88,9 @@ obj-$(CONFIG_DVB_USB_EC168) += dvb-usb-ec168.o
dvb-usb-az6027-objs = az6027.o
obj-$(CONFIG_DVB_USB_AZ6027) += dvb-usb-az6027.o
+dvb-usb-lmedm04-objs = lmedm04.o
+obj-$(CONFIG_DVB_USB_LME2510) += dvb-usb-lmedm04.o
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
# due to tuner-xc3028
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index ea1ed3b4592a..31c0a0ed39f5 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -31,6 +31,8 @@
#include "tda18271.h"
#include "mxl5005s.h"
#include "mc44s803.h"
+#include "tda18218.h"
+#include "mxl5007t.h"
static int dvb_usb_af9015_debug;
module_param_named(debug, dvb_usb_af9015_debug, int, 0644);
@@ -205,12 +207,18 @@ static int af9015_write_reg(struct dvb_usb_device *d, u16 addr, u8 val)
return af9015_write_regs(d, addr, &val, 1);
}
-static int af9015_read_reg(struct dvb_usb_device *d, u16 addr, u8 *val)
+static int af9015_read_regs(struct dvb_usb_device *d, u16 addr, u8 *val, u8 len)
{
- struct req_t req = {READ_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, 1, val};
+ struct req_t req = {READ_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, len,
+ val};
return af9015_ctrl_msg(d, &req);
}
+static int af9015_read_reg(struct dvb_usb_device *d, u16 addr, u8 *val)
+{
+ return af9015_read_regs(d, addr, val, 1);
+}
+
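The new bulk-read helper is what the reworked remote-control polling later in this patch relies on; a hedged usage sketch (the register address 0x98d9 comes from that later hunk):

    u8 buf[16];
    int ret;

    /* one 16-byte read instead of sixteen single-register reads */
    ret = af9015_read_regs(d, 0x98d9, buf, sizeof(buf));
    if (ret)
            return ret;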
static int af9015_write_reg_i2c(struct dvb_usb_device *d, u8 addr, u16 reg,
u8 val)
{
@@ -241,7 +249,7 @@ static int af9015_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0, i = 0;
u16 addr;
- u8 mbox, addr_len;
+ u8 uninitialized_var(mbox), addr_len;
struct req_t req;
/* TODO: implement bus lock
@@ -280,7 +288,7 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
} else {
addr = msg[i].buf[0];
addr_len = 1;
- mbox = 0;
+ /* mbox is a don't-care in this case */
}
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
@@ -494,7 +502,8 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
/* wait 2nd demodulator ready */
msleep(100);
- ret = af9015_read_reg_i2c(d, 0x3a, 0x98be, &val);
+ ret = af9015_read_reg_i2c(d,
+ af9015_af9013_config[1].demod_address, 0x98be, &val);
if (ret)
goto error;
else
@@ -597,37 +606,6 @@ free:
return ret;
}
-static int af9015_download_ir_table(struct dvb_usb_device *d)
-{
- int i, packets = 0, ret;
- u16 addr = 0x9a56; /* ir-table start address */
- struct req_t req = {WRITE_MEMORY, 0, 0, 0, 0, 1, NULL};
- u8 *data = NULL;
- deb_info("%s:\n", __func__);
-
- data = af9015_config.ir_table;
- packets = af9015_config.ir_table_size;
-
- /* no remote */
- if (!packets)
- goto exit;
-
- /* load remote ir-table */
- for (i = 0; i < packets; i++) {
- req.addr = addr + i;
- req.data = &data[i];
- ret = af9015_ctrl_msg(d, &req);
- if (ret) {
- err("ir-table download failed at packet %d with " \
- "code %d", i, ret);
- return ret;
- }
- }
-
-exit:
- return 0;
-}
-
static int af9015_init(struct dvb_usb_device *d)
{
int ret;
@@ -637,10 +615,6 @@ static int af9015_init(struct dvb_usb_device *d)
if (ret)
goto error;
- ret = af9015_download_ir_table(d);
- if (ret)
- goto error;
-
error:
return ret;
}
@@ -733,125 +707,102 @@ error:
return ret;
}
-struct af9015_setup {
+struct af9015_rc_setup {
unsigned int id;
- struct ir_scancode *rc_key_map;
- unsigned int rc_key_map_size;
- u8 *ir_table;
- unsigned int ir_table_size;
+ char *rc_codes;
};
-static const struct af9015_setup *af9015_setup_match(unsigned int id,
- const struct af9015_setup *table)
+static char *af9015_rc_setup_match(unsigned int id,
+ const struct af9015_rc_setup *table)
{
- for (; table->rc_key_map; table++)
+ for (; table->rc_codes; table++)
if (table->id == id)
- return table;
+ return table->rc_codes;
return NULL;
}
-static const struct af9015_setup af9015_setup_modparam[] = {
- { AF9015_REMOTE_A_LINK_DTU_M,
- ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
- af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
- { AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
- af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
- { AF9015_REMOTE_MYGICTV_U718,
- ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
- af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
- { AF9015_REMOTE_DIGITTRADE_DVB_T,
- ir_codes_af9015_table_digittrade, ARRAY_SIZE(ir_codes_af9015_table_digittrade),
- af9015_ir_table_digittrade, ARRAY_SIZE(af9015_ir_table_digittrade) },
- { AF9015_REMOTE_AVERMEDIA_KS,
- ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
- af9015_ir_table_avermedia_ks, ARRAY_SIZE(af9015_ir_table_avermedia_ks) },
+static const struct af9015_rc_setup af9015_rc_setup_modparam[] = {
+ { AF9015_REMOTE_A_LINK_DTU_M, RC_MAP_ALINK_DTU_M },
+ { AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3, RC_MAP_MSI_DIGIVOX_II },
+ { AF9015_REMOTE_MYGICTV_U718, RC_MAP_TOTAL_MEDIA_IN_HAND },
+ { AF9015_REMOTE_DIGITTRADE_DVB_T, RC_MAP_DIGITTRADE },
+ { AF9015_REMOTE_AVERMEDIA_KS, RC_MAP_AVERMEDIA_RM_KS },
{ }
};
-/* don't add new entries here anymore, use hashes instead */
-static const struct af9015_setup af9015_setup_usbids[] = {
- { USB_VID_LEADTEK,
- ir_codes_af9015_table_leadtek, ARRAY_SIZE(ir_codes_af9015_table_leadtek),
- af9015_ir_table_leadtek, ARRAY_SIZE(af9015_ir_table_leadtek) },
- { USB_VID_VISIONPLUS,
- ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
- af9015_ir_table_twinhan, ARRAY_SIZE(af9015_ir_table_twinhan) },
- { USB_VID_KWORLD_2, /* TODO: use correct rc keys */
- ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
- af9015_ir_table_kworld, ARRAY_SIZE(af9015_ir_table_kworld) },
- { USB_VID_AVERMEDIA,
- ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
- af9015_ir_table_avermedia, ARRAY_SIZE(af9015_ir_table_avermedia) },
- { USB_VID_MSI_2,
- ir_codes_af9015_table_msi_digivox_iii, ARRAY_SIZE(ir_codes_af9015_table_msi_digivox_iii),
- af9015_ir_table_msi_digivox_iii, ARRAY_SIZE(af9015_ir_table_msi_digivox_iii) },
+static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
+ { 0xb8feb708, RC_MAP_MSI_DIGIVOX_II },
+ { 0xa3703d00, RC_MAP_ALINK_DTU_M },
+ { 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */
{ }
};
-static const struct af9015_setup af9015_setup_hashes[] = {
- { 0xb8feb708,
- ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
- af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
- { 0xa3703d00,
- ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
- af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
- { 0x9b7dc64e,
- ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
- af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
+static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
+ { (USB_VID_TERRATEC << 16) + USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC,
+ RC_MAP_TERRATEC_SLIM },
+ { (USB_VID_VISIONPLUS << 16) + USB_PID_AZUREWAVE_AD_TU700,
+ RC_MAP_AZUREWAVE_AD_TU700 },
+ { (USB_VID_VISIONPLUS << 16) + USB_PID_TINYTWIN,
+ RC_MAP_AZUREWAVE_AD_TU700 },
+ { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
+ RC_MAP_MSI_DIGIVOX_III },
+ { (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
+ RC_MAP_LEADTEK_Y04G0051 },
+ { (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
+ RC_MAP_AVERMEDIA_M135A },
+ { (USB_VID_AFATECH << 16) + USB_PID_TREKSTOR_DVBT,
+ RC_MAP_TREKSTOR },
+ { (USB_VID_KWORLD_2 << 16) + USB_PID_TINYTWIN_2,
+ RC_MAP_DIGITALNOW_TINYTWIN },
+ { (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
+ RC_MAP_DIGITALNOW_TINYTWIN },
{ }
};
static void af9015_set_remote_config(struct usb_device *udev,
struct dvb_usb_device_properties *props)
{
- const struct af9015_setup *table = NULL;
-
- if (dvb_usb_af9015_remote) {
- /* load remote defined as module param */
- table = af9015_setup_match(dvb_usb_af9015_remote,
- af9015_setup_modparam);
- } else {
- u16 vendor = le16_to_cpu(udev->descriptor.idVendor);
-
- table = af9015_setup_match(af9015_config.eeprom_sum,
- af9015_setup_hashes);
-
- if (!table && vendor == USB_VID_AFATECH) {
- /* Check USB manufacturer and product strings and try
- to determine correct remote in case of chip vendor
- reference IDs are used.
- DO NOT ADD ANYTHING NEW HERE. Use hashes instead.
- */
- char manufacturer[10];
- memset(manufacturer, 0, sizeof(manufacturer));
- usb_string(udev, udev->descriptor.iManufacturer,
- manufacturer, sizeof(manufacturer));
- if (!strcmp("MSI", manufacturer)) {
- /* iManufacturer 1 MSI
- iProduct 2 MSI K-VOX */
- table = af9015_setup_match(
- AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- af9015_setup_modparam);
- } else if (udev->descriptor.idProduct ==
- cpu_to_le16(USB_PID_TREKSTOR_DVBT)) {
- table = &(const struct af9015_setup){ 0,
- ir_codes_af9015_table_trekstor,
- ARRAY_SIZE(ir_codes_af9015_table_trekstor),
- af9015_ir_table_trekstor,
- ARRAY_SIZE(af9015_ir_table_trekstor)
- };
- }
- } else if (!table)
- table = af9015_setup_match(vendor, af9015_setup_usbids);
+ u16 vid = le16_to_cpu(udev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(udev->descriptor.idProduct);
+
+ /* try to load remote based on module param */
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ dvb_usb_af9015_remote, af9015_rc_setup_modparam);
+
+ /* try to load remote based on eeprom hash */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ af9015_config.eeprom_sum, af9015_rc_setup_hashes);
+
+ /* try to load remote based on USB ID */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ (vid << 16) + pid, af9015_rc_setup_usbids);
+
+ /* try to load remote based on USB iManufacturer string */
+ if (!props->rc.core.rc_codes && vid == USB_VID_AFATECH) {
+ /* Check the USB manufacturer and product strings and try
+ to determine the correct remote in case chip vendor
+ reference IDs are used.
+ DO NOT ADD ANYTHING NEW HERE. Use hashes instead. */
+ char manufacturer[10];
+ memset(manufacturer, 0, sizeof(manufacturer));
+ usb_string(udev, udev->descriptor.iManufacturer,
+ manufacturer, sizeof(manufacturer));
+ if (!strcmp("MSI", manufacturer)) {
+ /* iManufacturer 1 MSI
+ iProduct 2 MSI K-VOX */
+ props->rc.core.rc_codes = af9015_rc_setup_match(
+ AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
+ af9015_rc_setup_modparam);
+ }
}
- if (table) {
- props->rc.legacy.rc_key_map = table->rc_key_map;
- props->rc.legacy.rc_key_map_size = table->rc_key_map_size;
- af9015_config.ir_table = table->ir_table;
- af9015_config.ir_table_size = table->ir_table_size;
- }
+ /* finally load "empty" just to leave the IR receiver enabled */
+ if (!props->rc.core.rc_codes)
+ props->rc.core.rc_codes = RC_MAP_EMPTY;
+
+ return;
}
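The USB-ID table above keys each entry on the vendor and product IDs packed into one 32-bit value, which is also how the lookup key is built in af9015_set_remote_config(). A hedged illustration with invented numbers:

    /* e.g. vid = 0x1234, pid = 0x5678 packs to 0x12345678, the value
     * compared against table->id by af9015_rc_setup_match() */
    u32 key = ((u32)vid << 16) + pid;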
static int af9015_read_config(struct usb_device *udev)
@@ -877,10 +828,9 @@ static int af9015_read_config(struct usb_device *udev)
deb_info("%s: IR mode:%d\n", __func__, val);
for (i = 0; i < af9015_properties_count; i++) {
- if (val == AF9015_IR_MODE_DISABLED) {
- af9015_properties[i].rc.legacy.rc_key_map = NULL;
- af9015_properties[i].rc.legacy.rc_key_map_size = 0;
- } else
+ if (val == AF9015_IR_MODE_DISABLED)
+ af9015_properties[i].rc.core.rc_codes = NULL;
+ else
af9015_set_remote_config(udev, &af9015_properties[i]);
}
@@ -992,20 +942,19 @@ static int af9015_read_config(struct usb_device *udev)
case AF9013_TUNER_MT2060_2:
case AF9013_TUNER_TDA18271:
case AF9013_TUNER_QT1010A:
+ case AF9013_TUNER_TDA18218:
af9015_af9013_config[i].rf_spec_inv = 1;
break;
case AF9013_TUNER_MXL5003D:
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
+ case AF9013_TUNER_MXL5007T:
af9015_af9013_config[i].rf_spec_inv = 0;
break;
case AF9013_TUNER_MC44S803:
af9015_af9013_config[i].gpio[1] = AF9013_GPIO_LO;
af9015_af9013_config[i].rf_spec_inv = 1;
break;
- case AF9013_TUNER_TDA18218:
- warn("tuner NXP TDA18218 not supported yet");
- return -ENODEV;
default:
warn("tuner id:%d not supported, please report!", val);
return -ENODEV;
@@ -1020,9 +969,13 @@ error:
err("eeprom read failed:%d", ret);
/* AverMedia AVerTV Volar Black HD (A850) device has bad EEPROM
- content :-( Override some wrong values here. */
+ content :-( Override some wrong values here. Ditto for the
+ AVerTV Red HD+ (A850T) device. */
if (le16_to_cpu(udev->descriptor.idVendor) == USB_VID_AVERMEDIA &&
- le16_to_cpu(udev->descriptor.idProduct) == USB_PID_AVERMEDIA_A850) {
+ ((le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_AVERMEDIA_A850) ||
+ (le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_AVERMEDIA_A850T))) {
deb_info("%s: AverMedia A850: overriding config\n", __func__);
/* disable dual mode */
af9015_config.dual_mode = 0;
@@ -1059,36 +1012,53 @@ static int af9015_identify_state(struct usb_device *udev,
return ret;
}
-static int af9015_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int af9015_rc_query(struct dvb_usb_device *d)
{
- u8 buf[8];
- struct req_t req = {GET_IR_CODE, 0, 0, 0, 0, sizeof(buf), buf};
- struct ir_scancode *keymap = d->props.rc.legacy.rc_key_map;
- int i, ret;
-
- memset(buf, 0, sizeof(buf));
+ struct af9015_state *priv = d->priv;
+ int ret;
+ u8 buf[16];
- ret = af9015_ctrl_msg(d, &req);
+ /* read registers needed to detect remote controller code */
+ ret = af9015_read_regs(d, 0x98d9, buf, sizeof(buf));
if (ret)
- return ret;
+ goto error;
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
+ if (buf[14] || buf[15]) {
+ deb_rc("%s: key pressed %02x %02x %02x %02x\n", __func__,
+ buf[12], buf[13], buf[14], buf[15]);
- for (i = 0; i < d->props.rc.legacy.rc_key_map_size; i++) {
- if (!buf[1] && rc5_custom(&keymap[i]) == buf[0] &&
- rc5_data(&keymap[i]) == buf[2]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
- break;
+ /* clear IR code from memory */
+ ret = af9015_write_regs(d, 0x98e5, "\x00\x00\x00\x00", 4);
+ if (ret)
+ goto error;
+
+ if (buf[14] == (u8) ~buf[15]) {
+ if (buf[12] == (u8) ~buf[13]) {
+ /* NEC */
+ priv->rc_keycode = buf[12] << 8 | buf[14];
+ } else {
+ /* NEC extended */
+ priv->rc_keycode = buf[12] << 16 |
+ buf[13] << 8 | buf[14];
+ }
+ ir_keydown(d->rc_input_dev, priv->rc_keycode, 0);
+ } else {
+ priv->rc_keycode = 0; /* clear just to be sure */
}
+ } else if (priv->rc_repeat != buf[6] || buf[0]) {
+ deb_rc("%s: key repeated\n", __func__);
+ ir_keydown(d->rc_input_dev, priv->rc_keycode, 0);
+ } else {
+ deb_rc("%s: no key press\n", __func__);
}
- if (!buf[1])
- deb_rc("%s: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- __func__, buf[0], buf[1], buf[2], buf[3], buf[4],
- buf[5], buf[6], buf[7]);
- return 0;
+ priv->rc_repeat = buf[6];
+
+error:
+ if (ret)
+ err("%s: failed:%d", __func__, ret);
+
+ return ret;
}
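The inverted-byte checks above follow standard NEC framing: the command byte is always transmitted together with its complement, while the address byte is complemented only in the plain (non-extended) variant. A hedged stand-alone sketch of the same scancode composition, not part of the patch:

    /* addr/addr_inv/cmd mirror buf[12], buf[13] and buf[14] above */
    static u32 nec_scancode(u8 addr, u8 addr_inv, u8 cmd)
    {
            if (addr == (u8) ~addr_inv)
                    return addr << 8 | cmd;                  /* plain NEC */
            return addr << 16 | addr_inv << 8 | cmd;         /* extended NEC */
    }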
/* init 2nd I2C adapter */
@@ -1100,11 +1070,6 @@ static int af9015_i2c_init(struct dvb_usb_device *d)
strncpy(state->i2c_adap.name, d->desc->name,
sizeof(state->i2c_adap.name));
-#ifdef I2C_ADAP_CLASS_TV_DIGITAL
- state->i2c_adap.class = I2C_ADAP_CLASS_TV_DIGITAL,
-#else
- state->i2c_adap.class = I2C_CLASS_TV_DIGITAL,
-#endif
state->i2c_adap.algo = d->props.i2c_algo;
state->i2c_adap.algo_data = NULL;
state->i2c_adap.dev.parent = &d->udev->dev;
@@ -1166,7 +1131,7 @@ static struct qt1010_config af9015_qt1010_config = {
static struct tda18271_config af9015_tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
- .small_i2c = 1,
+ .small_i2c = TDA18271_16_BYTE_CHUNK_INIT,
};
static struct mxl5005s_config af9015_mxl5003_config = {
@@ -1208,12 +1173,22 @@ static struct mc44s803_config af9015_mc44s803_config = {
.dig_out = 1,
};
+static struct tda18218_config af9015_tda18218_config = {
+ .i2c_address = 0xc0,
+ .i2c_wr_max = 21, /* max wr bytes AF9015 I2C adap can handle at once */
+};
+
+static struct mxl5007t_config af9015_mxl5007t_config = {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+};
+
static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
{
struct af9015_state *state = adap->dev->priv;
struct i2c_adapter *i2c_adap;
int ret;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* select I2C adapter */
if (adap->id == 0)
@@ -1238,6 +1213,10 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
ret = dvb_attach(tda18271_attach, adap->fe, 0xc0, i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
+ case AF9013_TUNER_TDA18218:
+ ret = dvb_attach(tda18218_attach, adap->fe, i2c_adap,
+ &af9015_tda18218_config) == NULL ? -ENODEV : 0;
+ break;
case AF9013_TUNER_MXL5003D:
ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
&af9015_mxl5003_config) == NULL ? -ENODEV : 0;
@@ -1255,6 +1234,10 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
ret = dvb_attach(mc44s803_attach, adap->fe, i2c_adap,
&af9015_mc44s803_config) == NULL ? -ENODEV : 0;
break;
+ case AF9013_TUNER_MXL5007T:
+ ret = dvb_attach(mxl5007t_attach, adap->fe, i2c_adap,
+ 0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
+ break;
case AF9013_TUNER_UNKNOWN:
default:
ret = -ENODEV;
@@ -1300,10 +1283,16 @@ static struct usb_device_id af9015_usb_table[] = {
/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
+ {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
+ {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
+/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
+ {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
+#define AF9015_RC_INTERVAL 500
static struct dvb_usb_device_properties af9015_properties[] = {
{
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1354,14 +1343,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 12, /* check max from dvb-usb.h */
.devices = {
{
.name = "Afatech AF9015 DVB-T USB2.0 stick",
@@ -1389,7 +1383,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
{
.name = "DigitalNow TinyTwin DVB-T Receiver",
.cold_ids = {&af9015_usb_table[5],
- &af9015_usb_table[28], NULL},
+ &af9015_usb_table[28],
+ &af9015_usb_table[36], NULL},
.warm_ids = {NULL},
},
{
@@ -1413,6 +1408,21 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[9], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "TerraTec Cinergy T Stick RC",
+ .cold_ids = {&af9015_usb_table[33], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "TerraTec Cinergy T Stick Dual RC",
+ .cold_ids = {&af9015_usb_table[34], NULL},
+ .warm_ids = {NULL},
+ },
+ {
+ .name = "AverMedia AVerTV Red HD+ (A850T)",
+ .cold_ids = {&af9015_usb_table[35], NULL},
+ .warm_ids = {NULL},
+ },
}
}, {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1463,14 +1473,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 9, /* check max from dvb-usb.h */
.devices = {
{
.name = "Xtensions XD-380",
@@ -1572,14 +1587,19 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.identify_state = af9015_identify_state,
- .rc.legacy = {
+ .rc.core = {
+ .protocol = IR_TYPE_NEC,
+ .module_name = "af9015",
.rc_query = af9015_rc_query,
- .rc_interval = 150,
+ .rc_interval = AF9015_RC_INTERVAL,
+ .rc_props = {
+ .allowed_protos = IR_TYPE_NEC,
+ },
},
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* max 9 */
+ .num_device_descs = 9, /* check max from dvb-usb.h */
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
@@ -1672,7 +1692,7 @@ static int af9015_usb_probe(struct usb_interface *intf,
static void af9015_i2c_exit(struct dvb_usb_device *d)
{
struct af9015_state *state = d->priv;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* remove 2nd I2C adapter */
if (d->state & DVB_USB_STATE_I2C)
@@ -1682,7 +1702,7 @@ static void af9015_i2c_exit(struct dvb_usb_device *d)
static void af9015_usb_device_exit(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
/* remove 2nd I2C adapter */
if (d != NULL && d->desc != NULL)
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index c8e9349742ee..f20cfa6ed690 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -100,6 +100,8 @@ enum af9015_ir_mode {
struct af9015_state {
struct i2c_adapter i2c_adap; /* I2C adapter for 2nd FE */
+ u8 rc_repeat;
+ u32 rc_keycode;
};
struct af9015_config {
@@ -108,8 +110,6 @@ struct af9015_config {
u16 firmware_size;
u16 firmware_checksum;
u32 eeprom_sum;
- u8 *ir_table;
- u16 ir_table_size;
};
enum af9015_remote {
@@ -121,735 +121,4 @@ enum af9015_remote {
/* 5 */ AF9015_REMOTE_AVERMEDIA_KS,
};
-/* LeadTek - Y04G0051 */
-/* Leadtek WinFast DTV Dongle Gold */
-static struct ir_scancode ir_codes_af9015_table_leadtek[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0028, KEY_OK },
- { 0x004f, KEY_RIGHT },
- { 0x0050, KEY_LEFT },
- { 0x0051, KEY_DOWN },
- { 0x0052, KEY_UP },
- { 0x011a, KEY_POWER2 },
- { 0x04b4, KEY_TV },
- { 0x04b3, KEY_RED },
- { 0x04b2, KEY_GREEN },
- { 0x04b1, KEY_YELLOW },
- { 0x04b0, KEY_BLUE },
- { 0x003d, KEY_TEXT },
- { 0x0113, KEY_SLEEP },
- { 0x0010, KEY_MUTE },
- { 0x0105, KEY_ESC },
- { 0x0009, KEY_SCREEN },
- { 0x010f, KEY_MENU },
- { 0x003f, KEY_CHANNEL },
- { 0x0013, KEY_REWIND },
- { 0x0012, KEY_PLAY },
- { 0x0011, KEY_FASTFORWARD },
- { 0x0005, KEY_PREVIOUS },
- { 0x0029, KEY_STOP },
- { 0x002b, KEY_NEXT },
- { 0x0041, KEY_EPG },
- { 0x0019, KEY_VIDEO },
- { 0x0016, KEY_AUDIO },
- { 0x0037, KEY_DOT },
- { 0x002a, KEY_AGAIN },
- { 0x002c, KEY_CAMERA },
- { 0x003c, KEY_NEW },
- { 0x0115, KEY_RECORD },
- { 0x010b, KEY_TIME },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x004b, KEY_CHANNELUP },
- { 0x004e, KEY_CHANNELDOWN },
-};
-
-static u8 af9015_ir_table_leadtek[] = {
- 0x03, 0xfc, 0x00, 0xff, 0x1a, 0x01, 0x00, /* KEY_POWER2 */
- 0x03, 0xfc, 0x56, 0xa9, 0xb4, 0x04, 0x00, /* KEY_TV */
- 0x03, 0xfc, 0x4b, 0xb4, 0xb3, 0x04, 0x00, /* KEY_RED */
- 0x03, 0xfc, 0x4c, 0xb3, 0xb2, 0x04, 0x00, /* KEY_GREEN */
- 0x03, 0xfc, 0x4d, 0xb2, 0xb1, 0x04, 0x00, /* KEY_YELLOW */
- 0x03, 0xfc, 0x4e, 0xb1, 0xb0, 0x04, 0x00, /* KEY_BLUE */
- 0x03, 0xfc, 0x1f, 0xe0, 0x3d, 0x00, 0x00, /* KEY_TEXT */
- 0x03, 0xfc, 0x40, 0xbf, 0x13, 0x01, 0x00, /* KEY_SLEEP */
- 0x03, 0xfc, 0x14, 0xeb, 0x10, 0x00, 0x00, /* KEY_MUTE */
- 0x03, 0xfc, 0x49, 0xb6, 0x05, 0x01, 0x00, /* KEY_ESC */
- 0x03, 0xfc, 0x50, 0xaf, 0x29, 0x00, 0x00, /* KEY_STOP (1)*/
- 0x03, 0xfc, 0x0c, 0xf3, 0x52, 0x00, 0x00, /* KEY_UP */
- 0x03, 0xfc, 0x03, 0xfc, 0x09, 0x00, 0x00, /* KEY_SCREEN */
- 0x03, 0xfc, 0x08, 0xf7, 0x50, 0x00, 0x00, /* KEY_LEFT */
- 0x03, 0xfc, 0x13, 0xec, 0x28, 0x00, 0x00, /* KEY_OK (1) */
- 0x03, 0xfc, 0x04, 0xfb, 0x4f, 0x00, 0x00, /* KEY_RIGHT */
- 0x03, 0xfc, 0x4f, 0xb0, 0x0f, 0x01, 0x00, /* KEY_MENU */
- 0x03, 0xfc, 0x10, 0xef, 0x51, 0x00, 0x00, /* KEY_DOWN */
- 0x03, 0xfc, 0x51, 0xae, 0x3f, 0x00, 0x00, /* KEY_CHANNEL */
- 0x03, 0xfc, 0x42, 0xbd, 0x13, 0x00, 0x00, /* KEY_REWIND */
- 0x03, 0xfc, 0x43, 0xbc, 0x12, 0x00, 0x00, /* KEY_PLAY */
- 0x03, 0xfc, 0x44, 0xbb, 0x11, 0x00, 0x00, /* KEY_FASTFORWARD */
- 0x03, 0xfc, 0x52, 0xad, 0x19, 0x00, 0x00, /* KEY_VIDEO (1) */
- 0x03, 0xfc, 0x54, 0xab, 0x05, 0x00, 0x00, /* KEY_PREVIOUS */
- 0x03, 0xfc, 0x46, 0xb9, 0x29, 0x00, 0x00, /* KEY_STOP (2) */
- 0x03, 0xfc, 0x55, 0xaa, 0x2b, 0x00, 0x00, /* KEY_NEXT */
- 0x03, 0xfc, 0x53, 0xac, 0x41, 0x00, 0x00, /* KEY_EPG */
- 0x03, 0xfc, 0x05, 0xfa, 0x1e, 0x00, 0x00, /* KEY_1 */
- 0x03, 0xfc, 0x06, 0xf9, 0x1f, 0x00, 0x00, /* KEY_2 */
- 0x03, 0xfc, 0x07, 0xf8, 0x20, 0x00, 0x00, /* KEY_3 */
- 0x03, 0xfc, 0x1e, 0xe1, 0x19, 0x00, 0x00, /* KEY_VIDEO (2) */
- 0x03, 0xfc, 0x09, 0xf6, 0x21, 0x00, 0x00, /* KEY_4 */
- 0x03, 0xfc, 0x0a, 0xf5, 0x22, 0x00, 0x00, /* KEY_5 */
- 0x03, 0xfc, 0x0b, 0xf4, 0x23, 0x00, 0x00, /* KEY_6 */
- 0x03, 0xfc, 0x1b, 0xe4, 0x16, 0x00, 0x00, /* KEY_AUDIO */
- 0x03, 0xfc, 0x0d, 0xf2, 0x24, 0x00, 0x00, /* KEY_7 */
- 0x03, 0xfc, 0x0e, 0xf1, 0x25, 0x00, 0x00, /* KEY_8 */
- 0x03, 0xfc, 0x0f, 0xf0, 0x26, 0x00, 0x00, /* KEY_9 */
- 0x03, 0xfc, 0x16, 0xe9, 0x28, 0x00, 0x00, /* KEY_OK (2) */
- 0x03, 0xfc, 0x41, 0xbe, 0x37, 0x00, 0x00, /* KEY_DOT */
- 0x03, 0xfc, 0x12, 0xed, 0x27, 0x00, 0x00, /* KEY_0 */
- 0x03, 0xfc, 0x11, 0xee, 0x2a, 0x00, 0x00, /* KEY_AGAIN */
- 0x03, 0xfc, 0x48, 0xb7, 0x2c, 0x00, 0x00, /* KEY_CAMERA */
- 0x03, 0xfc, 0x4a, 0xb5, 0x3c, 0x00, 0x00, /* KEY_NEW */
- 0x03, 0xfc, 0x47, 0xb8, 0x15, 0x01, 0x00, /* KEY_RECORD */
- 0x03, 0xfc, 0x45, 0xba, 0x0b, 0x01, 0x00, /* KEY_TIME */
- 0x03, 0xfc, 0x5e, 0xa1, 0x43, 0x00, 0x00, /* KEY_VOLUMEUP */
- 0x03, 0xfc, 0x5a, 0xa5, 0x42, 0x00, 0x00, /* KEY_VOLUMEDOWN */
- 0x03, 0xfc, 0x5b, 0xa4, 0x4b, 0x00, 0x00, /* KEY_CHANNELUP */
- 0x03, 0xfc, 0x5f, 0xa0, 0x4e, 0x00, 0x00, /* KEY_CHANNELDOWN */
-};
-
-/* TwinHan AzureWave AD-TU700(704J) */
-static struct ir_scancode ir_codes_af9015_table_twinhan[] = {
- { 0x053f, KEY_POWER },
- { 0x0019, KEY_FAVORITES }, /* Favorite List */
- { 0x0004, KEY_TEXT }, /* Teletext */
- { 0x000e, KEY_POWER },
- { 0x000e, KEY_INFO }, /* Preview */
- { 0x0008, KEY_EPG }, /* Info/EPG */
- { 0x000f, KEY_LIST }, /* Record List */
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0029, KEY_CANCEL }, /* Cancel */
- { 0x004c, KEY_CLEAR }, /* Clear */
- { 0x002a, KEY_BACK }, /* Back */
- { 0x002b, KEY_TAB }, /* Tab */
- { 0x0052, KEY_UP }, /* up arrow */
- { 0x0051, KEY_DOWN }, /* down arrow */
- { 0x004f, KEY_RIGHT }, /* right arrow */
- { 0x0050, KEY_LEFT }, /* left arrow */
- { 0x0028, KEY_ENTER }, /* Enter / ok */
- { 0x0252, KEY_VOLUMEUP },
- { 0x0251, KEY_VOLUMEDOWN },
- { 0x004e, KEY_CHANNELDOWN },
- { 0x004b, KEY_CHANNELUP },
- { 0x004a, KEY_RECORD },
- { 0x0111, KEY_PLAY },
- { 0x0017, KEY_PAUSE },
- { 0x000c, KEY_REWIND }, /* FR << */
- { 0x0011, KEY_FASTFORWARD }, /* FF >> */
- { 0x0115, KEY_PREVIOUS }, /* Replay */
- { 0x010e, KEY_NEXT }, /* Skip */
- { 0x0013, KEY_CAMERA }, /* Capture */
- { 0x010f, KEY_LANGUAGE }, /* SAP */
- { 0x0113, KEY_TV2 }, /* PIP */
- { 0x001d, KEY_ZOOM }, /* Full Screen */
- { 0x0117, KEY_SUBTITLE }, /* Subtitle / CC */
- { 0x0010, KEY_MUTE },
- { 0x0119, KEY_AUDIO }, /* L/R */ /* TODO better event */
- { 0x0116, KEY_SLEEP }, /* Hibernate */
- { 0x0116, KEY_SWITCHVIDEOMODE },
- /* A/V */ /* TODO does not work */
- { 0x0006, KEY_AGAIN }, /* Recall */
- { 0x0116, KEY_KPPLUS }, /* Zoom+ */ /* TODO does not work */
- { 0x0116, KEY_KPMINUS }, /* Zoom- */ /* TODO does not work */
- { 0x0215, KEY_RED },
- { 0x020a, KEY_GREEN },
- { 0x021c, KEY_YELLOW },
- { 0x0205, KEY_BLUE },
-};
-
-static u8 af9015_ir_table_twinhan[] = {
- 0x00, 0xff, 0x16, 0xe9, 0x3f, 0x05, 0x00,
- 0x00, 0xff, 0x07, 0xf8, 0x16, 0x01, 0x00,
- 0x00, 0xff, 0x14, 0xeb, 0x11, 0x01, 0x00,
- 0x00, 0xff, 0x1a, 0xe5, 0x4d, 0x00, 0x00,
- 0x00, 0xff, 0x4c, 0xb3, 0x17, 0x00, 0x00,
- 0x00, 0xff, 0x12, 0xed, 0x11, 0x00, 0x00,
- 0x00, 0xff, 0x40, 0xbf, 0x0c, 0x00, 0x00,
- 0x00, 0xff, 0x11, 0xee, 0x4a, 0x00, 0x00,
- 0x00, 0xff, 0x54, 0xab, 0x13, 0x00, 0x00,
- 0x00, 0xff, 0x41, 0xbe, 0x15, 0x01, 0x00,
- 0x00, 0xff, 0x42, 0xbd, 0x0e, 0x01, 0x00,
- 0x00, 0xff, 0x43, 0xbc, 0x17, 0x01, 0x00,
- 0x00, 0xff, 0x50, 0xaf, 0x0f, 0x01, 0x00,
- 0x00, 0xff, 0x4d, 0xb2, 0x1d, 0x00, 0x00,
- 0x00, 0xff, 0x47, 0xb8, 0x13, 0x01, 0x00,
- 0x00, 0xff, 0x05, 0xfa, 0x4b, 0x00, 0x00,
- 0x00, 0xff, 0x02, 0xfd, 0x4e, 0x00, 0x00,
- 0x00, 0xff, 0x0e, 0xf1, 0x06, 0x00, 0x00,
- 0x00, 0xff, 0x1e, 0xe1, 0x52, 0x02, 0x00,
- 0x00, 0xff, 0x0a, 0xf5, 0x51, 0x02, 0x00,
- 0x00, 0xff, 0x10, 0xef, 0x10, 0x00, 0x00,
- 0x00, 0xff, 0x49, 0xb6, 0x19, 0x01, 0x00,
- 0x00, 0xff, 0x15, 0xea, 0x27, 0x00, 0x00,
- 0x00, 0xff, 0x03, 0xfc, 0x1e, 0x00, 0x00,
- 0x00, 0xff, 0x01, 0xfe, 0x1f, 0x00, 0x00,
- 0x00, 0xff, 0x06, 0xf9, 0x20, 0x00, 0x00,
- 0x00, 0xff, 0x09, 0xf6, 0x21, 0x00, 0x00,
- 0x00, 0xff, 0x1d, 0xe2, 0x22, 0x00, 0x00,
- 0x00, 0xff, 0x1f, 0xe0, 0x23, 0x00, 0x00,
- 0x00, 0xff, 0x0d, 0xf2, 0x24, 0x00, 0x00,
- 0x00, 0xff, 0x19, 0xe6, 0x25, 0x00, 0x00,
- 0x00, 0xff, 0x1b, 0xe4, 0x26, 0x00, 0x00,
- 0x00, 0xff, 0x00, 0xff, 0x2b, 0x00, 0x00,
- 0x00, 0xff, 0x4a, 0xb5, 0x4c, 0x00, 0x00,
- 0x00, 0xff, 0x4b, 0xb4, 0x52, 0x00, 0x00,
- 0x00, 0xff, 0x51, 0xae, 0x51, 0x00, 0x00,
- 0x00, 0xff, 0x52, 0xad, 0x4f, 0x00, 0x00,
- 0x00, 0xff, 0x4e, 0xb1, 0x50, 0x00, 0x00,
- 0x00, 0xff, 0x0c, 0xf3, 0x29, 0x00, 0x00,
- 0x00, 0xff, 0x4f, 0xb0, 0x28, 0x00, 0x00,
- 0x00, 0xff, 0x13, 0xec, 0x2a, 0x00, 0x00,
- 0x00, 0xff, 0x17, 0xe8, 0x19, 0x00, 0x00,
- 0x00, 0xff, 0x04, 0xfb, 0x0f, 0x00, 0x00,
- 0x00, 0xff, 0x48, 0xb7, 0x0e, 0x00, 0x00,
- 0x00, 0xff, 0x0f, 0xf0, 0x04, 0x00, 0x00,
- 0x00, 0xff, 0x1c, 0xe3, 0x08, 0x00, 0x00,
- 0x00, 0xff, 0x18, 0xe7, 0x15, 0x02, 0x00,
- 0x00, 0xff, 0x53, 0xac, 0x0a, 0x02, 0x00,
- 0x00, 0xff, 0x5e, 0xa1, 0x1c, 0x02, 0x00,
- 0x00, 0xff, 0x5f, 0xa0, 0x05, 0x02, 0x00,
-};
-
-/* A-Link DTU(m) */
-static struct ir_scancode ir_codes_af9015_table_a_link[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x002e, KEY_CHANNELUP },
- { 0x002d, KEY_CHANNELDOWN },
- { 0x0428, KEY_ZOOM },
- { 0x0041, KEY_MUTE },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0044, KEY_GOTO }, /* jump */
- { 0x0545, KEY_POWER },
-};
-
-static u8 af9015_ir_table_a_link[] = {
- 0x08, 0xf7, 0x12, 0xed, 0x45, 0x05, 0x00, /* power */
- 0x08, 0xf7, 0x1a, 0xe5, 0x41, 0x00, 0x00, /* mute */
- 0x08, 0xf7, 0x01, 0xfe, 0x1e, 0x00, 0x00, /* 1 */
- 0x08, 0xf7, 0x1c, 0xe3, 0x21, 0x00, 0x00, /* 4 */
- 0x08, 0xf7, 0x03, 0xfc, 0x24, 0x00, 0x00, /* 7 */
- 0x08, 0xf7, 0x05, 0xfa, 0x28, 0x04, 0x00, /* zoom */
- 0x08, 0xf7, 0x00, 0xff, 0x43, 0x00, 0x00, /* volume up */
- 0x08, 0xf7, 0x16, 0xe9, 0x42, 0x00, 0x00, /* volume down */
- 0x08, 0xf7, 0x0f, 0xf0, 0x1f, 0x00, 0x00, /* 2 */
- 0x08, 0xf7, 0x0d, 0xf2, 0x22, 0x00, 0x00, /* 5 */
- 0x08, 0xf7, 0x1b, 0xe4, 0x25, 0x00, 0x00, /* 8 */
- 0x08, 0xf7, 0x06, 0xf9, 0x27, 0x00, 0x00, /* 0 */
- 0x08, 0xf7, 0x14, 0xeb, 0x2e, 0x00, 0x00, /* channel up */
- 0x08, 0xf7, 0x1d, 0xe2, 0x2d, 0x00, 0x00, /* channel down */
- 0x08, 0xf7, 0x02, 0xfd, 0x20, 0x00, 0x00, /* 3 */
- 0x08, 0xf7, 0x18, 0xe7, 0x23, 0x00, 0x00, /* 6 */
- 0x08, 0xf7, 0x04, 0xfb, 0x26, 0x00, 0x00, /* 9 */
- 0x08, 0xf7, 0x07, 0xf8, 0x44, 0x00, 0x00, /* jump */
-};
-
-/* MSI DIGIVOX mini II V3.0 */
-static struct ir_scancode ir_codes_af9015_table_msi[] = {
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x030f, KEY_CHANNELUP },
- { 0x030e, KEY_CHANNELDOWN },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0545, KEY_POWER },
- { 0x0052, KEY_UP }, /* up */
- { 0x0051, KEY_DOWN }, /* down */
- { 0x0028, KEY_ENTER },
-};
-
-static u8 af9015_ir_table_msi[] = {
- 0x03, 0xfc, 0x17, 0xe8, 0x45, 0x05, 0x00, /* power */
- 0x03, 0xfc, 0x0d, 0xf2, 0x51, 0x00, 0x00, /* down */
- 0x03, 0xfc, 0x03, 0xfc, 0x52, 0x00, 0x00, /* up */
- 0x03, 0xfc, 0x1a, 0xe5, 0x1e, 0x00, 0x00, /* 1 */
- 0x03, 0xfc, 0x02, 0xfd, 0x1f, 0x00, 0x00, /* 2 */
- 0x03, 0xfc, 0x04, 0xfb, 0x20, 0x00, 0x00, /* 3 */
- 0x03, 0xfc, 0x1c, 0xe3, 0x21, 0x00, 0x00, /* 4 */
- 0x03, 0xfc, 0x08, 0xf7, 0x22, 0x00, 0x00, /* 5 */
- 0x03, 0xfc, 0x1d, 0xe2, 0x23, 0x00, 0x00, /* 6 */
- 0x03, 0xfc, 0x11, 0xee, 0x24, 0x00, 0x00, /* 7 */
- 0x03, 0xfc, 0x0b, 0xf4, 0x25, 0x00, 0x00, /* 8 */
- 0x03, 0xfc, 0x10, 0xef, 0x26, 0x00, 0x00, /* 9 */
- 0x03, 0xfc, 0x09, 0xf6, 0x27, 0x00, 0x00, /* 0 */
- 0x03, 0xfc, 0x14, 0xeb, 0x43, 0x00, 0x00, /* volume up */
- 0x03, 0xfc, 0x1f, 0xe0, 0x42, 0x00, 0x00, /* volume down */
- 0x03, 0xfc, 0x15, 0xea, 0x0f, 0x03, 0x00, /* channel up */
- 0x03, 0xfc, 0x05, 0xfa, 0x0e, 0x03, 0x00, /* channel down */
- 0x03, 0xfc, 0x16, 0xe9, 0x28, 0x00, 0x00, /* enter */
-};
-
-/* MYGICTV U718 */
-static struct ir_scancode ir_codes_af9015_table_mygictv[] = {
- { 0x003d, KEY_SWITCHVIDEOMODE },
- /* TV / AV */
- { 0x0545, KEY_POWER },
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0027, KEY_0 },
- { 0x0041, KEY_MUTE },
- { 0x002a, KEY_ESC }, /* Esc */
- { 0x002e, KEY_CHANNELUP },
- { 0x002d, KEY_CHANNELDOWN },
- { 0x0042, KEY_VOLUMEDOWN },
- { 0x0043, KEY_VOLUMEUP },
- { 0x0052, KEY_UP }, /* up arrow */
- { 0x0051, KEY_DOWN }, /* down arrow */
- { 0x004f, KEY_RIGHT }, /* right arrow */
- { 0x0050, KEY_LEFT }, /* left arrow */
- { 0x0028, KEY_ENTER }, /* ok */
- { 0x0115, KEY_RECORD },
- { 0x0313, KEY_PLAY },
- { 0x0113, KEY_PAUSE },
- { 0x0116, KEY_STOP },
- { 0x0307, KEY_REWIND }, /* FR << */
- { 0x0309, KEY_FASTFORWARD }, /* FF >> */
- { 0x003b, KEY_TIME }, /* TimeShift */
- { 0x003e, KEY_CAMERA }, /* Snapshot */
- { 0x0316, KEY_CYCLEWINDOWS }, /* yellow, min / max */
- { 0x0000, KEY_ZOOM }, /* 'select' (?) */
- { 0x0316, KEY_SHUFFLE }, /* Shuffle */
- { 0x0345, KEY_POWER },
-};
-
-static u8 af9015_ir_table_mygictv[] = {
- 0x02, 0xbd, 0x0c, 0xf3, 0x3d, 0x00, 0x00, /* TV / AV */
- 0x02, 0xbd, 0x14, 0xeb, 0x45, 0x05, 0x00, /* power */
- 0x02, 0xbd, 0x00, 0xff, 0x1e, 0x00, 0x00, /* 1 */
- 0x02, 0xbd, 0x01, 0xfe, 0x1f, 0x00, 0x00, /* 2 */
- 0x02, 0xbd, 0x02, 0xfd, 0x20, 0x00, 0x00, /* 3 */
- 0x02, 0xbd, 0x03, 0xfc, 0x21, 0x00, 0x00, /* 4 */
- 0x02, 0xbd, 0x04, 0xfb, 0x22, 0x00, 0x00, /* 5 */
- 0x02, 0xbd, 0x05, 0xfa, 0x23, 0x00, 0x00, /* 6 */
- 0x02, 0xbd, 0x06, 0xf9, 0x24, 0x00, 0x00, /* 7 */
- 0x02, 0xbd, 0x07, 0xf8, 0x25, 0x00, 0x00, /* 8 */
- 0x02, 0xbd, 0x08, 0xf7, 0x26, 0x00, 0x00, /* 9 */
- 0x02, 0xbd, 0x09, 0xf6, 0x27, 0x00, 0x00, /* 0 */
- 0x02, 0xbd, 0x0a, 0xf5, 0x41, 0x00, 0x00, /* mute */
- 0x02, 0xbd, 0x1c, 0xe3, 0x2a, 0x00, 0x00, /* esc */
- 0x02, 0xbd, 0x1f, 0xe0, 0x43, 0x00, 0x00, /* volume up */
- 0x02, 0xbd, 0x12, 0xed, 0x52, 0x00, 0x00, /* up arrow */
- 0x02, 0xbd, 0x11, 0xee, 0x50, 0x00, 0x00, /* left arrow */
- 0x02, 0xbd, 0x15, 0xea, 0x28, 0x00, 0x00, /* ok */
- 0x02, 0xbd, 0x10, 0xef, 0x4f, 0x00, 0x00, /* right arrow */
- 0x02, 0xbd, 0x13, 0xec, 0x51, 0x00, 0x00, /* down arrow */
- 0x02, 0xbd, 0x0e, 0xf1, 0x42, 0x00, 0x00, /* volume down */
- 0x02, 0xbd, 0x19, 0xe6, 0x15, 0x01, 0x00, /* record */
- 0x02, 0xbd, 0x1e, 0xe1, 0x13, 0x03, 0x00, /* play */
- 0x02, 0xbd, 0x16, 0xe9, 0x16, 0x01, 0x00, /* stop */
- 0x02, 0xbd, 0x0b, 0xf4, 0x28, 0x04, 0x00, /* yellow, min / max */
- 0x02, 0xbd, 0x0f, 0xf0, 0x3b, 0x00, 0x00, /* time shift */
- 0x02, 0xbd, 0x18, 0xe7, 0x2e, 0x00, 0x00, /* channel up */
- 0x02, 0xbd, 0x1a, 0xe5, 0x2d, 0x00, 0x00, /* channel down */
- 0x02, 0xbd, 0x17, 0xe8, 0x3e, 0x00, 0x00, /* snapshot */
- 0x02, 0xbd, 0x40, 0xbf, 0x13, 0x01, 0x00, /* pause */
- 0x02, 0xbd, 0x41, 0xbe, 0x09, 0x03, 0x00, /* FF >> */
- 0x02, 0xbd, 0x42, 0xbd, 0x07, 0x03, 0x00, /* FR << */
- 0x02, 0xbd, 0x43, 0xbc, 0x00, 0x00, 0x00, /* 'select' (?) */
- 0x02, 0xbd, 0x44, 0xbb, 0x16, 0x03, 0x00, /* shuffle */
- 0x02, 0xbd, 0x45, 0xba, 0x45, 0x03, 0x00, /* power */
-};
-
-/* KWorld PlusTV Dual DVB-T Stick (DVB-T 399U) */
-static u8 af9015_ir_table_kworld[] = {
- 0x86, 0x6b, 0x0c, 0xf3, 0x2e, 0x07, 0x00,
- 0x86, 0x6b, 0x16, 0xe9, 0x2d, 0x07, 0x00,
- 0x86, 0x6b, 0x1d, 0xe2, 0x37, 0x07, 0x00,
- 0x86, 0x6b, 0x00, 0xff, 0x1e, 0x07, 0x00,
- 0x86, 0x6b, 0x01, 0xfe, 0x1f, 0x07, 0x00,
- 0x86, 0x6b, 0x02, 0xfd, 0x20, 0x07, 0x00,
- 0x86, 0x6b, 0x03, 0xfc, 0x21, 0x07, 0x00,
- 0x86, 0x6b, 0x04, 0xfb, 0x22, 0x07, 0x00,
- 0x86, 0x6b, 0x05, 0xfa, 0x23, 0x07, 0x00,
- 0x86, 0x6b, 0x06, 0xf9, 0x24, 0x07, 0x00,
- 0x86, 0x6b, 0x07, 0xf8, 0x25, 0x07, 0x00,
- 0x86, 0x6b, 0x08, 0xf7, 0x26, 0x07, 0x00,
- 0x86, 0x6b, 0x09, 0xf6, 0x4d, 0x07, 0x00,
- 0x86, 0x6b, 0x0a, 0xf5, 0x4e, 0x07, 0x00,
- 0x86, 0x6b, 0x14, 0xeb, 0x4f, 0x07, 0x00,
- 0x86, 0x6b, 0x1e, 0xe1, 0x50, 0x07, 0x00,
- 0x86, 0x6b, 0x17, 0xe8, 0x52, 0x07, 0x00,
- 0x86, 0x6b, 0x1f, 0xe0, 0x51, 0x07, 0x00,
- 0x86, 0x6b, 0x0e, 0xf1, 0x0b, 0x07, 0x00,
- 0x86, 0x6b, 0x20, 0xdf, 0x0c, 0x07, 0x00,
- 0x86, 0x6b, 0x42, 0xbd, 0x0d, 0x07, 0x00,
- 0x86, 0x6b, 0x0b, 0xf4, 0x0e, 0x07, 0x00,
- 0x86, 0x6b, 0x43, 0xbc, 0x0f, 0x07, 0x00,
- 0x86, 0x6b, 0x10, 0xef, 0x10, 0x07, 0x00,
- 0x86, 0x6b, 0x21, 0xde, 0x11, 0x07, 0x00,
- 0x86, 0x6b, 0x13, 0xec, 0x12, 0x07, 0x00,
- 0x86, 0x6b, 0x11, 0xee, 0x13, 0x07, 0x00,
- 0x86, 0x6b, 0x12, 0xed, 0x14, 0x07, 0x00,
- 0x86, 0x6b, 0x19, 0xe6, 0x15, 0x07, 0x00,
- 0x86, 0x6b, 0x1a, 0xe5, 0x16, 0x07, 0x00,
- 0x86, 0x6b, 0x1b, 0xe4, 0x17, 0x07, 0x00,
- 0x86, 0x6b, 0x4b, 0xb4, 0x18, 0x07, 0x00,
- 0x86, 0x6b, 0x40, 0xbf, 0x19, 0x07, 0x00,
- 0x86, 0x6b, 0x44, 0xbb, 0x1a, 0x07, 0x00,
- 0x86, 0x6b, 0x41, 0xbe, 0x1b, 0x07, 0x00,
- 0x86, 0x6b, 0x22, 0xdd, 0x1c, 0x07, 0x00,
- 0x86, 0x6b, 0x15, 0xea, 0x1d, 0x07, 0x00,
- 0x86, 0x6b, 0x0f, 0xf0, 0x3f, 0x07, 0x00,
- 0x86, 0x6b, 0x1c, 0xe3, 0x40, 0x07, 0x00,
- 0x86, 0x6b, 0x4a, 0xb5, 0x41, 0x07, 0x00,
- 0x86, 0x6b, 0x48, 0xb7, 0x42, 0x07, 0x00,
- 0x86, 0x6b, 0x49, 0xb6, 0x43, 0x07, 0x00,
- 0x86, 0x6b, 0x18, 0xe7, 0x44, 0x07, 0x00,
- 0x86, 0x6b, 0x23, 0xdc, 0x45, 0x07, 0x00,
-};
-
-/* AverMedia Volar X */
-static struct ir_scancode ir_codes_af9015_table_avermedia[] = {
- { 0x053d, KEY_PROG1 }, /* SOURCE */
- { 0x0512, KEY_POWER }, /* POWER */
- { 0x051e, KEY_1 }, /* 1 */
- { 0x051f, KEY_2 }, /* 2 */
- { 0x0520, KEY_3 }, /* 3 */
- { 0x0521, KEY_4 }, /* 4 */
- { 0x0522, KEY_5 }, /* 5 */
- { 0x0523, KEY_6 }, /* 6 */
- { 0x0524, KEY_7 }, /* 7 */
- { 0x0525, KEY_8 }, /* 8 */
- { 0x0526, KEY_9 }, /* 9 */
- { 0x053f, KEY_LEFT }, /* L / DISPLAY */
- { 0x0527, KEY_0 }, /* 0 */
- { 0x050f, KEY_RIGHT }, /* R / CH RTN */
- { 0x0518, KEY_PROG2 }, /* SNAP SHOT */
- { 0x051c, KEY_PROG3 }, /* 16-CH PREV */
- { 0x052d, KEY_VOLUMEDOWN }, /* VOL DOWN */
- { 0x053e, KEY_ZOOM }, /* FULL SCREEN */
- { 0x052e, KEY_VOLUMEUP }, /* VOL UP */
- { 0x0510, KEY_MUTE }, /* MUTE */
- { 0x0504, KEY_AUDIO }, /* AUDIO */
- { 0x0515, KEY_RECORD }, /* RECORD */
- { 0x0511, KEY_PLAY }, /* PLAY */
- { 0x0516, KEY_STOP }, /* STOP */
- { 0x050c, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
- { 0x0505, KEY_BACK }, /* << / RED */
- { 0x0509, KEY_FORWARD }, /* >> / YELLOW */
- { 0x0517, KEY_TEXT }, /* TELETEXT */
- { 0x050a, KEY_EPG }, /* EPG */
- { 0x0513, KEY_MENU }, /* MENU */
-
- { 0x050e, KEY_CHANNELUP }, /* CH UP */
- { 0x050d, KEY_CHANNELDOWN }, /* CH DOWN */
- { 0x0519, KEY_FIRST }, /* |<< / GREEN */
- { 0x0508, KEY_LAST }, /* >>| / BLUE */
-};
-
-static u8 af9015_ir_table_avermedia[] = {
- 0x02, 0xfd, 0x00, 0xff, 0x12, 0x05, 0x00,
- 0x02, 0xfd, 0x01, 0xfe, 0x3d, 0x05, 0x00,
- 0x02, 0xfd, 0x03, 0xfc, 0x17, 0x05, 0x00,
- 0x02, 0xfd, 0x04, 0xfb, 0x0a, 0x05, 0x00,
- 0x02, 0xfd, 0x05, 0xfa, 0x1e, 0x05, 0x00,
- 0x02, 0xfd, 0x06, 0xf9, 0x1f, 0x05, 0x00,
- 0x02, 0xfd, 0x07, 0xf8, 0x20, 0x05, 0x00,
- 0x02, 0xfd, 0x09, 0xf6, 0x21, 0x05, 0x00,
- 0x02, 0xfd, 0x0a, 0xf5, 0x22, 0x05, 0x00,
- 0x02, 0xfd, 0x0b, 0xf4, 0x23, 0x05, 0x00,
- 0x02, 0xfd, 0x0d, 0xf2, 0x24, 0x05, 0x00,
- 0x02, 0xfd, 0x0e, 0xf1, 0x25, 0x05, 0x00,
- 0x02, 0xfd, 0x0f, 0xf0, 0x26, 0x05, 0x00,
- 0x02, 0xfd, 0x11, 0xee, 0x27, 0x05, 0x00,
- 0x02, 0xfd, 0x08, 0xf7, 0x04, 0x05, 0x00,
- 0x02, 0xfd, 0x0c, 0xf3, 0x3e, 0x05, 0x00,
- 0x02, 0xfd, 0x10, 0xef, 0x1c, 0x05, 0x00,
- 0x02, 0xfd, 0x12, 0xed, 0x3f, 0x05, 0x00,
- 0x02, 0xfd, 0x13, 0xec, 0x0f, 0x05, 0x00,
- 0x02, 0xfd, 0x14, 0xeb, 0x10, 0x05, 0x00,
- 0x02, 0xfd, 0x15, 0xea, 0x13, 0x05, 0x00,
- 0x02, 0xfd, 0x17, 0xe8, 0x18, 0x05, 0x00,
- 0x02, 0xfd, 0x18, 0xe7, 0x11, 0x05, 0x00,
- 0x02, 0xfd, 0x19, 0xe6, 0x15, 0x05, 0x00,
- 0x02, 0xfd, 0x1a, 0xe5, 0x0c, 0x05, 0x00,
- 0x02, 0xfd, 0x1b, 0xe4, 0x16, 0x05, 0x00,
- 0x02, 0xfd, 0x1c, 0xe3, 0x09, 0x05, 0x00,
- 0x02, 0xfd, 0x1d, 0xe2, 0x05, 0x05, 0x00,
- 0x02, 0xfd, 0x1e, 0xe1, 0x2d, 0x05, 0x00,
- 0x02, 0xfd, 0x1f, 0xe0, 0x2e, 0x05, 0x00,
- 0x03, 0xfc, 0x00, 0xff, 0x08, 0x05, 0x00,
- 0x03, 0xfc, 0x01, 0xfe, 0x19, 0x05, 0x00,
- 0x03, 0xfc, 0x02, 0xfd, 0x0d, 0x05, 0x00,
- 0x03, 0xfc, 0x03, 0xfc, 0x0e, 0x05, 0x00,
-};
-
-static u8 af9015_ir_table_avermedia_ks[] = {
- 0x05, 0xfa, 0x01, 0xfe, 0x12, 0x05, 0x00,
- 0x05, 0xfa, 0x02, 0xfd, 0x0e, 0x05, 0x00,
- 0x05, 0xfa, 0x03, 0xfc, 0x0d, 0x05, 0x00,
- 0x05, 0xfa, 0x04, 0xfb, 0x2e, 0x05, 0x00,
- 0x05, 0xfa, 0x05, 0xfa, 0x2d, 0x05, 0x00,
- 0x05, 0xfa, 0x06, 0xf9, 0x10, 0x05, 0x00,
- 0x05, 0xfa, 0x07, 0xf8, 0x0f, 0x05, 0x00,
- 0x05, 0xfa, 0x08, 0xf7, 0x3d, 0x05, 0x00,
- 0x05, 0xfa, 0x09, 0xf6, 0x1e, 0x05, 0x00,
- 0x05, 0xfa, 0x0a, 0xf5, 0x1f, 0x05, 0x00,
- 0x05, 0xfa, 0x0b, 0xf4, 0x20, 0x05, 0x00,
- 0x05, 0xfa, 0x0c, 0xf3, 0x21, 0x05, 0x00,
- 0x05, 0xfa, 0x0d, 0xf2, 0x22, 0x05, 0x00,
- 0x05, 0xfa, 0x0e, 0xf1, 0x23, 0x05, 0x00,
- 0x05, 0xfa, 0x0f, 0xf0, 0x24, 0x05, 0x00,
- 0x05, 0xfa, 0x10, 0xef, 0x25, 0x05, 0x00,
- 0x05, 0xfa, 0x11, 0xee, 0x26, 0x05, 0x00,
- 0x05, 0xfa, 0x12, 0xed, 0x27, 0x05, 0x00,
- 0x05, 0xfa, 0x13, 0xec, 0x04, 0x05, 0x00,
- 0x05, 0xfa, 0x15, 0xea, 0x0a, 0x05, 0x00,
- 0x05, 0xfa, 0x16, 0xe9, 0x11, 0x05, 0x00,
- 0x05, 0xfa, 0x17, 0xe8, 0x15, 0x05, 0x00,
- 0x05, 0xfa, 0x18, 0xe7, 0x16, 0x05, 0x00,
- 0x05, 0xfa, 0x1c, 0xe3, 0x05, 0x05, 0x00,
- 0x05, 0xfa, 0x1d, 0xe2, 0x09, 0x05, 0x00,
- 0x05, 0xfa, 0x4d, 0xb2, 0x3f, 0x05, 0x00,
- 0x05, 0xfa, 0x56, 0xa9, 0x3e, 0x05, 0x00
-};
-
-/* Digittrade DVB-T USB Stick */
-static struct ir_scancode ir_codes_af9015_table_digittrade[] = {
- { 0x010f, KEY_LAST }, /* RETURN */
- { 0x0517, KEY_TEXT }, /* TELETEXT */
- { 0x0108, KEY_EPG }, /* EPG */
- { 0x0513, KEY_POWER }, /* POWER */
- { 0x0109, KEY_ZOOM }, /* FULLSCREEN */
- { 0x0040, KEY_AUDIO }, /* DUAL SOUND */
- { 0x002c, KEY_PRINT }, /* SNAPSHOT */
- { 0x0516, KEY_SUBTITLE }, /* SUBTITLE */
- { 0x0052, KEY_CHANNELUP }, /* CH Up */
- { 0x0051, KEY_CHANNELDOWN },/* Ch Dn */
- { 0x0057, KEY_VOLUMEUP }, /* Vol Up */
- { 0x0056, KEY_VOLUMEDOWN }, /* Vol Dn */
- { 0x0110, KEY_MUTE }, /* MUTE */
- { 0x0027, KEY_0 },
- { 0x001e, KEY_1 },
- { 0x001f, KEY_2 },
- { 0x0020, KEY_3 },
- { 0x0021, KEY_4 },
- { 0x0022, KEY_5 },
- { 0x0023, KEY_6 },
- { 0x0024, KEY_7 },
- { 0x0025, KEY_8 },
- { 0x0026, KEY_9 },
- { 0x0117, KEY_PLAYPAUSE }, /* TIMESHIFT */
- { 0x0115, KEY_RECORD }, /* RECORD */
- { 0x0313, KEY_PLAY }, /* PLAY */
- { 0x0116, KEY_STOP }, /* STOP */
- { 0x0113, KEY_PAUSE }, /* PAUSE */
-};
-
-static u8 af9015_ir_table_digittrade[] = {
- 0x00, 0xff, 0x06, 0xf9, 0x13, 0x05, 0x00,
- 0x00, 0xff, 0x4d, 0xb2, 0x17, 0x01, 0x00,
- 0x00, 0xff, 0x1f, 0xe0, 0x2c, 0x00, 0x00,
- 0x00, 0xff, 0x0a, 0xf5, 0x15, 0x01, 0x00,
- 0x00, 0xff, 0x0e, 0xf1, 0x16, 0x01, 0x00,
- 0x00, 0xff, 0x09, 0xf6, 0x09, 0x01, 0x00,
- 0x00, 0xff, 0x01, 0xfe, 0x08, 0x01, 0x00,
- 0x00, 0xff, 0x05, 0xfa, 0x10, 0x01, 0x00,
- 0x00, 0xff, 0x02, 0xfd, 0x56, 0x00, 0x00,
- 0x00, 0xff, 0x40, 0xbf, 0x57, 0x00, 0x00,
- 0x00, 0xff, 0x19, 0xe6, 0x52, 0x00, 0x00,
- 0x00, 0xff, 0x17, 0xe8, 0x51, 0x00, 0x00,
- 0x00, 0xff, 0x10, 0xef, 0x0f, 0x01, 0x00,
- 0x00, 0xff, 0x54, 0xab, 0x27, 0x00, 0x00,
- 0x00, 0xff, 0x1b, 0xe4, 0x1e, 0x00, 0x00,
- 0x00, 0xff, 0x11, 0xee, 0x1f, 0x00, 0x00,
- 0x00, 0xff, 0x15, 0xea, 0x20, 0x00, 0x00,
- 0x00, 0xff, 0x12, 0xed, 0x21, 0x00, 0x00,
- 0x00, 0xff, 0x16, 0xe9, 0x22, 0x00, 0x00,
- 0x00, 0xff, 0x4c, 0xb3, 0x23, 0x00, 0x00,
- 0x00, 0xff, 0x48, 0xb7, 0x24, 0x00, 0x00,
- 0x00, 0xff, 0x04, 0xfb, 0x25, 0x00, 0x00,
- 0x00, 0xff, 0x00, 0xff, 0x26, 0x00, 0x00,
- 0x00, 0xff, 0x1e, 0xe1, 0x13, 0x03, 0x00,
- 0x00, 0xff, 0x1a, 0xe5, 0x13, 0x01, 0x00,
- 0x00, 0xff, 0x03, 0xfc, 0x17, 0x05, 0x00,
- 0x00, 0xff, 0x0d, 0xf2, 0x16, 0x05, 0x00,
- 0x00, 0xff, 0x1d, 0xe2, 0x40, 0x00, 0x00,
-};
-
-/* TREKSTOR DVB-T USB Stick */
-static struct ir_scancode ir_codes_af9015_table_trekstor[] = {
- { 0x0704, KEY_AGAIN }, /* Home */
- { 0x0705, KEY_MUTE }, /* Mute */
- { 0x0706, KEY_UP }, /* Up */
- { 0x0707, KEY_DOWN }, /* Down */
- { 0x0709, KEY_RIGHT }, /* Right */
- { 0x070a, KEY_ENTER }, /* OK */
- { 0x070b, KEY_FASTFORWARD }, /* Fast forward */
- { 0x070c, KEY_REWIND }, /* Rewind */
- { 0x070d, KEY_PLAY }, /* Play/Pause */
- { 0x070e, KEY_VOLUMEUP }, /* Volume + */
- { 0x070f, KEY_VOLUMEDOWN }, /* Volume - */
- { 0x0710, KEY_RECORD }, /* Record */
- { 0x0711, KEY_STOP }, /* Stop */
- { 0x0712, KEY_ZOOM }, /* TV */
- { 0x0713, KEY_EPG }, /* Info/EPG */
- { 0x0714, KEY_CHANNELDOWN }, /* Channel - */
- { 0x0715, KEY_CHANNELUP }, /* Channel + */
- { 0x071e, KEY_1 },
- { 0x071f, KEY_2 },
- { 0x0720, KEY_3 },
- { 0x0721, KEY_4 },
- { 0x0722, KEY_5 },
- { 0x0723, KEY_6 },
- { 0x0724, KEY_7 },
- { 0x0725, KEY_8 },
- { 0x0726, KEY_9 },
- { 0x0708, KEY_LEFT }, /* LEFT */
- { 0x0727, KEY_0 },
-};
-
-static u8 af9015_ir_table_trekstor[] = {
- 0x00, 0xff, 0x86, 0x79, 0x04, 0x07, 0x00,
- 0x00, 0xff, 0x85, 0x7a, 0x05, 0x07, 0x00,
- 0x00, 0xff, 0x87, 0x78, 0x06, 0x07, 0x00,
- 0x00, 0xff, 0x8c, 0x73, 0x07, 0x07, 0x00,
- 0x00, 0xff, 0x89, 0x76, 0x09, 0x07, 0x00,
- 0x00, 0xff, 0x88, 0x77, 0x0a, 0x07, 0x00,
- 0x00, 0xff, 0x8a, 0x75, 0x0b, 0x07, 0x00,
- 0x00, 0xff, 0x9e, 0x61, 0x0c, 0x07, 0x00,
- 0x00, 0xff, 0x8d, 0x72, 0x0d, 0x07, 0x00,
- 0x00, 0xff, 0x8b, 0x74, 0x0e, 0x07, 0x00,
- 0x00, 0xff, 0x9b, 0x64, 0x0f, 0x07, 0x00,
- 0x00, 0xff, 0x9d, 0x62, 0x10, 0x07, 0x00,
- 0x00, 0xff, 0x8e, 0x71, 0x11, 0x07, 0x00,
- 0x00, 0xff, 0x9c, 0x63, 0x12, 0x07, 0x00,
- 0x00, 0xff, 0x8f, 0x70, 0x13, 0x07, 0x00,
- 0x00, 0xff, 0x93, 0x6c, 0x14, 0x07, 0x00,
- 0x00, 0xff, 0x97, 0x68, 0x15, 0x07, 0x00,
- 0x00, 0xff, 0x92, 0x6d, 0x1e, 0x07, 0x00,
- 0x00, 0xff, 0x96, 0x69, 0x1f, 0x07, 0x00,
- 0x00, 0xff, 0x9a, 0x65, 0x20, 0x07, 0x00,
- 0x00, 0xff, 0x91, 0x6e, 0x21, 0x07, 0x00,
- 0x00, 0xff, 0x95, 0x6a, 0x22, 0x07, 0x00,
- 0x00, 0xff, 0x99, 0x66, 0x23, 0x07, 0x00,
- 0x00, 0xff, 0x90, 0x6f, 0x24, 0x07, 0x00,
- 0x00, 0xff, 0x94, 0x6b, 0x25, 0x07, 0x00,
- 0x00, 0xff, 0x98, 0x67, 0x26, 0x07, 0x00,
- 0x00, 0xff, 0x9f, 0x60, 0x08, 0x07, 0x00,
- 0x00, 0xff, 0x84, 0x7b, 0x27, 0x07, 0x00,
-};
-
-/* MSI DIGIVOX mini III */
-static struct ir_scancode ir_codes_af9015_table_msi_digivox_iii[] = {
- { 0x0713, KEY_POWER }, /* [red power button] */
- { 0x073b, KEY_VIDEO }, /* Source */
- { 0x073e, KEY_ZOOM }, /* Zoom */
- { 0x070b, KEY_POWER2 }, /* ShutDown */
- { 0x071e, KEY_1 },
- { 0x071f, KEY_2 },
- { 0x0720, KEY_3 },
- { 0x0721, KEY_4 },
- { 0x0722, KEY_5 },
- { 0x0723, KEY_6 },
- { 0x0724, KEY_7 },
- { 0x0725, KEY_8 },
- { 0x0726, KEY_9 },
- { 0x0727, KEY_0 },
- { 0x0752, KEY_CHANNELUP }, /* CH+ */
- { 0x0751, KEY_CHANNELDOWN }, /* CH- */
- { 0x0750, KEY_VOLUMEUP }, /* Vol+ */
- { 0x074f, KEY_VOLUMEDOWN }, /* Vol- */
- { 0x0705, KEY_ESC }, /* [back up arrow] */
- { 0x0708, KEY_OK }, /* [enter arrow] */
- { 0x073f, KEY_RECORD }, /* Rec */
- { 0x0716, KEY_STOP }, /* Stop */
- { 0x072a, KEY_PLAY }, /* Play */
- { 0x073c, KEY_MUTE }, /* Mute */
- { 0x0718, KEY_UP },
- { 0x0707, KEY_DOWN },
- { 0x070f, KEY_LEFT },
- { 0x0715, KEY_RIGHT },
- { 0x0736, KEY_RED },
- { 0x0737, KEY_GREEN },
- { 0x072d, KEY_YELLOW },
- { 0x072e, KEY_BLUE },
-};
-
-static u8 af9015_ir_table_msi_digivox_iii[] = {
- 0x61, 0xd6, 0x43, 0xbc, 0x13, 0x07, 0x00, /* KEY_POWER */
- 0x61, 0xd6, 0x01, 0xfe, 0x3b, 0x07, 0x00, /* KEY_VIDEO */
- 0x61, 0xd6, 0x0b, 0xf4, 0x3e, 0x07, 0x00, /* KEY_ZOOM */
- 0x61, 0xd6, 0x03, 0xfc, 0x0b, 0x07, 0x00, /* KEY_POWER2 */
- 0x61, 0xd6, 0x04, 0xfb, 0x1e, 0x07, 0x00, /* KEY_1 */
- 0x61, 0xd6, 0x08, 0xf7, 0x1f, 0x07, 0x00, /* KEY_2 */
- 0x61, 0xd6, 0x02, 0xfd, 0x20, 0x07, 0x00, /* KEY_3 */
- 0x61, 0xd6, 0x0f, 0xf0, 0x21, 0x07, 0x00, /* KEY_4 */
- 0x61, 0xd6, 0x05, 0xfa, 0x22, 0x07, 0x00, /* KEY_5 */
- 0x61, 0xd6, 0x06, 0xf9, 0x23, 0x07, 0x00, /* KEY_6 */
- 0x61, 0xd6, 0x0c, 0xf3, 0x24, 0x07, 0x00, /* KEY_7 */
- 0x61, 0xd6, 0x0d, 0xf2, 0x25, 0x07, 0x00, /* KEY_8 */
- 0x61, 0xd6, 0x0a, 0xf5, 0x26, 0x07, 0x00, /* KEY_9 */
- 0x61, 0xd6, 0x11, 0xee, 0x27, 0x07, 0x00, /* KEY_0 */
- 0x61, 0xd6, 0x09, 0xf6, 0x52, 0x07, 0x00, /* KEY_CHANNELUP */
- 0x61, 0xd6, 0x07, 0xf8, 0x51, 0x07, 0x00, /* KEY_CHANNELDOWN */
- 0x61, 0xd6, 0x0e, 0xf1, 0x50, 0x07, 0x00, /* KEY_VOLUMEUP */
- 0x61, 0xd6, 0x13, 0xec, 0x4f, 0x07, 0x00, /* KEY_VOLUMEDOWN */
- 0x61, 0xd6, 0x10, 0xef, 0x05, 0x07, 0x00, /* KEY_ESC */
- 0x61, 0xd6, 0x12, 0xed, 0x08, 0x07, 0x00, /* KEY_OK */
- 0x61, 0xd6, 0x14, 0xeb, 0x3f, 0x07, 0x00, /* KEY_RECORD */
- 0x61, 0xd6, 0x15, 0xea, 0x16, 0x07, 0x00, /* KEY_STOP */
- 0x61, 0xd6, 0x16, 0xe9, 0x2a, 0x07, 0x00, /* KEY_PLAY */
- 0x61, 0xd6, 0x17, 0xe8, 0x3c, 0x07, 0x00, /* KEY_MUTE */
- 0x61, 0xd6, 0x18, 0xe7, 0x18, 0x07, 0x00, /* KEY_UP */
- 0x61, 0xd6, 0x19, 0xe6, 0x07, 0x07, 0x00, /* KEY_DOWN */
- 0x61, 0xd6, 0x1a, 0xe5, 0x0f, 0x07, 0x00, /* KEY_LEFT */
- 0x61, 0xd6, 0x1b, 0xe4, 0x15, 0x07, 0x00, /* KEY_RIGHT */
- 0x61, 0xd6, 0x1c, 0xe3, 0x36, 0x07, 0x00, /* KEY_RED */
- 0x61, 0xd6, 0x1d, 0xe2, 0x37, 0x07, 0x00, /* KEY_GREEN */
- 0x61, 0xd6, 0x1e, 0xe1, 0x2d, 0x07, 0x00, /* KEY_YELLOW */
- 0x61, 0xd6, 0x1f, 0xe0, 0x2e, 0x07, 0x00, /* KEY_BLUE */
-};
-
#endif
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 4685259e1614..1759d26bca42 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -354,7 +354,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
{
struct anysee_state *state = adap->dev->priv;
- deb_info("%s: \n", __func__);
+ deb_info("%s:\n", __func__);
switch (state->tuner) {
case DVB_PLL_THOMSON_DTT7579:
@@ -374,78 +374,32 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int anysee_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int anysee_rc_query(struct dvb_usb_device *d)
{
u8 buf[] = {CMD_GET_IR_CODE};
- struct ir_scancode *keymap = d->props.rc.legacy.rc_key_map;
u8 ircode[2];
- int i, ret;
+ int ret;
+
+ /* The remote controller is basic NEC using address byte 0x08.
+ The Anysee device RC query returns only two bytes, status and code;
+ the address byte is dropped. It also returns nothing for NEC
+ remotes whose address byte is other than 0x08, so we cannot use
+ the device as a standard NEC receiver.
+ It might be possible to hack around this by reading the whole code
+ directly from device memory... */
- ret = anysee_ctrl_msg(d, buf, sizeof(buf), &ircode[0], 2);
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode));
if (ret)
return ret;
- *event = 0;
- *state = REMOTE_NO_KEY_PRESSED;
-
- for (i = 0; i < d->props.rc.legacy.rc_key_map_size; i++) {
- if (rc5_custom(&keymap[i]) == ircode[0] &&
- rc5_data(&keymap[i]) == ircode[1]) {
- *event = keymap[i].keycode;
- *state = REMOTE_KEY_PRESSED;
- return 0;
- }
+ if (ircode[0]) {
+ deb_rc("%s: key pressed %02x\n", __func__, ircode[1]);
+ ir_keydown(d->rc_input_dev, 0x08 << 8 | ircode[1], 0);
}
+
return 0;
}
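Because the hardware drops the address byte, the driver reconstructs the scancode from the known fixed NEC address; a hedged illustration with an invented command value:

    /* a reported command byte of 0x15 under the fixed address 0x08 becomes
     * scancode 0x0815, which is what ir_keydown() receives above */
    u32 scancode = 0x08 << 8 | ircode[1];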
-static struct ir_scancode ir_codes_anysee_table[] = {
- { 0x0100, KEY_0 },
- { 0x0101, KEY_1 },
- { 0x0102, KEY_2 },
- { 0x0103, KEY_3 },
- { 0x0104, KEY_4 },
- { 0x0105, KEY_5 },
- { 0x0106, KEY_6 },
- { 0x0107, KEY_7 },
- { 0x0108, KEY_8 },
- { 0x0109, KEY_9 },
- { 0x010a, KEY_POWER },
- { 0x010b, KEY_DOCUMENTS }, /* * */
- { 0x0119, KEY_FAVORITES },
- { 0x0120, KEY_SLEEP },
- { 0x0121, KEY_MODE }, /* 4:3 / 16:9 select */
- { 0x0122, KEY_ZOOM },
- { 0x0147, KEY_TEXT },
- { 0x0116, KEY_TV }, /* TV / radio select */
- { 0x011e, KEY_LANGUAGE }, /* Second Audio Program */
- { 0x011a, KEY_SUBTITLE },
- { 0x011b, KEY_CAMERA }, /* screenshot */
- { 0x0142, KEY_MUTE },
- { 0x010e, KEY_MENU },
- { 0x010f, KEY_EPG },
- { 0x0117, KEY_INFO },
- { 0x0110, KEY_EXIT },
- { 0x0113, KEY_VOLUMEUP },
- { 0x0112, KEY_VOLUMEDOWN },
- { 0x0111, KEY_CHANNELUP },
- { 0x0114, KEY_CHANNELDOWN },
- { 0x0115, KEY_OK },
- { 0x011d, KEY_RED },
- { 0x011f, KEY_GREEN },
- { 0x011c, KEY_YELLOW },
- { 0x0144, KEY_BLUE },
- { 0x010c, KEY_SHUFFLE }, /* snapshot */
- { 0x0148, KEY_STOP },
- { 0x0150, KEY_PLAY },
- { 0x0151, KEY_PAUSE },
- { 0x0149, KEY_RECORD },
- { 0x0118, KEY_PREVIOUS }, /* |<< */
- { 0x010d, KEY_NEXT }, /* >>| */
- { 0x0124, KEY_PROG1 }, /* F1 */
- { 0x0125, KEY_PROG2 }, /* F2 */
-};
-
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties anysee_properties;
@@ -520,11 +474,12 @@ static struct dvb_usb_device_properties anysee_properties = {
}
},
- .rc.legacy = {
- .rc_key_map = ir_codes_anysee_table,
- .rc_key_map_size = ARRAY_SIZE(ir_codes_anysee_table),
+ .rc.core = {
+ .rc_codes = RC_MAP_ANYSEE,
+ .protocol = IR_TYPE_OTHER,
+ .module_name = "anysee",
.rc_query = anysee_rc_query,
- .rc_interval = 200, /* windows driver uses 500ms */
+ .rc_interval = 250, /* windows driver uses 500ms */
},
.i2c_algo = &anysee_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index cead089bbb4f..88e4a62abc44 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -20,7 +20,6 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
}
strlcpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
- d->i2c_adap.class = I2C_CLASS_TV_DIGITAL,
d->i2c_adap.algo = d->props.i2c_algo;
d->i2c_adap.algo_data = NULL;
d->i2c_adap.dev.parent = &d->udev->dev;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 1a774d58d664..192a40ce583d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -32,6 +32,7 @@
#define USB_VID_EMPIA 0xeb1a
#define USB_VID_GENPIX 0x09c0
#define USB_VID_GRANDTEC 0x5032
+#define USB_VID_GTEK 0x1f4d
#define USB_VID_HANFTEK 0x15f4
#define USB_VID_HAUPPAUGE 0x2040
#define USB_VID_HYPER_PALTEK 0x1025
@@ -133,6 +134,8 @@
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
+#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
@@ -143,6 +146,7 @@
#define USB_PID_TWINHAN_VP7021_WARM 0x3208
#define USB_PID_TINYTWIN 0x3226
#define USB_PID_TINYTWIN_2 0xe402
+#define USB_PID_TINYTWIN_3 0x9016
#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
@@ -196,6 +200,7 @@
#define USB_PID_AVERMEDIA_A309 0xa309
#define USB_PID_AVERMEDIA_A310 0xa310
#define USB_PID_AVERMEDIA_A850 0x850a
+#define USB_PID_AVERMEDIA_A850T 0x850b
#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_AVERMEDIA_A815M 0x815a
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
@@ -268,6 +273,7 @@
#define USB_PID_GENPIX_8PSK_REV_2 0x0202
#define USB_PID_GENPIX_SKYWALKER_1 0x0203
#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
+#define USB_PID_GENPIX_SKYWALKER_2 0x0206
#define USB_PID_SIGMATEK_DVB_110 0x6610
#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
#define USB_PID_MSI_DIGIVOX_DUO 0x8801
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index 93c21ddd0b77..015b4e8af1a5 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -75,7 +75,7 @@ static int jdvbt90502_single_reg_write(struct jdvbt90502_state *state,
return 0;
}
-static int _jdvbt90502_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int _jdvbt90502_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct jdvbt90502_state *state = fe->demodulator_priv;
int err, i;
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index dbdb5347b2a8..60d11e57e7d0 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -109,7 +109,7 @@ static int gp8psk_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength
static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
{
- tune->min_delay_ms = 200;
+ tune->min_delay_ms = 800;
return 0;
}
@@ -334,7 +334,7 @@ success:
static struct dvb_frontend_ops gp8psk_fe_ops = {
.info = {
- .name = "Genpix 8psk-to-USB2 DVB-S",
+ .name = "Genpix DVB-S",
.type = FE_QPSK,
.frequency_min = 800000,
.frequency_max = 2250000,
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 45106ac49674..c821293dbc22 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -227,6 +227,7 @@ static struct usb_device_id gp8psk_usb_table [] = {
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) },
{ USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) },
+ { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_2) },
/* { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, */
{ 0 },
};
@@ -258,7 +259,7 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
- .num_device_descs = 3,
+ .num_device_descs = 4,
.devices = {
{ .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver",
.cold_ids = { &gp8psk_usb_table[0], NULL },
@@ -272,6 +273,10 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.cold_ids = { NULL },
.warm_ids = { &gp8psk_usb_table[3], NULL },
},
+ { .name = "Genpix SkyWalker-2 DVB-S receiver",
+ .cold_ids = { NULL },
+ .warm_ids = { &gp8psk_usb_table[4], NULL },
+ },
{ NULL },
}
};
@@ -306,6 +311,6 @@ module_init(gp8psk_usb_module_init);
module_exit(gp8psk_usb_module_exit);
MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
-MODULE_DESCRIPTION("Driver for Genpix 8psk-to-USB2 DVB-S");
+MODULE_DESCRIPTION("Driver for Genpix DVB-S");
MODULE_VERSION("1.1");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
new file mode 100644
index 000000000000..d939fbbf9fe6
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -0,0 +1,1088 @@
+/* DVB USB compliant linux driver for
+ *
+ * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395
+ * LME2510C + LG TDQY-P001F
+ * LME2510 + LG TDQY-P001F
+ *
+ * MVB7395 (LME2510C+SHARP:BS2F7HZ7395)
+ * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V)
+ *
+ * MV001F (LME2510+LGTDQY-P001F)
+ * LG TDQY - P001F =(TDA8263 + TDA10086H)
+ *
+ * MVB0001F (LME2510C+LGTDQT-P001F)
+ *
+ * For firmware see Documentation/dvb/lmedm04.txt
+ *
+ * I2C addresses:
+ * 0xd0 - STV0288 - Demodulator
+ * 0xc0 - Sharp IX2505V - Tuner
+ * --or--
+ * 0x1c - TDA10086 - Demodulator
+ * 0xc0 - TDA8263 - Tuner
+ *
+ * ***Please Note***
+ * There are other variants of the DM04
+ * ***NOT SUPPORTED***
+ * MV0194 (LME2510+SHARP0194)
+ * MVB0194 (LME2510C+SHARP0194)
+ *
+ *
+ * VID = 3344 PID LME2510=1122 LME2510C=1120
+ *
+ * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
+ * LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ *
+ * Known Issues:
+ * LME2510: Non-Intel USB chipsets fail to maintain High Speed on
+ * Boot or Hot Plug.
+ *
+ * QQbox suffers from noise on LNB voltage.
+ *
+ * PID functions have been removed from this driver version due to
+ * problems with different firmware and application versions.
+ */
+#define DVB_USB_LOG_PREFIX "LME2510(C)"
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/ir-core.h>
+
+#include "dvb-usb.h"
+#include "lmedm04.h"
+#include "tda826x.h"
+#include "tda10086.h"
+#include "stv0288.h"
+#include "ix2505v.h"
+
+
+
+/* debug */
+static int dvb_usb_lme2510_debug;
+#define l_dprintk(var, level, args...) do { \
+ if ((var >= level)) \
+ printk(KERN_DEBUG DVB_USB_LOG_PREFIX ": " args); \
+} while (0)
+
+#define deb_info(level, args...) l_dprintk(dvb_usb_lme2510_debug, level, args)
+#define debug_data_snipet(level, name, p) \
+ deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
+ *p, *(p+1), *(p+2), *(p+3), *(p+4), \
+ *(p+5), *(p+6), *(p+7));
+
+
+module_param_named(debug, dvb_usb_lme2510_debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."
+ DVB_USB_DEBUG_STATUS);
+
+static int dvb_usb_lme2510_firmware;
+module_param_named(firmware, dvb_usb_lme2510_firmware, int, 0644);
+MODULE_PARM_DESC(firmware, "set default firmware 0=Sharp7395 1=LG");
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+#define TUNER_LG 0x1
+#define TUNER_S7395 0x2
+
+struct lme2510_state {
+ u8 id;
+ u8 tuner_config;
+ u8 signal_lock;
+ u8 signal_level;
+ u8 signal_sn;
+ u8 time_key;
+ u8 i2c_talk_onoff;
+ u8 i2c_gate;
+ u8 i2c_tuner_gate_w;
+ u8 i2c_tuner_gate_r;
+ u8 i2c_tuner_addr;
+ u8 stream_on;
+ u8 one_tune;
+ void *buffer;
+ struct urb *lme_urb;
+ void *usb_buffer;
+
+};
+
+static int lme2510_bulk_write(struct usb_device *dev,
+ u8 *snd, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len , &actual_l, 500);
+ return ret;
+}
+
+static int lme2510_bulk_read(struct usb_device *dev,
+ u8 *rev, int len, u8 pipe)
+{
+ int ret, actual_l;
+
+ ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len , &actual_l, 500);
+ return ret;
+}
+
+static int lme2510_usb_talk(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct lme2510_state *st = d->priv;
+ u8 *buff;
+ int ret = 0;
+
+ if (st->usb_buffer == NULL) {
+ st->usb_buffer = kmalloc(512, GFP_KERNEL);
+ if (st->usb_buffer == NULL) {
+ info("MEM Error no memory");
+ return -ENOMEM;
+ }
+ }
+ buff = st->usb_buffer;
+
+ /* the read/write is capped at 512 bytes */
+ memcpy(buff, wbuf, (wlen > 512) ? 512 : wlen);
+
+ ret = mutex_lock_interruptible(&d->usb_mutex);
+
+ if (ret < 0)
+ return -EAGAIN;
+
+ ret |= usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, 0x01));
+
+ ret |= lme2510_bulk_write(d->udev, buff, wlen , 0x01);
+
+ msleep(12);
+
+ ret |= usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, 0x01));
+
+ ret |= lme2510_bulk_read(d->udev, buff, (rlen > 512) ?
+ 512 : rlen , 0x01);
+
+ if (rlen > 0)
+ memcpy(rbuf, buff, rlen);
+
+ mutex_unlock(&d->usb_mutex);
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int lme2510_usb_talk_restart(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen) {
+ static u8 stream_on[] = LME_ST_ON_W;
+ int ret;
+ u8 rbuff[10];
+ /*Send Normal Command*/
+ ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
+ /*Restart Stream Command*/
+ ret |= lme2510_usb_talk(d, stream_on, sizeof(stream_on),
+ rbuff, sizeof(rbuff));
+ return ret;
+}
+static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u16 keypress)
+{
+ struct dvb_usb_device *d = adap->dev;
+
+ deb_info(1, "INT Key Keypress =%04x", keypress);
+
+ if (keypress > 0)
+ ir_keydown(d->rc_input_dev, keypress, 0);
+
+ return 0;
+}
+
+static void lme2510_int_response(struct urb *lme_urb)
+{
+ struct dvb_usb_adapter *adap = lme_urb->context;
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 *ibuf, *rbuf;
+ int i = 0, offset;
+
+ switch (lme_urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ info("Error %x", lme_urb->status);
+ break;
+ }
+
+ rbuf = (u8 *) lme_urb->transfer_buffer;
+
+ offset = ((lme_urb->actual_length/8) > 4)
+ ? 4 : (lme_urb->actual_length/8) ;
+
+ for (i = 0; i < offset; ++i) {
+ ibuf = (u8 *)&rbuf[i*8];
+ deb_info(5, "INT O/S C =%02x C/O=%02x Type =%02x%02x",
+ offset, i, ibuf[0], ibuf[1]);
+
+ switch (ibuf[0]) {
+ case 0xaa:
+ debug_data_snipet(1, "INT Remote data snippet in", ibuf);
+ lme2510_remote_keypress(adap,
+ (u16)(ibuf[4]<<8)+ibuf[5]);
+ break;
+ case 0xbb:
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ if (ibuf[2] > 0)
+ st->signal_lock = ibuf[2];
+ st->signal_level = ibuf[4];
+ st->signal_sn = ibuf[3];
+ st->time_key = ibuf[7];
+ break;
+ case TUNER_S7395:
+ /* Tweak for earlier firmware*/
+ if (ibuf[1] == 0x03) {
+ st->signal_level = ibuf[3];
+ st->signal_sn = ibuf[4];
+ } else {
+ st->signal_level = ibuf[4];
+ st->signal_sn = ibuf[5];
+ }
+ break;
+ default:
+ break;
+ }
+ debug_data_snipet(5, "INT Remote data snippet in", ibuf);
+ break;
+ case 0xcc:
+ debug_data_snipet(1, "INT Control data snippet", ibuf);
+ break;
+ default:
+ debug_data_snipet(1, "INT Unknown data snippet", ibuf);
+ break;
+ }
+ }
+ usb_submit_urb(lme_urb, GFP_ATOMIC);
+}
+
+static int lme2510_int_read(struct dvb_usb_adapter *adap)
+{
+ struct lme2510_state *lme_int = adap->dev->priv;
+
+ lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+ if (lme_int->lme_urb == NULL)
+ return -ENOMEM;
+
+ lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 5000, GFP_ATOMIC,
+ &lme_int->lme_urb->transfer_dma);
+
+ if (lme_int->buffer == NULL)
+ return -ENOMEM;
+
+ usb_fill_int_urb(lme_int->lme_urb,
+ adap->dev->udev,
+ usb_rcvintpipe(adap->dev->udev, 0xa),
+ lme_int->buffer,
+ 4096,
+ lme2510_int_response,
+ adap,
+ 11);
+
+ lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ info("INT Interrupt Service Started");
+
+ return 0;
+}
+
+static int lme2510_return_status(struct usb_device *dev)
+{
+ int ret = 0;
+ u8 data[10] = {0};
+
+ ret |= usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
+ info("Firmware Status: %x (%x)", ret , data[2]);
+
+ return (ret < 0) ? -ENODEV : data[2];
+}
+
+static int lme2510_msg(struct dvb_usb_device *d,
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ int ret = 0;
+ struct lme2510_state *st = d->priv;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ if (st->i2c_talk_onoff == 1) {
+
+ ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen);
+
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ if (wbuf[2] == 0x1c) {
+ if (wbuf[3] == 0x0e) {
+ st->signal_lock = rbuf[1];
+ if ((st->stream_on & 1) &&
+ (st->signal_lock & 0x10)) {
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ st->i2c_talk_onoff = 0;
+ }
+ msleep(80);
+ }
+ }
+ break;
+ case TUNER_S7395:
+ if (wbuf[2] == 0xd0) {
+ if (wbuf[3] == 0x24) {
+ st->signal_lock = rbuf[1];
+ if ((st->stream_on & 1) &&
+ (st->signal_lock & 0x8)) {
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ st->i2c_talk_onoff = 0;
+ }
+ }
+ if ((wbuf[3] != 0x6) & (wbuf[3] != 0x5))
+ msleep(5);
+
+
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (st->tuner_config) {
+ case TUNER_LG:
+ switch (wbuf[3]) {
+ case 0x0e:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_lock;
+ break;
+ case 0x43:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_level;
+ break;
+ case 0x1c:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_sn;
+ break;
+ /*DiSEqC functions as per TDA10086*/
+ case 0x36:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ if (wbuf[2] == 0x1c)
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ default:
+ break;
+ }
+ break;
+ case TUNER_S7395:
+ switch (wbuf[3]) {
+ case 0x10:
+ rbuf[0] = 0x55;
+ rbuf[1] = (st->signal_level & 0x80)
+ ? 0 : (st->signal_level * 2);
+ break;
+ case 0x2d:
+ rbuf[0] = 0x55;
+ rbuf[1] = st->signal_sn;
+ break;
+ case 0x24:
+ rbuf[0] = 0x55;
+ rbuf[1] = (st->signal_level & 0x80)
+ ? 0 : st->signal_lock;
+ break;
+ case 0x6:
+ if (wbuf[2] == 0xd0)
+ lme2510_usb_talk(d,
+ wbuf, wlen, rbuf, rlen);
+ break;
+ case 0x1:
+ if (st->one_tune > 0)
+ break;
+ st->one_tune++;
+ st->i2c_talk_onoff = 1;
+ /*DiSEqC functions as per STV0288*/
+ case 0x5:
+ case 0x7:
+ case 0x8:
+ case 0x9:
+ case 0xa:
+ case 0xb:
+ if (wbuf[2] == 0xd0)
+ lme2510_usb_talk_restart(d,
+ wbuf, wlen, rbuf, rlen);
+ break;
+ default:
+ rbuf[0] = 0x55;
+ rbuf[1] = 0x00;
+ break;
+ }
+ break;
+ default:
+ break;
+
+ }
+
+ deb_info(4, "I2C From Interrupt Message out(%02x) in(%02x)",
+ wbuf[3], rbuf[1]);
+
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+
+ return ret;
+}
+
+
+static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct lme2510_state *st = d->priv;
+ static u8 obuf[64], ibuf[512];
+ int i, read, read_o;
+ u16 len;
+ u8 gate = st->i2c_gate;
+
+ if (gate == 0)
+ gate = 5;
+
+ if (num > 2)
+ warn("more than 2 i2c messages "
+ "at a time are not handled yet. TODO.");
+
+ for (i = 0; i < num; i++) {
+ read_o = 1 & (msg[i].flags & I2C_M_RD);
+ read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
+ read |= read_o;
+ gate = (msg[i].addr == st->i2c_tuner_addr)
+ ? (read) ? st->i2c_tuner_gate_r
+ : st->i2c_tuner_gate_w
+ : st->i2c_gate;
+ obuf[0] = gate | (read << 7);
+
+ if (gate == 5)
+ obuf[1] = (read) ? 2 : msg[i].len + 1;
+ else
+ obuf[1] = msg[i].len + read + 1;
+
+ obuf[2] = msg[i].addr;
+ if (read) {
+ if (read_o)
+ len = 3;
+ else {
+ memcpy(&obuf[3], msg[i].buf, msg[i].len);
+ obuf[msg[i].len+3] = msg[i+1].len;
+ len = msg[i].len+4;
+ }
+ } else {
+ memcpy(&obuf[3], msg[i].buf, msg[i].len);
+ len = msg[i].len+3;
+ }
+
+ if (lme2510_msg(d, obuf, len, ibuf, 512) < 0) {
+ deb_info(1, "i2c transfer failed.");
+ return -EAGAIN;
+ }
+
+ if (read) {
+ if (read_o)
+ memcpy(msg[i].buf, &ibuf[1], msg[i].len);
+ else {
+ memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len);
+ i++;
+ }
+ }
+ }
+ return i;
+}
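The framing built above is compact, so as a hedged sketch (not part of the patch, register value assumed): a combined write-then-read of one register from the Sharp demodulator at 0xd0 through the default gate (5, the S7395 configuration) is packed as follows, with the reply data returned starting at ibuf[1]:

    /* Illustration only: msg[0] writes {reg}, msg[1] reads one byte back */
    obuf[0] = 5 | (1 << 7);   /* gate 5 with the read flag set -> 0x85 */
    obuf[1] = 2;              /* gate 5 uses a fixed read-length field */
    obuf[2] = 0xd0;           /* chip (demodulator) address */
    obuf[3] = reg;            /* register to read */
    obuf[4] = 1;              /* msg[1].len, length of the read-back */
    /* total length passed to lme2510_msg() is msg[0].len + 4 = 5 */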
+
+static u32 lme2510_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm lme2510_i2c_algo = {
+ .master_xfer = lme2510_i2c_xfer,
+ .functionality = lme2510_i2c_func,
+};
+
+/* Callbacks for DVB USB */
+static int lme2510_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ if (lme2510_return_status(udev) == 0x44)
+ *cold = 1;
+ else
+ *cold = 0;
+ return 0;
+}
+
+static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 stream_on[] = LME_ST_ON_W;
+ static u8 clear_reg_3[] = LME_CLEAR_PID;
+ static u8 rbuf[1];
+ static u8 timeout;
+ int ret = 0, len = 2, rlen = sizeof(rbuf);
+
+ deb_info(1, "STM (%02x)", onoff);
+
+ if (onoff == 1) {
+ st->i2c_talk_onoff = 0;
+ timeout = 0;
+ /* wait for I2C to be free */
+ while (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0) {
+ timeout++;
+ if (timeout > 5)
+ return -ENODEV;
+ }
+ msleep(100);
+ ret |= lme2510_usb_talk(adap->dev,
+ stream_on, len, rbuf, rlen);
+ st->stream_on = 1;
+ st->one_tune = 0;
+ mutex_unlock(&adap->dev->i2c_mutex);
+ } else {
+ deb_info(1, "STM Stream Off");
+ ret |= lme2510_usb_talk(adap->dev, clear_reg_3,
+ sizeof(clear_reg_3), rbuf, rlen);
+ st->stream_on = 0;
+ st->i2c_talk_onoff = 1;
+ }
+
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int lme2510_int_service(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct input_dev *input_dev;
+ char *ir_codes = RC_MAP_LME2510;
+ int ret = 0;
+
+ info("STA Configuring Remote");
+
+ usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
+
+ strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ input_dev->name = "LME2510 Remote Control";
+ input_dev->phys = d->rc_phys;
+
+ usb_to_input_id(d->udev, &input_dev->id);
+
+ ret |= ir_input_register(input_dev, ir_codes, NULL, "LME 2510");
+
+ if (ret) {
+ input_free_device(input_dev);
+ return ret;
+ }
+
+ d->rc_input_dev = input_dev;
+ /* Start the Interrupt */
+ ret = lme2510_int_read(adap);
+
+ if (ret < 0) {
+ ir_input_unregister(input_dev);
+ input_free_device(input_dev);
+ }
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static u8 check_sum(u8 *p, u8 len)
+{
+ u8 sum = 0;
+ while (len--)
+ sum += *p++;
+ return sum;
+}
+
+static int lme2510_download_firmware(struct usb_device *dev,
+ const struct firmware *fw)
+{
+ int ret = 0;
+ u8 data[512] = {0};
+ u16 j, wlen, len_in, start, end;
+ u8 packet_size, dlen, i;
+ u8 *fw_data;
+
+ packet_size = 0x31;
+ len_in = 1;
+
+
+ info("FRM Starting Firmware Download");
+
+ for (i = 1; i < 3; i++) {
+ start = (i == 1) ? 0 : 512;
+ end = (i == 1) ? 512 : fw->size;
+ for (j = start; j < end; j += (packet_size+1)) {
+ fw_data = (u8 *)(fw->data + j);
+ if ((end - j) > packet_size) {
+ data[0] = i;
+ dlen = packet_size;
+ } else {
+ data[0] = i | 0x80;
+ dlen = (u8)(end - j)-1;
+ }
+ data[1] = dlen;
+ memcpy(&data[2], fw_data, dlen+1);
+ wlen = (u8) dlen + 4;
+ data[wlen-1] = check_sum(fw_data, dlen+1);
+ deb_info(1, "Data S=%02x:E=%02x CS= %02x", data[3],
+ data[dlen+2], data[dlen+3]);
+ ret |= lme2510_bulk_write(dev, data, wlen, 1);
+ ret |= lme2510_bulk_read(dev, data, len_in , 1);
+ ret |= (data[0] == 0x88) ? 0 : -1;
+ }
+ }
+ usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
+
+
+ data[0] = 0x8a;
+ len_in = 1;
+ msleep(2000);
+ ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Resetting*/
+ ret |= lme2510_bulk_read(dev, data, len_in, 1);
+ msleep(400);
+
+ if (ret < 0)
+ info("FRM Firmware Download Failed (%04x)" , ret);
+ else
+ info("FRM Firmware Download Completed - Resetting Device");
+
+
+ return (ret < 0) ? -ENODEV : 0;
+}
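For reference, a hedged sketch (not part of the patch) of the packet layout the loop above produces for each firmware chunk:

    /*
     * data[0]           block index i (1 or 2), or i | 0x80 for the last
     *                   packet of a block
     * data[1]           dlen, the payload length field
     * data[2..dlen+2]   dlen + 1 bytes copied from the firmware image
     * data[dlen+3]      check_sum() over those dlen + 1 bytes
     *
     * wlen = dlen + 4 bytes go out on bulk endpoint 0x01; a single status
     * byte is read back, where 0x88 indicates success.
     */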
+
+/* Default firmware for LME2510C */
+const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+
+static void lme_coldreset(struct usb_device *dev)
+{
+ int ret = 0, len_in;
+ u8 data[512] = {0};
+
+ data[0] = 0x0a;
+ len_in = 1;
+ info("FRM Firmware Cold Reset");
+ ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Cold Resetting*/
+ ret |= lme2510_bulk_read(dev, data, len_in, 1);
+ return;
+}
+
+static void lme_firmware_switch(struct usb_device *udev, int cold)
+{
+ const struct firmware *fw = NULL;
+ char lme2510c_s7395[] = "dvb-usb-lme2510c-s7395.fw";
+ char lme2510c_lg[] = "dvb-usb-lme2510c-lg.fw";
+ char *firm_msg[] = {"Loading", "Switching to"};
+ int ret;
+
+ if (udev->descriptor.idProduct == 0x1122)
+ return;
+
+ switch (dvb_usb_lme2510_firmware) {
+ case 0:
+ default:
+ memcpy(&lme_firmware, lme2510c_s7395, sizeof(lme2510c_s7395));
+ ret = request_firmware(&fw, lme_firmware, &udev->dev);
+ if (ret == 0) {
+ info("FRM %s S7395 Firmware", firm_msg[cold]);
+ break;
+ }
+ if (cold == 0)
+ dvb_usb_lme2510_firmware = 1;
+ else
+ cold = 0;
+ case 1:
+ memcpy(&lme_firmware, lme2510c_lg, sizeof(lme2510c_lg));
+ ret = request_firmware(&fw, lme_firmware, &udev->dev);
+ if (ret == 0) {
+ info("FRM %s LG Firmware", firm_msg[cold]);
+ break;
+ }
+ info("FRM No Firmware Found - please install");
+ dvb_usb_lme2510_firmware = 0;
+ cold = 0;
+ break;
+ }
+ release_firmware(fw);
+ if (cold)
+ lme_coldreset(udev);
+ return;
+}
+
+static int lme2510_kill_urb(struct usb_data_stream *stream)
+{
+ int i;
+ for (i = 0; i < stream->urbs_submitted; i++) {
+ deb_info(3, "killing URB no. %d.", i);
+
+ /* stop the URB */
+ usb_kill_urb(stream->urb_list[i]);
+ }
+ stream->urbs_submitted = 0;
+ return 0;
+}
+
+static struct tda10086_config tda10086_config = {
+ .demod_address = 0x1c,
+ .invert = 0,
+ .diseqc_tone = 1,
+ .xtal_freq = TDA10086_XTAL_16M,
+};
+
+static struct stv0288_config lme_config = {
+ .demod_address = 0xd0,
+ .min_delay_ms = 15,
+ .inittab = s7395_inittab,
+};
+
+static struct ix2505v_config lme_tuner = {
+ .tuner_address = 0xc0,
+ .min_delay_ms = 100,
+ .tuner_gain = 0x0,
+ .tuner_chargepump = 0x3,
+};
+
+static int dm04_lme2510_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct lme2510_state *st = adap->dev->priv;
+ static u8 voltage_low[] = LME_VOLTAGE_L;
+ static u8 voltage_high[] = LME_VOLTAGE_H;
+ static u8 lnb_on[] = LNB_ON;
+ static u8 lnb_off[] = LNB_OFF;
+ static u8 rbuf[1];
+ int ret = 0, len = 3, rlen = 1;
+
+ if (st->stream_on == 1)
+ return 0;
+
+ ret |= lme2510_usb_talk(adap->dev, lnb_on, len, rbuf, rlen);
+
+ switch (voltage) {
+ case SEC_VOLTAGE_18:
+ ret |= lme2510_usb_talk(adap->dev,
+ voltage_high, len, rbuf, rlen);
+ break;
+
+ case SEC_VOLTAGE_OFF:
+ ret |= lme2510_usb_talk(adap->dev,
+ lnb_off, len, rbuf, rlen);
+ case SEC_VOLTAGE_13:
+ default:
+ ret |= lme2510_usb_talk(adap->dev,
+ voltage_low, len, rbuf, rlen);
+ break;
+
+
+ };
+ st->i2c_talk_onoff = 1;
+ return (ret < 0) ? -ENODEV : 0;
+}
+
+static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ int ret = 0;
+ struct lme2510_state *st = adap->dev->priv;
+
+ /* Interrupt Start */
+ ret = lme2510_int_service(adap);
+ if (ret < 0) {
+ info("INT Unable to start Interrupt Service");
+ return -ENODEV;
+ }
+
+ st->i2c_talk_onoff = 1;
+ st->i2c_gate = 4;
+
+ adap->fe = dvb_attach(tda10086_attach, &tda10086_config,
+ &adap->dev->i2c_adap);
+
+ if (adap->fe) {
+ info("TUN Found Frontend TDA10086");
+ memcpy(&adap->fe->ops.info.name,
+ &"DM04_LG_TDQY-P001F DVB-S", 24);
+ adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
+ st->i2c_tuner_gate_w = 4;
+ st->i2c_tuner_gate_r = 4;
+ st->i2c_tuner_addr = 0xc0;
+ if (dvb_attach(tda826x_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap, 1)) {
+ info("TUN TDA8263 Found");
+ st->tuner_config = TUNER_LG;
+ if (dvb_usb_lme2510_firmware != 1) {
+ dvb_usb_lme2510_firmware = 1;
+ lme_firmware_switch(adap->dev->udev, 1);
+ }
+ return 0;
+ }
+ kfree(adap->fe);
+ adap->fe = NULL;
+ }
+ st->i2c_gate = 5;
+ adap->fe = dvb_attach(stv0288_attach, &lme_config,
+ &adap->dev->i2c_adap);
+
+ if (adap->fe) {
+ info("FE Found Stv0288");
+ memcpy(&adap->fe->ops.info.name,
+ &"DM04_SHARP:BS2F7HZ7395", 22);
+ adap->fe->ops.set_voltage = dm04_lme2510_set_voltage;
+ st->i2c_tuner_gate_w = 4;
+ st->i2c_tuner_gate_r = 5;
+ st->i2c_tuner_addr = 0xc0;
+ if (dvb_attach(ix2505v_attach , adap->fe, &lme_tuner,
+ &adap->dev->i2c_adap)) {
+ st->tuner_config = TUNER_S7395;
+ info("TUN Sharp IX2505V silicon tuner");
+ if (dvb_usb_lme2510_firmware != 0) {
+ dvb_usb_lme2510_firmware = 0;
+ lme_firmware_switch(adap->dev->udev, 1);
+ }
+ return 0;
+ }
+ kfree(adap->fe);
+ adap->fe = NULL;
+ }
+
+ info("DM04 Not Supported");
+ return -ENODEV;
+}
+
+static int lme2510_powerup(struct dvb_usb_device *d, int onoff)
+{
+ struct lme2510_state *st = d->priv;
+ st->i2c_talk_onoff = 1;
+ return 0;
+}
+
+/* DVB USB Driver stuff */
+static struct dvb_usb_device_properties lme2510_properties;
+static struct dvb_usb_device_properties lme2510c_properties;
+
+static int lme2510_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int ret = 0;
+
+ usb_reset_configuration(udev);
+
+ usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1);
+
+ if (udev->speed != USB_SPEED_HIGH) {
+ ret = usb_reset_device(udev);
+ info("DEV Failed to connect in HIGH SPEED mode");
+ return -ENODEV;
+ }
+
+ lme_firmware_switch(udev, 0);
+
+ if (0 == dvb_usb_device_init(intf, &lme2510_properties,
+ THIS_MODULE, NULL, adapter_nr)) {
+ info("DEV registering device driver");
+ return 0;
+ }
+ if (0 == dvb_usb_device_init(intf, &lme2510c_properties,
+ THIS_MODULE, NULL, adapter_nr)) {
+ info("DEV registering device driver");
+ return 0;
+ }
+
+ info("DEV lme2510 Error");
+ return -ENODEV;
+
+}
+
+static struct usb_device_id lme2510_table[] = {
+ { USB_DEVICE(0x3344, 0x1122) }, /* LME2510 */
+ { USB_DEVICE(0x3344, 0x1120) }, /* LME2510C */
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, lme2510_table);
+
+static struct dvb_usb_device_properties lme2510_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = lme2510_download_firmware,
+ .firmware = "dvb-usb-lme2510-lg.fw",
+
+ .size_of_priv = sizeof(struct lme2510_state),
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .streaming_ctrl = lme2510_streaming_ctrl,
+ .frontend_attach = dm04_lme2510_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x06,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+
+ }
+ }
+ }
+ }
+ },
+ .power_ctrl = lme2510_powerup,
+ .identify_state = lme2510_identify_state,
+ .i2c_algo = &lme2510_i2c_algo,
+ .generic_bulk_ctrl_endpoint = 0,
+ .num_device_descs = 1,
+ .devices = {
+ { "DM04 LME2510 DVB-S USB 2.0",
+ { &lme2510_table[0], NULL },
+ },
+
+ }
+};
+
+static struct dvb_usb_device_properties lme2510c_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = lme2510_download_firmware,
+ .firmware = lme_firmware,
+ .size_of_priv = sizeof(struct lme2510_state),
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .streaming_ctrl = lme2510_streaming_ctrl,
+ .frontend_attach = dm04_lme2510_frontend_attach,
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x8,
+ .u = {
+ .bulk = {
+ .buffersize = 4096,
+
+ }
+ }
+ }
+ }
+ },
+ .power_ctrl = lme2510_powerup,
+ .identify_state = lme2510_identify_state,
+ .i2c_algo = &lme2510_i2c_algo,
+ .generic_bulk_ctrl_endpoint = 0,
+ .num_device_descs = 1,
+ .devices = {
+ { "DM04 LME2510C USB2.0",
+ { &lme2510_table[1], NULL },
+ },
+ }
+};
+
+void *lme2510_exit_int(struct dvb_usb_device *d)
+{
+ struct lme2510_state *st = d->priv;
+ struct dvb_usb_adapter *adap = &d->adapter[0];
+ void *buffer = NULL;
+
+ if (adap != NULL) {
+ lme2510_kill_urb(&adap->stream);
+ adap->feedcount = 0;
+ }
+
+ if (st->lme_urb != NULL) {
+ st->i2c_talk_onoff = 1;
+ st->signal_lock = 0;
+ st->signal_level = 0;
+ st->signal_sn = 0;
+ buffer = st->usb_buffer;
+ usb_kill_urb(st->lme_urb);
+ usb_free_coherent(d->udev, 5000, st->buffer,
+ st->lme_urb->transfer_dma);
+ info("Interrupt Service Stopped");
+ ir_input_unregister(d->rc_input_dev);
+ info("Remote Stopped");
+ }
+ return buffer;
+}
+
+void lme2510_exit(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+ void *usb_buffer;
+
+ if (d != NULL) {
+ usb_buffer = lme2510_exit_int(d);
+ dvb_usb_device_exit(intf);
+ kfree(usb_buffer);
+ }
+}
+
+static struct usb_driver lme2510_driver = {
+ .name = "LME2510C_DVBS",
+ .probe = lme2510_probe,
+ .disconnect = lme2510_exit,
+ .id_table = lme2510_table,
+};
+
+/* module stuff */
+static int __init lme2510_module_init(void)
+{
+ int result = usb_register(&lme2510_driver);
+ if (result) {
+ err("usb_register failed. Error number %d", result);
+ return result;
+ }
+
+ return 0;
+}
+
+static void __exit lme2510_module_exit(void)
+{
+ /* deregister this driver from the USB subsystem */
+ usb_deregister(&lme2510_driver);
+}
+
+module_init(lme2510_module_init);
+module_exit(lme2510_module_exit);
+
+MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
+MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
+MODULE_VERSION("1.60");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.h b/drivers/media/dvb/dvb-usb/lmedm04.h
new file mode 100644
index 000000000000..e6af16c1e3e5
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/lmedm04.h
@@ -0,0 +1,173 @@
+/* DVB USB compliant linux driver for
+ *
+ * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395
+ * LME2510C + LG TDQY-P001F
+ * LME2510 + LG TDQY-P001F
+ *
+ * MVB7395 (LME2510C+SHARP:BS2F7HZ7395)
+ * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V)
+ *
+ * MVB001F (LME2510+LGTDQT-P001F)
+ * LG TDQY - P001F =(TDA8263 + TDA10086H)
+ *
+ * MVB0001F (LME2510C+LGTDQT-P001F)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#ifndef _DVB_USB_LME2510_H_
+#define _DVB_USB_LME2510_H_
+
+/* Streamer & PID
+ *
+ * Note: these commands do not actually stop the stream;
+ * they appear to perform some kind of packet filtering,
+ * stream counting or tuning-related function.
+ * 06 XX
+ * offset 1 = 00 Enable Streaming
+ *
+ *
+ * PID
+ * 03 XX XX ----> reg number ---> setting....20 XX
+ * offset 1 = length
+ * offset 2 = start of data
+ * end byte -1 = 20
+ * end byte = clear PID: always a0, otherwise 9c, 9a ??
+ *
+*/
+#define LME_ST_ON_W {0x06, 0x00}
+#define LME_CLEAR_PID {0x03, 0x02, 0x20, 0xa0}
+
+/* LNB Voltage
+ * 07 XX XX
+ * offset 1 = 01
+ * offset 2 = 00=Voltage low 01=Voltage high
+ *
+ * LNB Power
+ * 03 01 XX
+ * offset 2 = 00=ON 01=OFF
+ */
+
+#define LME_VOLTAGE_L {0x07, 0x01, 0x00}
+#define LME_VOLTAGE_H {0x07, 0x01, 0x01}
+#define LNB_ON {0x3a, 0x01, 0x00}
+#define LNB_OFF {0x3a, 0x01, 0x01}
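The command layouts documented above map directly onto small static buffers in lmedm04.c; a minimal sketch (illustration only, assuming the lme2510_usb_talk() helper from that file) of raising the LNB voltage and then re-enabling streaming:

    static u8 voltage_high[] = LME_VOLTAGE_H;   /* {0x07, 0x01, 0x01} */
    static u8 stream_on[] = LME_ST_ON_W;        /* {0x06, 0x00} */
    u8 rbuf[1];
    int ret;

    ret = lme2510_usb_talk(d, voltage_high, sizeof(voltage_high),
                           rbuf, sizeof(rbuf));
    if (ret == 0)
        ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on),
                               rbuf, sizeof(rbuf));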
+
+/* Initial stv0288 settings for 7395 Frontend */
+static u8 s7395_inittab[] = {
+ 0x01, 0x15,
+ 0x02, 0x20,
+ 0x03, 0xa0,
+ 0x04, 0xa0,
+ 0x05, 0x12,
+ 0x06, 0x00,
+ 0x09, 0x00,
+ 0x0a, 0x04,
+ 0x0b, 0x00,
+ 0x0c, 0x00,
+ 0x0d, 0x00,
+ 0x0e, 0xc1,
+ 0x0f, 0x54,
+ 0x11, 0x7a,
+ 0x12, 0x03,
+ 0x13, 0x48,
+ 0x14, 0x84,
+ 0x15, 0xc5,
+ 0x16, 0xb8,
+ 0x17, 0x9c,
+ 0x18, 0x00,
+ 0x19, 0xa6,
+ 0x1a, 0x88,
+ 0x1b, 0x8f,
+ 0x1c, 0xf0,
+ 0x20, 0x0b,
+ 0x21, 0x54,
+ 0x22, 0xff,
+ 0x23, 0x01,
+ 0x28, 0x46,
+ 0x29, 0x66,
+ 0x2a, 0x90,
+ 0x2b, 0xfa,
+ 0x2c, 0xd9,
+ 0x30, 0x0,
+ 0x31, 0x1e,
+ 0x32, 0x14,
+ 0x33, 0x0f,
+ 0x34, 0x09,
+ 0x35, 0x0c,
+ 0x36, 0x05,
+ 0x37, 0x2f,
+ 0x38, 0x16,
+ 0x39, 0xbd,
+ 0x3a, 0x0,
+ 0x3b, 0x13,
+ 0x3c, 0x11,
+ 0x3d, 0x30,
+ 0x40, 0x63,
+ 0x41, 0x04,
+ 0x42, 0x60,
+ 0x43, 0x00,
+ 0x44, 0x00,
+ 0x45, 0x00,
+ 0x46, 0x00,
+ 0x47, 0x00,
+ 0x4a, 0x00,
+ 0x50, 0x12,
+ 0x51, 0x36,
+ 0x52, 0x21,
+ 0x53, 0x94,
+ 0x54, 0xb2,
+ 0x55, 0x29,
+ 0x56, 0x64,
+ 0x57, 0x2b,
+ 0x58, 0x54,
+ 0x59, 0x86,
+ 0x5a, 0x00,
+ 0x5b, 0x9b,
+ 0x5c, 0x08,
+ 0x5d, 0x7f,
+ 0x5e, 0xff,
+ 0x5f, 0x8d,
+ 0x70, 0x0,
+ 0x71, 0x0,
+ 0x72, 0x0,
+ 0x74, 0x0,
+ 0x75, 0x0,
+ 0x76, 0x0,
+ 0x81, 0x0,
+ 0x82, 0x3f,
+ 0x83, 0x3f,
+ 0x84, 0x0,
+ 0x85, 0x0,
+ 0x88, 0x0,
+ 0x89, 0x0,
+ 0x8a, 0x0,
+ 0x8b, 0x0,
+ 0x8c, 0x0,
+ 0x90, 0x0,
+ 0x91, 0x0,
+ 0x92, 0x0,
+ 0x93, 0x0,
+ 0x94, 0x1c,
+ 0x97, 0x0,
+ 0xa0, 0x48,
+ 0xa1, 0x0,
+ 0xb0, 0xb8,
+ 0xb1, 0x3a,
+ 0xb2, 0x10,
+ 0xb3, 0x82,
+ 0xb4, 0x80,
+ 0xb5, 0x82,
+ 0xb6, 0x82,
+ 0xb7, 0x82,
+ 0xb8, 0x20,
+ 0xb9, 0x0,
+ 0xf0, 0x0,
+ 0xf1, 0x0,
+ 0xf2, 0xc0,
+ 0xff, 0xff,
+};
+#endif
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 28294af752db..f0f1842fab60 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -24,6 +24,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <dvb_frontend.h>
+
#include "firedtv.h"
#define FCP_COMMAND_REGISTER 0xfffff0000b00ULL
@@ -130,6 +132,20 @@ MODULE_PARM_DESC(debug, "Verbose logging (none = 0"
", FCP payloads = " __stringify(AVC_DEBUG_FCP_PAYLOADS)
", or a combination, or all = -1)");
+/*
+ * This is a workaround since there is no vendor specific command to retrieve
+ * ca_info using AVC. If this parameter is not used, ca_system_id will be
+ * filled with application_manufacturer from ca_app_info.
+ * Digital Everywhere have said that adding ca_info is on their TODO list.
+ */
+static unsigned int num_fake_ca_system_ids;
+static int fake_ca_system_ids[4] = { -1, -1, -1, -1 };
+module_param_array(fake_ca_system_ids, int, &num_fake_ca_system_ids, 0644);
+MODULE_PARM_DESC(fake_ca_system_ids, "If your CAM application manufacturer "
+ "does not have the same ca_system_id as your CAS, you can "
+ "override what ca_system_ids are presented to the "
+ "application by setting this field to an array of ids.");
+
static const char *debug_fcp_ctype(unsigned int ctype)
{
static const char *ctypes[] = {
@@ -368,10 +384,30 @@ static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[12] = 0;
if (fdtv->type == FIREDTV_DVB_S2) {
- c->operand[13] = 0x1;
- c->operand[14] = 0xff;
- c->operand[15] = 0xff;
-
+ if (fdtv->fe.dtv_property_cache.delivery_system == SYS_DVBS2) {
+ switch (fdtv->fe.dtv_property_cache.modulation) {
+ case QAM_16: c->operand[13] = 0x1; break;
+ case QPSK: c->operand[13] = 0x2; break;
+ case PSK_8: c->operand[13] = 0x3; break;
+ default: c->operand[13] = 0x2; break;
+ }
+ switch (fdtv->fe.dtv_property_cache.rolloff) {
+ case ROLLOFF_AUTO: c->operand[14] = 0x2; break;
+ case ROLLOFF_35: c->operand[14] = 0x2; break;
+ case ROLLOFF_20: c->operand[14] = 0x0; break;
+ case ROLLOFF_25: c->operand[14] = 0x1; break;
+ /* case ROLLOFF_NONE: c->operand[14] = 0xff; break; */
+ }
+ switch (fdtv->fe.dtv_property_cache.pilot) {
+ case PILOT_AUTO: c->operand[15] = 0x0; break;
+ case PILOT_OFF: c->operand[15] = 0x0; break;
+ case PILOT_ON: c->operand[15] = 0x1; break;
+ }
+ } else {
+ c->operand[13] = 0x1; /* auto modulation */
+ c->operand[14] = 0xff; /* disable rolloff */
+ c->operand[15] = 0xff; /* disable pilot */
+ }
return 16;
} else {
return 13;
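As a quick illustration of the mapping introduced above (values taken from the switch statements, tune parameters assumed): a DVB-S2 tune with 8PSK, 0.20 roll-off and pilots enabled programs the vendor operands as

    c->operand[13] = 0x3;   /* PSK_8 */
    c->operand[14] = 0x0;   /* ROLLOFF_20 */
    c->operand[15] = 0x1;   /* PILOT_ON */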
@@ -977,7 +1013,7 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
- int pos, ret;
+ int i, pos, ret;
mutex_lock(&fdtv->avc_mutex);
@@ -1004,9 +1040,18 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
app_info[0] = (EN50221_TAG_CA_INFO >> 16) & 0xff;
app_info[1] = (EN50221_TAG_CA_INFO >> 8) & 0xff;
app_info[2] = (EN50221_TAG_CA_INFO >> 0) & 0xff;
- app_info[3] = 2;
- app_info[4] = r->operand[pos + 0];
- app_info[5] = r->operand[pos + 1];
+ if (num_fake_ca_system_ids == 0) {
+ app_info[3] = 2;
+ app_info[4] = r->operand[pos + 0];
+ app_info[5] = r->operand[pos + 1];
+ } else {
+ app_info[3] = num_fake_ca_system_ids * 2;
+ for (i = 0; i < num_fake_ca_system_ids; i++) {
+ app_info[4 + i * 2] =
+ (fake_ca_system_ids[i] >> 8) & 0xff;
+ app_info[5 + i * 2] = fake_ca_system_ids[i] & 0xff;
+ }
+ }
*len = app_info[3] + 4;
out:
mutex_unlock(&fdtv->avc_mutex);
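As a usage illustration of the new parameter (the ids below are hypothetical): loading the module with fake_ca_system_ids=0x0100,0x0500 makes avc_ca_info() report two ids, each split into a high and a low byte of the CA_INFO object:

    app_info[3] = 2 * 2;    /* length field: two 16-bit ids */
    app_info[4] = 0x01;     /* id[0] >> 8   */
    app_info[5] = 0x00;     /* id[0] & 0xff */
    app_info[6] = 0x05;     /* id[1] >> 8   */
    app_info[7] = 0x00;     /* id[1] & 0xff */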
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
index e49cdc88b0c7..d10920e2f3a2 100644
--- a/drivers/media/dvb/firewire/firedtv-fe.c
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -155,6 +155,16 @@ static int fdtv_get_frontend(struct dvb_frontend *fe,
return -EOPNOTSUPP;
}
+static int fdtv_get_property(struct dvb_frontend *fe, struct dtv_property *tvp)
+{
+ return 0;
+}
+
+static int fdtv_set_property(struct dvb_frontend *fe, struct dtv_property *tvp)
+{
+ return 0;
+}
+
void fdtv_frontend_init(struct firedtv *fdtv)
{
struct dvb_frontend_ops *ops = &fdtv->fe.ops;
@@ -166,6 +176,9 @@ void fdtv_frontend_init(struct firedtv *fdtv)
ops->set_frontend = fdtv_set_frontend;
ops->get_frontend = fdtv_get_frontend;
+ ops->get_property = fdtv_get_property;
+ ops->set_property = fdtv_set_property;
+
ops->read_status = fdtv_read_status;
ops->read_ber = fdtv_read_ber;
ops->read_signal_strength = fdtv_read_signal_strength;
@@ -179,7 +192,6 @@ void fdtv_frontend_init(struct firedtv *fdtv)
switch (fdtv->type) {
case FIREDTV_DVB_S:
- case FIREDTV_DVB_S2:
fi->type = FE_QPSK;
fi->frequency_min = 950000;
@@ -188,7 +200,7 @@ void fdtv_frontend_init(struct firedtv *fdtv)
fi->symbol_rate_min = 1000000;
fi->symbol_rate_max = 40000000;
- fi->caps = FE_CAN_INVERSION_AUTO |
+ fi->caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -198,6 +210,26 @@ void fdtv_frontend_init(struct firedtv *fdtv)
FE_CAN_QPSK;
break;
+ case FIREDTV_DVB_S2:
+ fi->type = FE_QPSK;
+
+ fi->frequency_min = 950000;
+ fi->frequency_max = 2150000;
+ fi->frequency_stepsize = 125;
+ fi->symbol_rate_min = 1000000;
+ fi->symbol_rate_max = 40000000;
+
+ fi->caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION;
+ break;
+
case FIREDTV_DVB_C:
fi->type = FE_QAM;
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index b5f6a04f9c12..e9062b08a485 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -257,6 +257,13 @@ config DVB_CX22702
help
A DVB-T tuner module. Say Y when you want to support this frontend.
+config DVB_S5H1432
+ tristate "Samsung s5h1432 demodulator (OFDM)"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
+
config DVB_DRX397XD
tristate "Micronas DRX3975D/DRX3977D based"
depends on DVB_CORE && I2C
@@ -455,16 +462,8 @@ config DVB_LGDT330X
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
-config DVB_LGDT3304
- tristate "LG Electronics LGDT3304"
- depends on DVB_CORE && I2C
- default m if DVB_FE_CUSTOMISE
- help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
-
config DVB_LGDT3305
- tristate "LG Electronics LGDT3305 based"
+ tristate "LG Electronics LGDT3304 and LGDT3305 based"
depends on DVB_CORE && I2C
default m if DVB_FE_CUSTOMISE
help
@@ -607,6 +606,13 @@ config DVB_TDA665x
Currently supported tuners:
* Panasonic ENV57H12D5 (ET-50DT)
+config DVB_IX2505V
+ tristate "Sharp IX2505V silicon tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-S tuner module. Say Y when you want to support this frontend.
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 874e8ada4d1d..9a31985c0dfb 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DVB_STB0899) += stb0899.o
obj-$(CONFIG_DVB_STB6100) += stb6100.o
obj-$(CONFIG_DVB_SP8870) += sp8870.o
obj-$(CONFIG_DVB_CX22700) += cx22700.o
+obj-$(CONFIG_DVB_S5H1432) += s5h1432.o
obj-$(CONFIG_DVB_CX24110) += cx24110.o
obj-$(CONFIG_DVB_TDA8083) += tda8083.o
obj-$(CONFIG_DVB_L64781) += l64781.o
@@ -45,7 +46,6 @@ obj-$(CONFIG_DVB_OR51132) += or51132.o
obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
-obj-$(CONFIG_DVB_LGDT3304) += lgdt3304.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
@@ -82,3 +82,4 @@ obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
+obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index dac917f7bb7f..e2a95c07bab4 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -42,6 +42,8 @@ struct af9013_state {
struct af9013_config config;
+ /* tuner/demod RF and IF AGC limits used for signal strength calc */
+ u8 signal_strength_en, rf_50, rf_80, if_50, if_80;
u16 signal_strength;
u32 ber;
u32 ucblocks;
@@ -220,184 +222,37 @@ static u32 af913_div(u32 a, u32 b, u32 x)
static int af9013_set_coeff(struct af9013_state *state, fe_bandwidth_t bw)
{
- int ret = 0;
- u8 i = 0;
- u8 buf[24];
- u32 uninitialized_var(ns_coeff1_2048nu);
- u32 uninitialized_var(ns_coeff1_8191nu);
- u32 uninitialized_var(ns_coeff1_8192nu);
- u32 uninitialized_var(ns_coeff1_8193nu);
- u32 uninitialized_var(ns_coeff2_2k);
- u32 uninitialized_var(ns_coeff2_8k);
-
+ int ret, i, j, found;
deb_info("%s: adc_clock:%d bw:%d\n", __func__,
state->config.adc_clock, bw);
- switch (state->config.adc_clock) {
- case 28800: /* 28.800 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x01e79e7a;
- ns_coeff1_8191nu = 0x0079eb6e;
- ns_coeff1_8192nu = 0x0079e79e;
- ns_coeff1_8193nu = 0x0079e3cf;
- ns_coeff2_2k = 0x00f3cf3d;
- ns_coeff2_8k = 0x003cf3cf;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x0238e38e;
- ns_coeff1_8191nu = 0x008e3d55;
- ns_coeff1_8192nu = 0x008e38e4;
- ns_coeff1_8193nu = 0x008e3472;
- ns_coeff2_2k = 0x011c71c7;
- ns_coeff2_8k = 0x00471c72;
+ /* lookup coeff from table */
+ for (i = 0, found = 0; i < ARRAY_SIZE(coeff_table); i++) {
+ if (coeff_table[i].adc_clock == state->config.adc_clock &&
+ coeff_table[i].bw == bw) {
+ found = 1;
break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x028a28a3;
- ns_coeff1_8191nu = 0x00a28f3d;
- ns_coeff1_8192nu = 0x00a28a29;
- ns_coeff1_8193nu = 0x00a28514;
- ns_coeff2_2k = 0x01451451;
- ns_coeff2_8k = 0x00514514;
- break;
- default:
- ret = -EINVAL;
}
- break;
- case 20480: /* 20.480 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x02adb6dc;
- ns_coeff1_8191nu = 0x00ab7313;
- ns_coeff1_8192nu = 0x00ab6db7;
- ns_coeff1_8193nu = 0x00ab685c;
- ns_coeff2_2k = 0x0156db6e;
- ns_coeff2_8k = 0x0055b6dc;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x03200001;
- ns_coeff1_8191nu = 0x00c80640;
- ns_coeff1_8192nu = 0x00c80000;
- ns_coeff1_8193nu = 0x00c7f9c0;
- ns_coeff2_2k = 0x01900000;
- ns_coeff2_8k = 0x00640000;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x03924926;
- ns_coeff1_8191nu = 0x00e4996e;
- ns_coeff1_8192nu = 0x00e49249;
- ns_coeff1_8193nu = 0x00e48b25;
- ns_coeff2_2k = 0x01c92493;
- ns_coeff2_8k = 0x00724925;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- case 28000: /* 28.000 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x01f58d10;
- ns_coeff1_8191nu = 0x007d672f;
- ns_coeff1_8192nu = 0x007d6344;
- ns_coeff1_8193nu = 0x007d5f59;
- ns_coeff2_2k = 0x00fac688;
- ns_coeff2_8k = 0x003eb1a2;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x02492492;
- ns_coeff1_8191nu = 0x00924db7;
- ns_coeff1_8192nu = 0x00924925;
- ns_coeff1_8193nu = 0x00924492;
- ns_coeff2_2k = 0x01249249;
- ns_coeff2_8k = 0x00492492;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x029cbc15;
- ns_coeff1_8191nu = 0x00a7343f;
- ns_coeff1_8192nu = 0x00a72f05;
- ns_coeff1_8193nu = 0x00a729cc;
- ns_coeff2_2k = 0x014e5e0a;
- ns_coeff2_8k = 0x00539783;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- case 25000: /* 25.000 MHz */
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- ns_coeff1_2048nu = 0x0231bcb5;
- ns_coeff1_8191nu = 0x008c7391;
- ns_coeff1_8192nu = 0x008c6f2d;
- ns_coeff1_8193nu = 0x008c6aca;
- ns_coeff2_2k = 0x0118de5b;
- ns_coeff2_8k = 0x00463797;
- break;
- case BANDWIDTH_7_MHZ:
- ns_coeff1_2048nu = 0x028f5c29;
- ns_coeff1_8191nu = 0x00a3dc29;
- ns_coeff1_8192nu = 0x00a3d70a;
- ns_coeff1_8193nu = 0x00a3d1ec;
- ns_coeff2_2k = 0x0147ae14;
- ns_coeff2_8k = 0x0051eb85;
- break;
- case BANDWIDTH_8_MHZ:
- ns_coeff1_2048nu = 0x02ecfb9d;
- ns_coeff1_8191nu = 0x00bb44c1;
- ns_coeff1_8192nu = 0x00bb3ee7;
- ns_coeff1_8193nu = 0x00bb390d;
- ns_coeff2_2k = 0x01767dce;
- ns_coeff2_8k = 0x005d9f74;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- default:
- err("invalid xtal");
- return -EINVAL;
}
- if (ret) {
- err("invalid bandwidth");
- return ret;
+
+ if (!found) {
+ err("invalid bw or clock");
+ ret = -EINVAL;
+ goto error;
}
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x00ff0000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_2048nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff2_2k & 0x01c00000) >> 22);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x003fc000) >> 14);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x00003fc0) >> 6);
- buf[i++] = (u8) ((ns_coeff2_2k & 0x0000003f));
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8191nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8192nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x03000000) >> 24);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x00ffc000) >> 16);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x0000ff00) >> 8);
- buf[i++] = (u8) ((ns_coeff1_8193nu & 0x000000ff));
- buf[i++] = (u8) ((ns_coeff2_8k & 0x01c00000) >> 22);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x003fc000) >> 14);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x00003fc0) >> 6);
- buf[i++] = (u8) ((ns_coeff2_8k & 0x0000003f));
-
- deb_info("%s: coeff:", __func__);
- debug_dump(buf, sizeof(buf), deb_info);
+ deb_info("%s: coeff: ", __func__);
+ debug_dump(coeff_table[i].val, sizeof(coeff_table[i].val), deb_info);
/* program */
- for (i = 0; i < sizeof(buf); i++) {
- ret = af9013_write_reg(state, 0xae00 + i, buf[i]);
+ for (j = 0; j < sizeof(coeff_table[i].val); j++) {
+ ret = af9013_write_reg(state, 0xae00 + j,
+ coeff_table[i].val[j]);
if (ret)
break;
}
+error:
return ret;
}
@@ -486,6 +341,19 @@ static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
if_sample_freq = 4300000; /* 4.3 MHz */
break;
}
+ } else if (state->config.tuner == AF9013_TUNER_TDA18218) {
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ if_sample_freq = 3000000; /* 3 MHz */
+ break;
+ case BANDWIDTH_7_MHZ:
+ if_sample_freq = 3500000; /* 3.5 MHz */
+ break;
+ case BANDWIDTH_8_MHZ:
+ default:
+ if_sample_freq = 4000000; /* 4 MHz */
+ break;
+ }
}
while (if_sample_freq > (adc_freq / 2))
@@ -1097,45 +965,31 @@ static int af9013_update_signal_strength(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
int ret;
- u8 tmp0;
- u8 rf_gain, rf_50, rf_80, if_gain, if_50, if_80;
+ u8 rf_gain, if_gain;
int signal_strength;
deb_info("%s\n", __func__);
- state->signal_strength = 0;
-
- ret = af9013_read_reg_bits(state, 0x9bee, 0, 1, &tmp0);
- if (ret)
- goto error;
- if (tmp0) {
- ret = af9013_read_reg(state, 0x9bbd, &rf_50);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9bd0, &rf_80);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be2, &if_50);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be4, &if_80);
- if (ret)
- goto error;
+ if (state->signal_strength_en) {
ret = af9013_read_reg(state, 0xd07c, &rf_gain);
if (ret)
goto error;
ret = af9013_read_reg(state, 0xd07d, &if_gain);
if (ret)
goto error;
- signal_strength = (0xffff / (9 * (rf_50 + if_50) - \
- 11 * (rf_80 + if_80))) * (10 * (rf_gain + if_gain) - \
- 11 * (rf_80 + if_80));
+ signal_strength = (0xffff / \
+ (9 * (state->rf_50 + state->if_50) - \
+ 11 * (state->rf_80 + state->if_80))) * \
+ (10 * (rf_gain + if_gain) - \
+ 11 * (state->rf_80 + state->if_80));
if (signal_strength < 0)
signal_strength = 0;
else if (signal_strength > 0xffff)
signal_strength = 0xffff;
state->signal_strength = signal_strength;
+ } else {
+ state->signal_strength = 0;
}
error:
@@ -1368,6 +1222,7 @@ static int af9013_init(struct dvb_frontend *fe)
break;
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
+ case AF9013_TUNER_MXL5007T:
len = ARRAY_SIZE(tuner_init_mxl5005);
init = tuner_init_mxl5005;
break;
@@ -1393,6 +1248,7 @@ static int af9013_init(struct dvb_frontend *fe)
init = tuner_init_mt2060_2;
break;
case AF9013_TUNER_TDA18271:
+ case AF9013_TUNER_TDA18218:
len = ARRAY_SIZE(tuner_init_tda18271);
init = tuner_init_tda18271;
break;
@@ -1438,6 +1294,27 @@ static int af9013_init(struct dvb_frontend *fe)
if (ret)
goto error;
+ /* read values needed for signal strength calculation */
+ ret = af9013_read_reg_bits(state, 0x9bee, 0, 1,
+ &state->signal_strength_en);
+ if (ret)
+ goto error;
+
+ if (state->signal_strength_en) {
+ ret = af9013_read_reg(state, 0x9bbd, &state->rf_50);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9bd0, &state->rf_80);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9be2, &state->if_50);
+ if (ret)
+ goto error;
+ ret = af9013_read_reg(state, 0x9be4, &state->if_80);
+ if (ret)
+ goto error;
+ }
+
error:
return ret;
}
diff --git a/drivers/media/dvb/frontends/af9013.h b/drivers/media/dvb/frontends/af9013.h
index 72c71bb5d117..e53d873f7555 100644
--- a/drivers/media/dvb/frontends/af9013.h
+++ b/drivers/media/dvb/frontends/af9013.h
@@ -44,6 +44,7 @@ enum af9013_tuner {
AF9013_TUNER_MT2060_2 = 147, /* Microtune */
AF9013_TUNER_TDA18271 = 156, /* NXP */
AF9013_TUNER_QT1010A = 162, /* Quantek */
+ AF9013_TUNER_MXL5007T = 177, /* MaxLinear */
AF9013_TUNER_TDA18218 = 179, /* NXP */
};
diff --git a/drivers/media/dvb/frontends/af9013_priv.h b/drivers/media/dvb/frontends/af9013_priv.h
index 0fd42b7e248e..e00b2a4a2db6 100644
--- a/drivers/media/dvb/frontends/af9013_priv.h
+++ b/drivers/media/dvb/frontends/af9013_priv.h
@@ -60,6 +60,56 @@ struct snr_table {
u8 snr;
};
+struct coeff {
+ u32 adc_clock;
+ fe_bandwidth_t bw;
+ u8 val[24];
+};
+
+/* pre-calculated coeff lookup table */
+static struct coeff coeff_table[] = {
+ /* 28.800 MHz */
+ { 28800, BANDWIDTH_8_MHZ, { 0x02, 0x8a, 0x28, 0xa3, 0x05, 0x14,
+ 0x51, 0x11, 0x00, 0xa2, 0x8f, 0x3d, 0x00, 0xa2, 0x8a,
+ 0x29, 0x00, 0xa2, 0x85, 0x14, 0x01, 0x45, 0x14, 0x14 } },
+ { 28800, BANDWIDTH_7_MHZ, { 0x02, 0x38, 0xe3, 0x8e, 0x04, 0x71,
+ 0xc7, 0x07, 0x00, 0x8e, 0x3d, 0x55, 0x00, 0x8e, 0x38,
+ 0xe4, 0x00, 0x8e, 0x34, 0x72, 0x01, 0x1c, 0x71, 0x32 } },
+ { 28800, BANDWIDTH_6_MHZ, { 0x01, 0xe7, 0x9e, 0x7a, 0x03, 0xcf,
+ 0x3c, 0x3d, 0x00, 0x79, 0xeb, 0x6e, 0x00, 0x79, 0xe7,
+ 0x9e, 0x00, 0x79, 0xe3, 0xcf, 0x00, 0xf3, 0xcf, 0x0f } },
+ /* 20.480 MHz */
+ { 20480, BANDWIDTH_8_MHZ, { 0x03, 0x92, 0x49, 0x26, 0x07, 0x24,
+ 0x92, 0x13, 0x00, 0xe4, 0x99, 0x6e, 0x00, 0xe4, 0x92,
+ 0x49, 0x00, 0xe4, 0x8b, 0x25, 0x01, 0xc9, 0x24, 0x25 } },
+ { 20480, BANDWIDTH_7_MHZ, { 0x03, 0x20, 0x00, 0x01, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0xc8, 0x06, 0x40, 0x00, 0xc8, 0x00,
+ 0x00, 0x00, 0xc7, 0xf9, 0xc0, 0x01, 0x90, 0x00, 0x00 } },
+ { 20480, BANDWIDTH_6_MHZ, { 0x02, 0xad, 0xb6, 0xdc, 0x05, 0x5b,
+ 0x6d, 0x2e, 0x00, 0xab, 0x73, 0x13, 0x00, 0xab, 0x6d,
+ 0xb7, 0x00, 0xab, 0x68, 0x5c, 0x01, 0x56, 0xdb, 0x1c } },
+ /* 28.000 MHz */
+ { 28000, BANDWIDTH_8_MHZ, { 0x02, 0x9c, 0xbc, 0x15, 0x05, 0x39,
+ 0x78, 0x0a, 0x00, 0xa7, 0x34, 0x3f, 0x00, 0xa7, 0x2f,
+ 0x05, 0x00, 0xa7, 0x29, 0xcc, 0x01, 0x4e, 0x5e, 0x03 } },
+ { 28000, BANDWIDTH_7_MHZ, { 0x02, 0x49, 0x24, 0x92, 0x04, 0x92,
+ 0x49, 0x09, 0x00, 0x92, 0x4d, 0xb7, 0x00, 0x92, 0x49,
+ 0x25, 0x00, 0x92, 0x44, 0x92, 0x01, 0x24, 0x92, 0x12 } },
+ { 28000, BANDWIDTH_6_MHZ, { 0x01, 0xf5, 0x8d, 0x10, 0x03, 0xeb,
+ 0x1a, 0x08, 0x00, 0x7d, 0x67, 0x2f, 0x00, 0x7d, 0x63,
+ 0x44, 0x00, 0x7d, 0x5f, 0x59, 0x00, 0xfa, 0xc6, 0x22 } },
+ /* 25.000 MHz */
+ { 25000, BANDWIDTH_8_MHZ, { 0x02, 0xec, 0xfb, 0x9d, 0x05, 0xd9,
+ 0xf7, 0x0e, 0x00, 0xbb, 0x44, 0xc1, 0x00, 0xbb, 0x3e,
+ 0xe7, 0x00, 0xbb, 0x39, 0x0d, 0x01, 0x76, 0x7d, 0x34 } },
+ { 25000, BANDWIDTH_7_MHZ, { 0x02, 0x8f, 0x5c, 0x29, 0x05, 0x1e,
+ 0xb8, 0x14, 0x00, 0xa3, 0xdc, 0x29, 0x00, 0xa3, 0xd7,
+ 0x0a, 0x00, 0xa3, 0xd1, 0xec, 0x01, 0x47, 0xae, 0x05 } },
+ { 25000, BANDWIDTH_6_MHZ, { 0x02, 0x31, 0xbc, 0xb5, 0x04, 0x63,
+ 0x79, 0x1b, 0x00, 0x8c, 0x73, 0x91, 0x00, 0x8c, 0x6f,
+ 0x2d, 0x00, 0x8c, 0x6a, 0xca, 0x01, 0x18, 0xde, 0x17 } },
+};
+
/* QPSK SNR lookup table */
static struct snr_table qpsk_snr_table[] = {
{ 0x0b4771, 0 },
@@ -480,9 +530,10 @@ static struct regdesc tuner_init_mxl5003d[] = {
{ 0x9bd9, 0, 8, 0x08 },
};
-/* MaxLinear MXL5005 tuner init
+/* MaxLinear MXL5005S & MXL5007T tuner init
AF9013_TUNER_MXL5005D = 13
- AF9013_TUNER_MXL5005R = 30 */
+ AF9013_TUNER_MXL5005R = 30
+ AF9013_TUNER_MXL5007T = 177 */
static struct regdesc tuner_init_mxl5005[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x07 },
@@ -791,8 +842,9 @@ static struct regdesc tuner_init_unknown[] = {
{ 0x9bd9, 0, 8, 0x08 },
};
-/* NXP TDA18271 tuner init
- AF9013_TUNER_TDA18271 = 156 */
+/* NXP TDA18271 & TDA18218 tuner init
+ AF9013_TUNER_TDA18271 = 156
+ AF9013_TUNER_TDA18218 = 179 */
static struct regdesc tuner_init_tda18271[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x04 },
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 29cdbfe36852..6d9c5943eb3d 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-device.h>
#include "au8522.h"
#include "au8522_priv.h"
@@ -831,9 +830,25 @@ static const struct i2c_device_id au8522_id[] = {
MODULE_DEVICE_TABLE(i2c, au8522_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "au8522",
- .probe = au8522_probe,
- .remove = au8522_remove,
- .id_table = au8522_id,
+static struct i2c_driver au8522_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "au8522",
+ },
+ .probe = au8522_probe,
+ .remove = au8522_remove,
+ .id_table = au8522_id,
};
+
+static __init int init_au8522(void)
+{
+ return i2c_add_driver(&au8522_driver);
+}
+
+static __exit void exit_au8522(void)
+{
+ i2c_del_driver(&au8522_driver);
+}
+
+module_init(init_au8522);
+module_exit(exit_au8522);
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 00b5c7e91d5d..ff6c4983051c 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(debug, "Enable verbose debug messages");
#define dprintk if (debug) printk
/* Register values to initialise the demod */
-static u8 init_tab[] = {
+static const u8 init_tab[] = {
0x00, 0x00, /* Stop aquisition */
0x0B, 0x06,
0x09, 0x01,
@@ -92,52 +92,56 @@ static int cx22702_writereg(struct cx22702_state *state, u8 reg, u8 data)
ret = i2c_transfer(state->i2c, &msg, 1);
- if (ret != 1)
+ if (unlikely(ret != 1)) {
printk(KERN_ERR
"%s: error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
__func__, reg, data, ret);
+ return -1;
+ }
- return (ret != 1) ? -1 : 0;
+ return 0;
}
static u8 cx22702_readreg(struct cx22702_state *state, u8 reg)
{
int ret;
- u8 b0[] = { reg };
- u8 b1[] = { 0 };
+ u8 data;
struct i2c_msg msg[] = {
{ .addr = state->config->demod_address, .flags = 0,
- .buf = b0, .len = 1 },
+ .buf = &reg, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD,
- .buf = b1, .len = 1 } };
+ .buf = &data, .len = 1 } };
ret = i2c_transfer(state->i2c, msg, 2);
- if (ret != 2)
- printk(KERN_ERR "%s: readreg error (ret == %i)\n",
- __func__, ret);
+ if (unlikely(ret != 2)) {
+ printk(KERN_ERR "%s: error (reg == 0x%02x, ret == %i)\n",
+ __func__, reg, ret);
+ return 0;
+ }
- return b1[0];
+ return data;
}
static int cx22702_set_inversion(struct cx22702_state *state, int inversion)
{
u8 val;
+ val = cx22702_readreg(state, 0x0C);
switch (inversion) {
case INVERSION_AUTO:
return -EOPNOTSUPP;
case INVERSION_ON:
- val = cx22702_readreg(state, 0x0C);
- return cx22702_writereg(state, 0x0C, val | 0x01);
+ val |= 0x01;
+ break;
case INVERSION_OFF:
- val = cx22702_readreg(state, 0x0C);
- return cx22702_writereg(state, 0x0C, val & 0xfe);
+ val &= 0xfe;
+ break;
default:
return -EINVAL;
}
-
+ return cx22702_writereg(state, 0x0C, val);
}
/* Retrieve the demod settings */
@@ -244,13 +248,15 @@ static int cx22702_get_tps(struct cx22702_state *state,
static int cx22702_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct cx22702_state *state = fe->demodulator_priv;
+ u8 val;
+
dprintk("%s(%d)\n", __func__, enable);
+ val = cx22702_readreg(state, 0x0D);
if (enable)
- return cx22702_writereg(state, 0x0D,
- cx22702_readreg(state, 0x0D) & 0xfe);
+ val &= 0xfe;
else
- return cx22702_writereg(state, 0x0D,
- cx22702_readreg(state, 0x0D) | 1);
+ val |= 0x01;
+ return cx22702_writereg(state, 0x0D, val);
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
@@ -270,23 +276,21 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
cx22702_set_inversion(state, p->inversion);
/* set bandwidth */
+ val = cx22702_readreg(state, 0x0C) & 0xcf;
switch (p->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
- cx22702_writereg(state, 0x0C,
- (cx22702_readreg(state, 0x0C) & 0xcf) | 0x20);
+ val |= 0x20;
break;
case BANDWIDTH_7_MHZ:
- cx22702_writereg(state, 0x0C,
- (cx22702_readreg(state, 0x0C) & 0xcf) | 0x10);
+ val |= 0x10;
break;
case BANDWIDTH_8_MHZ:
- cx22702_writereg(state, 0x0C,
- cx22702_readreg(state, 0x0C) & 0xcf);
break;
default:
dprintk("%s: invalid bandwidth\n", __func__);
return -EINVAL;
}
+ cx22702_writereg(state, 0x0C, val);
p->u.ofdm.code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
@@ -312,33 +316,31 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
/* manually programmed values */
- val = 0;
- switch (p->u.ofdm.constellation) {
+ switch (p->u.ofdm.constellation) { /* mask 0x18 */
case QPSK:
- val = (val & 0xe7);
+ val = 0x00;
break;
case QAM_16:
- val = (val & 0xe7) | 0x08;
+ val = 0x08;
break;
case QAM_64:
- val = (val & 0xe7) | 0x10;
+ val = 0x10;
break;
default:
dprintk("%s: invalid constellation\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.hierarchy_information) {
+ switch (p->u.ofdm.hierarchy_information) { /* mask 0x07 */
case HIERARCHY_NONE:
- val = (val & 0xf8);
break;
case HIERARCHY_1:
- val = (val & 0xf8) | 1;
+ val |= 0x01;
break;
case HIERARCHY_2:
- val = (val & 0xf8) | 2;
+ val |= 0x02;
break;
case HIERARCHY_4:
- val = (val & 0xf8) | 3;
+ val |= 0x03;
break;
default:
dprintk("%s: invalid hierarchy\n", __func__);
@@ -346,44 +348,42 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x06, val);
- val = 0;
- switch (p->u.ofdm.code_rate_HP) {
+ switch (p->u.ofdm.code_rate_HP) { /* mask 0x38 */
case FEC_NONE:
case FEC_1_2:
- val = (val & 0xc7);
+ val = 0x00;
break;
case FEC_2_3:
- val = (val & 0xc7) | 0x08;
+ val = 0x08;
break;
case FEC_3_4:
- val = (val & 0xc7) | 0x10;
+ val = 0x10;
break;
case FEC_5_6:
- val = (val & 0xc7) | 0x18;
+ val = 0x18;
break;
case FEC_7_8:
- val = (val & 0xc7) | 0x20;
+ val = 0x20;
break;
default:
dprintk("%s: invalid code_rate_HP\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.code_rate_LP) {
+ switch (p->u.ofdm.code_rate_LP) { /* mask 0x07 */
case FEC_NONE:
case FEC_1_2:
- val = (val & 0xf8);
break;
case FEC_2_3:
- val = (val & 0xf8) | 1;
+ val |= 0x01;
break;
case FEC_3_4:
- val = (val & 0xf8) | 2;
+ val |= 0x02;
break;
case FEC_5_6:
- val = (val & 0xf8) | 3;
+ val |= 0x03;
break;
case FEC_7_8:
- val = (val & 0xf8) | 4;
+ val |= 0x04;
break;
default:
dprintk("%s: invalid code_rate_LP\n", __func__);
@@ -391,30 +391,28 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x07, val);
- val = 0;
- switch (p->u.ofdm.guard_interval) {
+ switch (p->u.ofdm.guard_interval) { /* mask 0x0c */
case GUARD_INTERVAL_1_32:
- val = (val & 0xf3);
+ val = 0x00;
break;
case GUARD_INTERVAL_1_16:
- val = (val & 0xf3) | 0x04;
+ val = 0x04;
break;
case GUARD_INTERVAL_1_8:
- val = (val & 0xf3) | 0x08;
+ val = 0x08;
break;
case GUARD_INTERVAL_1_4:
- val = (val & 0xf3) | 0x0c;
+ val = 0x0c;
break;
default:
dprintk("%s: invalid guard_interval\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.transmission_mode) {
+ switch (p->u.ofdm.transmission_mode) { /* mask 0x03 */
case TRANSMISSION_MODE_2K:
- val = (val & 0xfc);
break;
case TRANSMISSION_MODE_8K:
- val = (val & 0xfc) | 1;
+ val |= 0x1;
break;
default:
dprintk("%s: invalid transmission_mode\n", __func__);
@@ -505,7 +503,7 @@ static int cx22702_read_signal_strength(struct dvb_frontend *fe,
{
struct cx22702_state *state = fe->demodulator_priv;
- u16 rs_ber = 0;
+ u16 rs_ber;
rs_ber = cx22702_readreg(state, 0x23);
*signal_strength = (rs_ber << 8) | rs_ber;
@@ -516,7 +514,7 @@ static int cx22702_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cx22702_state *state = fe->demodulator_priv;
- u16 rs_ber = 0;
+ u16 rs_ber;
if (cx22702_readreg(state, 0xE4) & 0x02) {
/* Realtime statistics */
rs_ber = (cx22702_readreg(state, 0xDE) & 0x7F) << 7
@@ -572,7 +570,7 @@ static void cx22702_release(struct dvb_frontend *fe)
kfree(state);
}
-static struct dvb_frontend_ops cx22702_ops;
+static const struct dvb_frontend_ops cx22702_ops;
struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
struct i2c_adapter *i2c)
@@ -587,7 +585,6 @@ struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
/* setup the state */
state->config = config;
state->i2c = i2c;
- state->prevUCBlocks = 0;
/* check if the demod is there */
if (cx22702_readreg(state, 0x1f) != 0x3)
@@ -605,7 +602,7 @@ error:
}
EXPORT_SYMBOL(cx22702_attach);
-static struct dvb_frontend_ops cx22702_ops = {
+static const struct dvb_frontend_ops cx22702_ops = {
.info = {
.name = "Conexant CX22702 DVB-T",
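
The cx22702 hunks above repeatedly convert pairs of back-to-back readreg/writereg calls into a single read, a switch that only touches the relevant bit field, and one write-back. Below is a minimal user-space sketch of that read-modify-write shape; demod_read()/demod_write() and the fake register array are stand-ins for the driver's I2C accessors, and only the 0x0C bandwidth field (mask 0x30, cleared via & 0xcf) is modelled.

#include <stdio.h>

static unsigned char regs[256];	/* stand-in for the demod's register file */

static unsigned char demod_read(unsigned char reg)
{
	return regs[reg];
}

static void demod_write(unsigned char reg, unsigned char val)
{
	regs[reg] = val;
}

/* mirrors the reworked set_tps() bandwidth logic: read once, mask once, write once */
static int set_bandwidth(int bw_mhz)
{
	unsigned char val = demod_read(0x0C) & 0xcf;	/* clear bits 5:4 */

	switch (bw_mhz) {
	case 6:
		val |= 0x20;
		break;
	case 7:
		val |= 0x10;
		break;
	case 8:
		break;				/* 00 means 8 MHz */
	default:
		return -1;
	}
	demod_write(0x0C, val);			/* single write-back */
	return 0;
}

int main(void)
{
	regs[0x0C] = 0xff;
	set_bandwidth(7);
	printf("reg 0x0C = 0x%02x\n", regs[0x0C]);	/* prints 0xdf */
	return 0;
}
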
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 00a4e8f03304..7a1a5bc337d8 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -310,7 +310,7 @@ static int cx24110_set_symbolrate (struct cx24110_state* state, u32 srate)
}
-static int _cx24110_pll_write (struct dvb_frontend* fe, u8 *buf, int len)
+static int _cx24110_pll_write (struct dvb_frontend* fe, const u8 buf[], int len)
{
struct cx24110_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index d8f921b6fafd..fad6a990a39b 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -1108,7 +1108,6 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
strlcpy(state->tuner_i2c_adapter.name, "CX24123 tuner I2C bus",
sizeof(state->tuner_i2c_adapter.name));
- state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index 980e02f1575e..a4991026254d 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -130,7 +130,6 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
struct dibx000_i2c_master *mst)
{
strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
- i2c_adap->class = I2C_CLASS_TV_DIGITAL, i2c_adap->algo = algo;
i2c_adap->algo_data = NULL;
i2c_set_adapdata(i2c_adap, mst);
if (i2c_add_adapter(i2c_adap) < 0)
diff --git a/drivers/media/dvb/frontends/drx397xD.c b/drivers/media/dvb/frontends/drx397xD.c
index f74cca6dc26b..a05007c80985 100644
--- a/drivers/media/dvb/frontends/drx397xD.c
+++ b/drivers/media/dvb/frontends/drx397xD.c
@@ -232,7 +232,7 @@ static int write_fw(struct drx397xD_state *s, enum blob_ix ix)
exit_rc:
read_unlock(&fw[s->chip_rev].lock);
- return 0;
+ return rc;
}
/* Function is not endian safe, use the RD16 wrapper below */
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
new file mode 100644
index 000000000000..55f2eba7bc96
--- /dev/null
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -0,0 +1,323 @@
+/**
+ * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
+ *
+ * Copyright (C) 2010 Malcolm Priestley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "ix2505v.h"
+
+static int ix2505v_debug;
+#define dprintk(level, args...) do { \
+ if (ix2505v_debug & level) \
+ printk(KERN_DEBUG "ix2505v: " args); \
+} while (0)
+
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_i2c(args...) dprintk(0x02, args)
+
+struct ix2505v_state {
+ struct i2c_adapter *i2c;
+ const struct ix2505v_config *config;
+ u32 frequency;
+};
+
+/**
+ * Data read format of the Sharp IX2505V B0017
+ *
+ * byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1
+ * byte2: POR | FL | RD2 | RD1 | RD0 | X | X | X
+ *
+ * byte1 = address
+ * byte2:
+ * POR = Power on Reset (VCC H=<2.2v L=>2.2v)
+ * FL = Phase Lock (H=lock L=unlock)
+ * RD0-2 = Reserved internal operations
+ *
+ * Only POR can be used to check the tuner is present
+ *
+ * Caution: after byte2 the I2C reverts to write mode; continuing to read
+ * may corrupt tuning data.
+ *
+ */
+
+static int ix2505v_read_status_reg(struct ix2505v_state *state)
+{
+ u8 addr = state->config->tuner_address;
+ u8 b2[] = {0};
+ int ret;
+
+ struct i2c_msg msg[1] = {
+ { .addr = addr, .flags = I2C_M_RD, .buf = b2, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+ deb_i2c("Read %s ", __func__);
+
+ return (ret == 1) ? (int) b2[0] : -1;
+}
+
+static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
+{
+ struct i2c_msg msg[1] = {
+ { .addr = state->config->tuner_address, .flags = 0,
+ .buf = buf, .len = count },
+ };
+
+ int ret;
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+
+ if (ret != 1) {
+ deb_i2c("%s: i2c error, ret=%d\n", __func__, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ix2505v_release(struct dvb_frontend *fe)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+
+ fe->tuner_priv = NULL;
+ kfree(state);
+
+ return 0;
+}
+
+/**
+ * Data write format of the Sharp IX2505V B0017
+ *
+ * byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0
+ * byte2: 0 | BG1 | BG2 | N8 | N7 | N6 | N5 | N4
+ * byte3: N3 | N2 | N1 | A5 | A4 | A3 | A2 | A1
+ * byte4: 1 | 1(C1) | 1(C0) | PD5 | PD4 | TM | 0(RTS)| 1(REF)
+ * byte5: BA2 | BA1 | BA0 | PSC | PD3 |PD2/TS2|DIV/TS1|PD0/TS0
+ *
+ * byte1 = address
+ *
+ * Write order
+ * 1) byte1 -> byte2 -> byte3 -> byte4 -> byte5
+ * 2) byte1 -> byte4 -> byte5 -> byte2 -> byte3
+ * 3) byte1 -> byte2 -> byte3 -> byte4
+ * 4) byte1 -> byte4 -> byte5 -> byte2
+ * 5) byte1 -> byte2 -> byte3
+ * 6) byte1 -> byte4 -> byte5
+ * 7) byte1 -> byte2
+ * 8) byte1 -> byte4
+ *
+ * Recommended Setup
+ * 1 -> 8 -> 6
+ */
+
+static int ix2505v_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+ u32 frequency = params->frequency;
+ u32 b_w = (params->u.qpsk.symbol_rate * 27) / 32000;
+ u32 div_factor, N , A, x;
+ int ret = 0, len;
+ u8 gain, cc, ref, psc, local_osc, lpf;
+ u8 data[4] = {0};
+
+ if ((frequency < fe->ops.info.frequency_min)
+ || (frequency > fe->ops.info.frequency_max))
+ return -EINVAL;
+
+ if (state->config->tuner_gain)
+ gain = (state->config->tuner_gain < 4)
+ ? state->config->tuner_gain : 0;
+ else
+ gain = 0x0;
+
+ if (state->config->tuner_chargepump)
+ cc = state->config->tuner_chargepump;
+ else
+ cc = 0x3;
+
+ ref = 8; /* REF =1 */
+ psc = 32; /* PSC = 0 */
+
+ div_factor = (frequency * ref) / 40; /* local osc = 4Mhz */
+ x = div_factor / psc;
+ N = x/100;
+ A = ((x - (N * 100)) * psc) / 100;
+
+ data[0] = ((gain & 0x3) << 5) | (N >> 3);
+ data[1] = (N << 5) | (A & 0x1f);
+ data[2] = 0x81 | ((cc & 0x3) << 5) ; /*PD5,PD4 & TM = 0|C1,C0|REF=1*/
+
+ deb_info("Frq=%d x=%d N=%d A=%d\n", frequency, x, N, A);
+
+ if (frequency <= 1065000)
+ local_osc = (6 << 5) | 2;
+ else if (frequency <= 1170000)
+ local_osc = (7 << 5) | 2;
+ else if (frequency <= 1300000)
+ local_osc = (1 << 5);
+ else if (frequency <= 1445000)
+ local_osc = (2 << 5);
+ else if (frequency <= 1607000)
+ local_osc = (3 << 5);
+ else if (frequency <= 1778000)
+ local_osc = (4 << 5);
+ else if (frequency <= 1942000)
+ local_osc = (5 << 5);
+ else /*frequency up to 2150000*/
+ local_osc = (6 << 5);
+
+ data[3] = local_osc; /* all other bits set 0 */
+
+ if (b_w <= 10000)
+ lpf = 0xc;
+ else if (b_w <= 12000)
+ lpf = 0x2;
+ else if (b_w <= 14000)
+ lpf = 0xa;
+ else if (b_w <= 16000)
+ lpf = 0x6;
+ else if (b_w <= 18000)
+ lpf = 0xe;
+ else if (b_w <= 20000)
+ lpf = 0x1;
+ else if (b_w <= 22000)
+ lpf = 0x9;
+ else if (b_w <= 24000)
+ lpf = 0x5;
+ else if (b_w <= 26000)
+ lpf = 0xd;
+ else if (b_w <= 28000)
+ lpf = 0x3;
+ else
+ lpf = 0xb;
+
+ deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf);
+ deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ len = sizeof(data);
+
+ ret |= ix2505v_write(state, data, len);
+
+ data[2] |= 0x4; /* set TM = 1 other bits same */
+
+ len = 1;
+ ret |= ix2505v_write(state, &data[2], len); /* write byte 4 only */
+
+ msleep(10);
+
+ data[2] |= ((lpf >> 2) & 0x3) << 3; /* lpf */
+ data[3] |= (lpf & 0x3) << 2;
+
+ deb_info("Data 2=[%x%x]\n", data[2], data[3]);
+
+ len = 2;
+ ret |= ix2505v_write(state, &data[2], len); /* write byte 4 & 5 */
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (state->config->min_delay_ms)
+ msleep(state->config->min_delay_ms);
+
+ state->frequency = frequency;
+
+ return ret;
+}
+
+static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct ix2505v_state *state = fe->tuner_priv;
+
+ *frequency = state->frequency;
+
+ return 0;
+}
+
+static struct dvb_tuner_ops ix2505v_tuner_ops = {
+ .info = {
+ .name = "Sharp IX2505V (B0017)",
+ .frequency_min = 950000,
+ .frequency_max = 2175000
+ },
+ .release = ix2505v_release,
+ .set_params = ix2505v_set_params,
+ .get_frequency = ix2505v_get_frequency,
+};
+
+struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct ix2505v_state *state = NULL;
+ int ret;
+
+ if (NULL == config) {
+ deb_i2c("%s: no config ", __func__);
+ goto error;
+ }
+
+ state = kzalloc(sizeof(struct ix2505v_state), GFP_KERNEL);
+ if (NULL == state)
+ return NULL;
+
+ state->config = config;
+ state->i2c = i2c;
+
+ if (state->config->tuner_write_only) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = ix2505v_read_status_reg(state);
+
+ if (ret & 0x80) {
+ deb_i2c("%s: No IX2505V found\n", __func__);
+ goto error;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ fe->tuner_priv = state;
+
+ memcpy(&fe->ops.tuner_ops, &ix2505v_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ deb_i2c("%s: initialization (%s addr=0x%02x) ok\n",
+ __func__, fe->ops.tuner_ops.info.name, config->tuner_address);
+
+ return fe;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(ix2505v_attach);
+
+module_param_named(debug, ix2505v_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+MODULE_DESCRIPTION("DVB IX2505V tuner driver");
+MODULE_AUTHOR("Malcolm Priestley");
+MODULE_LICENSE("GPL");
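
For reference, the PLL divider arithmetic in ix2505v_set_params() above (ref = 8, psc = 32, frequency in kHz) can be checked in isolation. The sketch below is a stand-alone user-space rerun of that math for one illustrative frequency; the frequency and gain values are invented for the example and the output is not a verified register dump for real hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t frequency = 1550000;			/* kHz, illustrative only */
	uint32_t ref = 8, psc = 32, gain = 0;
	uint32_t div_factor = (frequency * ref) / 40;	/* 4 MHz comparison frequency */
	uint32_t x = div_factor / psc;
	uint32_t N = x / 100;
	uint32_t A = ((x - N * 100) * psc) / 100;
	uint8_t data0 = (uint8_t)(((gain & 0x3) << 5) | (N >> 3));
	uint8_t data1 = (uint8_t)((N << 5) | (A & 0x1f));

	/* for 1550000 kHz: N=96, A=27, data[0]=0x0c, data[1]=0x1b */
	printf("N=%u A=%u data[0]=0x%02x data[1]=0x%02x\n", N, A, data0, data1);
	return 0;
}
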
diff --git a/drivers/media/dvb/frontends/ix2505v.h b/drivers/media/dvb/frontends/ix2505v.h
new file mode 100644
index 000000000000..67e89d616d50
--- /dev/null
+++ b/drivers/media/dvb/frontends/ix2505v.h
@@ -0,0 +1,64 @@
+/**
+ * Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
+ *
+ * Copyright (C) 2010 Malcolm Priestley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef DVB_IX2505V_H
+#define DVB_IX2505V_H
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+/**
+ * Attach an ix2505v tuner to the supplied frontend structure.
+ *
+ * @param fe Frontend to attach to.
+ * @param config ix2505v_config structure
+ * @return FE pointer on success, NULL on failure.
+ */
+
+struct ix2505v_config {
+ u8 tuner_address;
+
+ /*Baseband AMP gain control 0/1=0dB(default) 2=-2dB 3=-4dB */
+ u8 tuner_gain;
+
+ /*Charge pump output +/- 0=120 1=260 2=555 3=1200(default) */
+ u8 tuner_chargepump;
+
+ /* delay after tune */
+ int min_delay_ms;
+
+ /* disables reads*/
+ u8 tuner_write_only;
+
+};
+
+#if defined(CONFIG_DVB_IX2505V) || \
+ (defined(CONFIG_DVB_IX2505V_MODULE) && defined(MODULE))
+extern struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config, struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
+ const struct ix2505v_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* DVB_IX2505V_H */
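
The attach prototype above is typically reached through dvb_attach() from a bridge or card driver. The fragment below is a hypothetical sketch of the call shape only; it is not compilable on its own, and the address, delay and write-only flag are invented example values rather than settings for any real board.

/* hypothetical board glue, values are examples only */
static const struct ix2505v_config example_ix2505v_config = {
	.tuner_address    = 0x60,	/* example I2C address */
	.min_delay_ms     = 100,	/* settle time after tuning */
	.tuner_write_only = 1,		/* probe the POR bit at attach time */
};

static int example_frontend_attach(struct dvb_frontend *fe,
				   struct i2c_adapter *i2c)
{
	if (!dvb_attach(ix2505v_attach, fe, &example_ix2505v_config, i2c))
		return -ENODEV;		/* tuner not found on the bus */
	return 0;
}
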
diff --git a/drivers/media/dvb/frontends/lgdt3304.c b/drivers/media/dvb/frontends/lgdt3304.c
deleted file mode 100644
index 45a529b06b9d..000000000000
--- a/drivers/media/dvb/frontends/lgdt3304.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Driver for LG ATSC lgdt3304 driver
- *
- * Copyright (C) 2008 Markus Rechberger <mrechberger@sundtek.de>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include "dvb_frontend.h"
-#include "lgdt3304.h"
-
-static unsigned int debug = 0;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"lgdt3304 debugging (default off)");
-
-#define dprintk(fmt, args...) if (debug) do {\
- printk("lgdt3304 debug: " fmt, ##args); } while (0)
-
-struct lgdt3304_state
-{
- struct dvb_frontend frontend;
- fe_modulation_t current_modulation;
- __u32 snr;
- __u32 current_frequency;
- __u8 addr;
- struct i2c_adapter *i2c;
-};
-
-static int i2c_write_demod_bytes (struct dvb_frontend *fe, __u8 *buf, int len)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- struct i2c_msg i2cmsgs = {
- .addr = state->addr,
- .flags = 0,
- .len = 3,
- .buf = buf
- };
- int i;
- int err;
-
- for (i=0; i<len-1; i+=3){
- if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) {
- printk("%s i2c_transfer error %d\n", __func__, err);
- if (err < 0)
- return err;
- else
- return -EREMOTEIO;
- }
- i2cmsgs.buf += 3;
- }
- return 0;
-}
-
-static int lgdt3304_i2c_read_reg(struct dvb_frontend *fe, unsigned int reg)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- struct i2c_msg i2cmsgs[2];
- int ret;
- __u8 buf;
-
- __u8 regbuf[2] = { reg>>8, reg&0xff };
-
- i2cmsgs[0].addr = state->addr;
- i2cmsgs[0].flags = 0;
- i2cmsgs[0].len = 2;
- i2cmsgs[0].buf = regbuf;
-
- i2cmsgs[1].addr = state->addr;
- i2cmsgs[1].flags = I2C_M_RD;
- i2cmsgs[1].len = 1;
- i2cmsgs[1].buf = &buf;
-
- if((ret = i2c_transfer(state->i2c, i2cmsgs, 2))<0) {
- printk("%s i2c_transfer error %d\n", __func__, ret);
- return ret;
- }
-
- return buf;
-}
-
-static int lgdt3304_i2c_write_reg(struct dvb_frontend *fe, int reg, int val)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- char buffer[3] = { reg>>8, reg&0xff, val };
- int ret;
-
- struct i2c_msg i2cmsgs = {
- .addr = state->addr,
- .flags = 0,
- .len = 3,
- .buf=buffer
- };
- ret = i2c_transfer(state->i2c, &i2cmsgs, 1);
- if (ret != 1) {
- printk("%s i2c_transfer error %d\n", __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
-
-static int lgdt3304_soft_Reset(struct dvb_frontend *fe)
-{
- lgdt3304_i2c_write_reg(fe, 0x0002, 0x9a);
- lgdt3304_i2c_write_reg(fe, 0x0002, 0x9b);
- mdelay(200);
- return 0;
-}
-
-static int lgdt3304_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) {
- int err = 0;
-
- static __u8 lgdt3304_vsb8_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x13,
- 0x00, 0x0d, 0x02,
- 0x00, 0x0e, 0x02,
- 0x00, 0x12, 0x32,
- 0x00, 0x13, 0xc4,
- 0x01, 0x12, 0x17,
- 0x01, 0x13, 0x15,
- 0x01, 0x14, 0x18,
- 0x01, 0x15, 0xff,
- 0x01, 0x16, 0x2c,
- 0x02, 0x14, 0x67,
- 0x02, 0x24, 0x8d,
- 0x04, 0x27, 0x12,
- 0x04, 0x28, 0x4f,
- 0x03, 0x08, 0x80,
- 0x03, 0x09, 0x00,
- 0x03, 0x0d, 0x00,
- 0x03, 0x0e, 0x1c,
- 0x03, 0x14, 0xe1,
- 0x05, 0x0e, 0x5b,
- };
-
- /* not yet tested .. */
- static __u8 lgdt3304_qam64_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x18,
- 0x00, 0x0d, 0x02,
- //0x00, 0x0e, 0x02,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x00,
- 0x03, 0x14, 0xe3,
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
- 0x05, 0x0e, 0x5b,
- };
-
-
- /* tested with KWorld a340 */
- static __u8 lgdt3304_qam256_data[] = {
- /* 16bit , 8bit */
- /* regs , val */
- 0x00, 0x00, 0x01, //0x19,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x80,
- 0x00, 0x0d, 0x02,
- 0x03, 0x14, 0xe3,
-
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
-
- 0x03, 0x0d, 0x14,
- //0x05, 0x0e, 0x5b,
- 0x01, 0x06, 0x4a,
- 0x01, 0x07, 0x3d,
- 0x01, 0x08, 0x70,
- 0x01, 0x09, 0xa3,
-
- 0x05, 0x04, 0xfd,
-
- 0x00, 0x0d, 0x82,
-
- 0x05, 0x0e, 0x5b,
-
- 0x05, 0x0e, 0x5b,
-
- 0x00, 0x02, 0x9a,
-
- 0x00, 0x02, 0x9b,
-
- 0x00, 0x00, 0x01,
- 0x00, 0x12, 0x2a,
- 0x00, 0x13, 0x80,
- 0x00, 0x0d, 0x02,
- 0x03, 0x14, 0xe3,
-
- 0x03, 0x0e, 0x1c,
- 0x03, 0x08, 0x66,
- 0x03, 0x09, 0x66,
- 0x03, 0x0a, 0x08,
- 0x03, 0x0b, 0x9b,
-
- 0x03, 0x0d, 0x14,
- 0x01, 0x06, 0x4a,
- 0x01, 0x07, 0x3d,
- 0x01, 0x08, 0x70,
- 0x01, 0x09, 0xa3,
-
- 0x05, 0x04, 0xfd,
-
- 0x00, 0x0d, 0x82,
-
- 0x05, 0x0e, 0x5b,
- };
-
- struct lgdt3304_state *state = fe->demodulator_priv;
- if (state->current_modulation != param->u.vsb.modulation) {
- switch(param->u.vsb.modulation) {
- case VSB_8:
- err = i2c_write_demod_bytes(fe, lgdt3304_vsb8_data,
- sizeof(lgdt3304_vsb8_data));
- break;
- case QAM_64:
- err = i2c_write_demod_bytes(fe, lgdt3304_qam64_data,
- sizeof(lgdt3304_qam64_data));
- break;
- case QAM_256:
- err = i2c_write_demod_bytes(fe, lgdt3304_qam256_data,
- sizeof(lgdt3304_qam256_data));
- break;
- default:
- break;
- }
-
- if (err) {
- printk("%s error setting modulation\n", __func__);
- } else {
- state->current_modulation = param->u.vsb.modulation;
- }
- }
- state->current_frequency = param->frequency;
-
- lgdt3304_soft_Reset(fe);
-
-
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, param);
-
- return 0;
-}
-
-static int lgdt3304_init(struct dvb_frontend *fe) {
- return 0;
-}
-
-static int lgdt3304_sleep(struct dvb_frontend *fe) {
- return 0;
-}
-
-
-static int lgdt3304_read_status(struct dvb_frontend *fe, fe_status_t *status)
-{
- struct lgdt3304_state *state = fe->demodulator_priv;
- int r011d;
- int qam_lck;
-
- *status = 0;
- dprintk("lgdt read status\n");
-
- r011d = lgdt3304_i2c_read_reg(fe, 0x011d);
-
- dprintk("%02x\n", r011d);
-
- switch(state->current_modulation) {
- case VSB_8:
- if (r011d & 0x80) {
- dprintk("VSB Locked\n");
- *status |= FE_HAS_CARRIER;
- *status |= FE_HAS_LOCK;
- *status |= FE_HAS_SYNC;
- *status |= FE_HAS_SIGNAL;
- }
- break;
- case QAM_64:
- case QAM_256:
- qam_lck = r011d & 0x7;
- switch(qam_lck) {
- case 0x0: dprintk("Unlock\n");
- break;
- case 0x4: dprintk("1st Lock in acquisition state\n");
- break;
- case 0x6: dprintk("2nd Lock in acquisition state\n");
- break;
- case 0x7: dprintk("Final Lock in good reception state\n");
- *status |= FE_HAS_CARRIER;
- *status |= FE_HAS_LOCK;
- *status |= FE_HAS_SYNC;
- *status |= FE_HAS_SIGNAL;
- break;
- }
- break;
- default:
- printk("%s unhandled modulation\n", __func__);
- }
-
-
- return 0;
-}
-
-static int lgdt3304_read_ber(struct dvb_frontend *fe, __u32 *ber)
-{
- dprintk("read ber\n");
- return 0;
-}
-
-static int lgdt3304_read_snr(struct dvb_frontend *fe, __u16 *snr)
-{
- dprintk("read snr\n");
- return 0;
-}
-
-static int lgdt3304_read_ucblocks(struct dvb_frontend *fe, __u32 *ucblocks)
-{
- dprintk("read ucblocks\n");
- return 0;
-}
-
-static void lgdt3304_release(struct dvb_frontend *fe)
-{
- struct lgdt3304_state *state = (struct lgdt3304_state *)fe->demodulator_priv;
- kfree(state);
-}
-
-static struct dvb_frontend_ops demod_lgdt3304={
- .info = {
- .name = "LG 3304",
- .type = FE_ATSC,
- .frequency_min = 54000000,
- .frequency_max = 858000000,
- .frequency_stepsize = 62500,
- .symbol_rate_min = 5056941,
- .symbol_rate_max = 10762000,
- .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
- },
- .init = lgdt3304_init,
- .sleep = lgdt3304_sleep,
- .set_frontend = lgdt3304_set_parameters,
- .read_snr = lgdt3304_read_snr,
- .read_ber = lgdt3304_read_ber,
- .read_status = lgdt3304_read_status,
- .read_ucblocks = lgdt3304_read_ucblocks,
- .release = lgdt3304_release,
-};
-
-struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c)
-{
-
- struct lgdt3304_state *state;
- state = kzalloc(sizeof(struct lgdt3304_state), GFP_KERNEL);
- if (state == NULL)
- return NULL;
- state->addr = config->i2c_address;
- state->i2c = i2c;
-
- memcpy(&state->frontend.ops, &demod_lgdt3304, sizeof(struct dvb_frontend_ops));
- state->frontend.demodulator_priv = state;
- return &state->frontend;
-}
-
-EXPORT_SYMBOL_GPL(lgdt3304_attach);
-MODULE_AUTHOR("Markus Rechberger <mrechberger@empiatech.com>");
-MODULE_DESCRIPTION("LGE LGDT3304 DVB-T demodulator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/lgdt3304.h b/drivers/media/dvb/frontends/lgdt3304.h
deleted file mode 100644
index fc409fe59acb..000000000000
--- a/drivers/media/dvb/frontends/lgdt3304.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Driver for DVB-T lgdt3304 demodulator
- *
- * Copyright (C) 2008 Markus Rechberger <mrechberger@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
- */
-
-#ifndef LGDT3304_H
-#define LGDT3304_H
-
-#include <linux/dvb/frontend.h>
-
-struct lgdt3304_config
-{
- /* demodulator's I2C address */
- u8 i2c_address;
-};
-
-#if defined(CONFIG_DVB_LGDT3304) || (defined(CONFIG_DVB_LGDT3304_MODULE) && defined(MODULE))
-extern struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend* lgdt3304_attach(const struct lgdt3304_config *config,
- struct i2c_adapter *i2c)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif /* CONFIG_DVB_LGDT */
-
-#endif /* LGDT3304_H */
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 5ea28ae2ba8f..0fcddc4569d2 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -662,7 +662,7 @@ static void lgs8gxx_release(struct dvb_frontend *fe)
}
-static int lgs8gxx_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct lgs8gxx_state *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/mt352.c b/drivers/media/dvb/frontends/mt352.c
index beba5aa0db50..319672f8e1a7 100644
--- a/drivers/media/dvb/frontends/mt352.c
+++ b/drivers/media/dvb/frontends/mt352.c
@@ -69,7 +69,7 @@ static int mt352_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
return 0;
}
-static int _mt352_write(struct dvb_frontend* fe, u8* ibuf, int ilen)
+static int _mt352_write(struct dvb_frontend* fe, const u8 ibuf[], int ilen)
{
int err,i;
for (i=0; i < ilen-1; i++)
diff --git a/drivers/media/dvb/frontends/mt352.h b/drivers/media/dvb/frontends/mt352.h
index 595092f9f0c4..ca2562d6f289 100644
--- a/drivers/media/dvb/frontends/mt352.h
+++ b/drivers/media/dvb/frontends/mt352.h
@@ -63,7 +63,7 @@ static inline struct dvb_frontend* mt352_attach(const struct mt352_config* confi
}
#endif // CONFIG_DVB_MT352
-static inline int mt352_write(struct dvb_frontend *fe, u8 *buf, int len) {
+static inline int mt352_write(struct dvb_frontend *fe, const u8 buf[], int len) {
int r = 0;
if (fe->ops.write)
r = fe->ops.write(fe, buf, len);
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 2e9fd2893ede..e87b747ea99c 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -920,7 +920,6 @@ struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
/* create tuner i2c adapter */
strlcpy(state->tuner_i2c_adapter.name, "S5H1420-PN1010 tuner I2C bus",
sizeof(state->tuner_i2c_adapter.name));
- state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
state->tuner_i2c_adapter.algo = &s5h1420_tuner_i2c_algo;
state->tuner_i2c_adapter.algo_data = NULL;
i2c_set_adapdata(&state->tuner_i2c_adapter, state);
diff --git a/drivers/media/dvb/frontends/s5h1432.c b/drivers/media/dvb/frontends/s5h1432.c
new file mode 100644
index 000000000000..0c6dcb90d168
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1432.c
@@ -0,0 +1,415 @@
+/*
+ * Samsung s5h1432 DVB-T demodulator driver
+ *
+ * Copyright (C) 2009 Bill Liu <Bill.Liu@Conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "dvb_frontend.h"
+#include "s5h1432.h"
+
+struct s5h1432_state {
+
+ struct i2c_adapter *i2c;
+
+ /* configuration settings */
+ const struct s5h1432_config *config;
+
+ struct dvb_frontend frontend;
+
+ fe_modulation_t current_modulation;
+ unsigned int first_tune:1;
+
+ u32 current_frequency;
+ int if_freq;
+
+ u8 inversion;
+};
+
+static int debug;
+
+#define dprintk(arg...) do { \
+ if (debug) \
+ printk(arg); \
+ } while (0)
+
+static int s5h1432_writereg(struct s5h1432_state *state,
+ u8 addr, u8 reg, u8 data)
+{
+ int ret;
+ u8 buf[] = { reg, data };
+
+ struct i2c_msg msg = {.addr = addr, .flags = 0, .buf = buf, .len = 2 };
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+
+ if (ret != 1)
+ printk(KERN_ERR "%s: writereg error (addr 0x%02x reg 0x%02x val 0x%02x, "
+ "ret == %i)\n", __func__, addr, reg, data, ret);
+
+ return (ret != 1) ? -1 : 0;
+}
+
+static u8 s5h1432_readreg(struct s5h1432_state *state, u8 addr, u8 reg)
+{
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ {.addr = addr, .flags = 0, .buf = b0, .len = 1},
+ {.addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 1}
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2)
+ printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+ __func__, ret);
+ return b1[0];
+}
+
+static int s5h1432_sleep(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int s5h1432_set_channel_bandwidth(struct dvb_frontend *fe,
+ u32 bandwidth)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ u8 reg = 0;
+
+ /* Register [0x2E] bit 3:2 : 8MHz = 0; 7MHz = 1; 6MHz = 2 */
+ reg = s5h1432_readreg(state, S5H1432_I2C_TOP_ADDR, 0x2E);
+ reg &= ~(0x0C);
+ switch (bandwidth) {
+ case 6:
+ reg |= 0x08;
+ break;
+ case 7:
+ reg |= 0x04;
+ break;
+ case 8:
+ reg |= 0x00;
+ break;
+ default:
+ return 0;
+ }
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x2E, reg);
+ return 1;
+}
+
+static int s5h1432_set_IF(struct dvb_frontend *fe, u32 ifFreqHz)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ switch (ifFreqHz) {
+ case TAIWAN_HI_IF_FREQ_44_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0x15);
+ break;
+ case EUROPE_HI_IF_FREQ_36_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0x40);
+ break;
+ case IF_FREQ_6_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xe0);
+ break;
+ case IF_FREQ_3point3_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEE);
+ break;
+ case IF_FREQ_3point5_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x55);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xED);
+ break;
+ case IF_FREQ_4_MHZ:
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0xAA);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0xAA);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEA);
+ break;
+ default:
+ {
+ u32 value = 0;
+ value = (u32) (((48000 - (ifFreqHz / 1000)) * 512 *
+ (u32) 32768) / (48 * 1000));
+ printk(KERN_INFO
+ "Default IFFreq %d :reg value = 0x%x\n",
+ ifFreqHz, value);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4,
+ (u8) value & 0xFF);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5,
+ (u8) (value >> 8) & 0xFF);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7,
+ (u8) (value >> 16) & 0xFF);
+ break;
+ }
+
+ }
+
+ return 1;
+}
+
+/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
+static int s5h1432_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ u32 dvb_bandwidth = 8;
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ if (p->frequency == state->current_frequency) {
+ /*current_frequency = p->frequency; */
+ /*state->current_frequency = p->frequency; */
+ } else {
+ fe->ops.tuner_ops.set_params(fe, p);
+ msleep(300);
+ s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
+ switch (p->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ dvb_bandwidth = 6;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_7_MHZ:
+ dvb_bandwidth = 7;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_8_MHZ:
+ dvb_bandwidth = 8;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ default:
+ return 0;
+ }
+ /*fe->ops.tuner_ops.set_params(fe, p); */
+/*Soft Reset chip*/
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+ s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
+ switch (p->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ dvb_bandwidth = 6;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_7_MHZ:
+ dvb_bandwidth = 7;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ case BANDWIDTH_8_MHZ:
+ dvb_bandwidth = 8;
+ s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
+ break;
+ default:
+ return 0;
+ }
+ /*fe->ops.tuner_ops.set_params(fe,p); */
+ /*Soft Reset chip*/
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+ }
+
+ state->current_frequency = p->frequency;
+
+ return 0;
+}
+
+static int s5h1432_init(struct dvb_frontend *fe)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+
+ u8 reg = 0;
+ state->current_frequency = 0;
+ printk(KERN_INFO " s5h1432_init().\n");
+
+ /*Set VSB mode as default, this also does a soft reset */
+ /*Initialize registers */
+
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x04, 0xa8);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x05, 0x01);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x07, 0x70);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x19, 0x80);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1b, 0x9D);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1c, 0x30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1d, 0x20);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1e, 0x1B);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x2e, 0x40);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x42, 0x84);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x50, 0x5a);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x5a, 0xd3);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x68, 0x50);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xb8, 0x3c);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xc4, 0x10);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xcc, 0x9c);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xDA, 0x00);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe1, 0x94);
+ /* s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xf4, 0xa1); */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xf9, 0x00);
+
+ /*For NXP tuner*/
+
+ /*Set 3.3MHz as default IF frequency */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe4, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe5, 0x66);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0xe7, 0xEE);
+ /* Set reg 0x1E to get the full dynamic range */
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x1e, 0x31);
+
+ /* Mode setting in demod */
+ reg = s5h1432_readreg(state, S5H1432_I2C_TOP_ADDR, 0x42);
+ reg |= 0x80;
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x42, reg);
+ /* Serial mode */
+
+ /* Soft Reset chip */
+
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
+ msleep(30);
+ s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
+
+
+ return 0;
+}
+
+static int s5h1432_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ return 0;
+}
+
+static int s5h1432_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ return 0;
+}
+
+static int s5h1432_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ return 0;
+}
+
+static int s5h1432_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+
+ return 0;
+}
+
+static int s5h1432_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ return 0;
+}
+
+static int s5h1432_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ return 0;
+}
+
+static int s5h1432_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ return 0;
+}
+
+static void s5h1432_release(struct dvb_frontend *fe)
+{
+ struct s5h1432_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static struct dvb_frontend_ops s5h1432_ops;
+
+struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct s5h1432_state *state = NULL;
+
+ printk(KERN_INFO " Enter s5h1432_attach(). attach success!\n");
+ /* allocate memory for the internal state */
+ state = kmalloc(sizeof(struct s5h1432_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ state->current_modulation = QAM_16;
+ state->inversion = state->config->inversion;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &s5h1432_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->frontend.demodulator_priv = state;
+
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(s5h1432_attach);
+
+static struct dvb_frontend_ops s5h1432_ops = {
+
+ .info = {
+ .name = "Samsung s5h1432 DVB-T Frontend",
+ .type = FE_OFDM,
+ .frequency_min = 177000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 166666,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER},
+
+ .init = s5h1432_init,
+ .sleep = s5h1432_sleep,
+ .set_frontend = s5h1432_set_frontend,
+ .get_frontend = s5h1432_get_frontend,
+ .get_tune_settings = s5h1432_get_tune_settings,
+ .read_status = s5h1432_read_status,
+ .read_ber = s5h1432_read_ber,
+ .read_signal_strength = s5h1432_read_signal_strength,
+ .read_snr = s5h1432_read_snr,
+ .read_ucblocks = s5h1432_read_ucblocks,
+ .release = s5h1432_release,
+};
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+
+MODULE_DESCRIPTION("Samsung s5h1432 DVB-T Demodulator driver");
+MODULE_AUTHOR("Bill Liu");
+MODULE_LICENSE("GPL");
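
The default branch of s5h1432_set_IF() above derives the three IF registers from value = (48000 - if_khz) * 2^24 / 48000, with 0xe4/0xe5/0xe7 holding the low/mid/high bytes. The stand-alone check below reruns that formula with a 64-bit intermediate (the in-driver u32 expression appears to wrap for IFs in this range, which may be why the common IFs are hard-coded) and reproduces the 3.3 MHz table entry.

#include <stdio.h>
#include <stdint.h>

static uint32_t if_to_reg(uint32_t if_hz)
{
	/* value = (48000 - if_khz) * 512 * 32768 / 48000, 64-bit intermediate */
	uint64_t v = (uint64_t)(48000 - if_hz / 1000) * 512 * 32768;

	return (uint32_t)(v / 48000);
}

int main(void)
{
	uint32_t v = if_to_reg(3300000);	/* the IF_FREQ_3point3_MHZ case */

	/* prints 0xe7=0xee 0xe5=0x66 0xe4=0x66, matching the hard-coded entry */
	printf("0xe7=0x%02x 0xe5=0x%02x 0xe4=0x%02x\n",
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}
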
diff --git a/drivers/media/dvb/frontends/s5h1432.h b/drivers/media/dvb/frontends/s5h1432.h
new file mode 100644
index 000000000000..b57438c32546
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1432.h
@@ -0,0 +1,91 @@
+/*
+ * Samsung s5h1432 DVB-T demodulator driver
+ *
+ * Copyright (C) 2009 Bill Liu <Bill.Liu@Conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __S5H1432_H__
+#define __S5H1432_H__
+
+#include <linux/dvb/frontend.h>
+
+#define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
+
+#define TAIWAN_HI_IF_FREQ_44_MHZ 44000000
+#define EUROPE_HI_IF_FREQ_36_MHZ 36000000
+#define IF_FREQ_6_MHZ 6000000
+#define IF_FREQ_3point3_MHZ 3300000
+#define IF_FREQ_3point5_MHZ 3500000
+#define IF_FREQ_4_MHZ 4000000
+
+struct s5h1432_config {
+
+ /* serial/parallel output */
+#define S5H1432_PARALLEL_OUTPUT 0
+#define S5H1432_SERIAL_OUTPUT 1
+ u8 output_mode;
+
+ /* GPIO Setting */
+#define S5H1432_GPIO_OFF 0
+#define S5H1432_GPIO_ON 1
+ u8 gpio;
+
+ /* MPEG signal timing */
+#define S5H1432_MPEGTIMING_CONTINOUS_INVERTING_CLOCK 0
+#define S5H1432_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 1
+#define S5H1432_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK 2
+#define S5H1432_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK 3
+ u16 mpeg_timing;
+
+ /* IF Freq for QAM and VSB in KHz */
+#define S5H1432_IF_3250 3250
+#define S5H1432_IF_3500 3500
+#define S5H1432_IF_4000 4000
+#define S5H1432_IF_5380 5380
+#define S5H1432_IF_44000 44000
+#define S5H1432_VSB_IF_DEFAULT S5H1432_IF_44000
+#define S5H1432_QAM_IF_DEFAULT S5H1432_IF_44000
+ u16 qam_if;
+ u16 vsb_if;
+
+ /* Spectral Inversion */
+#define S5H1432_INVERSION_OFF 0
+#define S5H1432_INVERSION_ON 1
+ u8 inversion;
+
+ /* Return lock status based on tuner lock, or demod lock */
+#define S5H1432_TUNERLOCKING 0
+#define S5H1432_DEMODLOCKING 1
+ u8 status_mode;
+};
+
+#if defined(CONFIG_DVB_S5H1432) || \
+ (defined(CONFIG_DVB_S5H1432_MODULE) && defined(MODULE))
+extern struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *s5h1432_attach(const struct s5h1432_config
+ *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_S5H1432 */
+
+#endif /* __S5H1432_H__ */
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c
index d21a327db629..4b0c99a08a85 100644
--- a/drivers/media/dvb/frontends/si21xx.c
+++ b/drivers/media/dvb/frontends/si21xx.c
@@ -268,7 +268,7 @@ static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int si21_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int si21_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct si21xx_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
index f73c13323e90..80a9e4cba631 100644
--- a/drivers/media/dvb/frontends/stb6100.c
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -506,7 +506,7 @@ static struct dvb_tuner_ops stb6100_ops = {
};
struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c)
{
struct stb6100_state *state = NULL;
diff --git a/drivers/media/dvb/frontends/stb6100.h b/drivers/media/dvb/frontends/stb6100.h
index 395d056599a6..2ab096614b3f 100644
--- a/drivers/media/dvb/frontends/stb6100.h
+++ b/drivers/media/dvb/frontends/stb6100.h
@@ -97,13 +97,13 @@ struct stb6100_state {
#if defined(CONFIG_DVB_STB6100) || (defined(CONFIG_DVB_STB6100_MODULE) && defined(MODULE))
extern struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
- struct stb6100_config *config,
+ const struct stb6100_config *config,
struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c
index 2930a5d6768a..63db8fd2754c 100644
--- a/drivers/media/dvb/frontends/stv0288.c
+++ b/drivers/media/dvb/frontends/stv0288.c
@@ -6,6 +6,8 @@
Copyright (C) 2008 Igor M. Liplianin <liplianin@me.by>
Removed stb6000 specific tuner code and revised some
procedures.
+ 2010-09-01 Josef Pavlik <josef@pavlik.it>
+ Fixed diseqc_msg, diseqc_burst and set_tone problems
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -78,7 +80,7 @@ static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0288_write(struct dvb_frontend *fe, u8 *buf, int len)
+static int stv0288_write(struct dvb_frontend *fe, const u8 buf[], int len)
{
struct stv0288_state *state = fe->demodulator_priv;
@@ -156,14 +158,13 @@ static int stv0288_send_diseqc_msg(struct dvb_frontend *fe,
stv0288_writeregI(state, 0x09, 0);
msleep(30);
- stv0288_writeregI(state, 0x05, 0x16);
+ stv0288_writeregI(state, 0x05, 0x12);/* modulated mode, single shot */
for (i = 0; i < m->msg_len; i++) {
if (stv0288_writeregI(state, 0x06, m->msg[i]))
return -EREMOTEIO;
- msleep(12);
}
-
+ msleep(m->msg_len*12);
return 0;
}
@@ -174,13 +175,14 @@ static int stv0288_send_diseqc_burst(struct dvb_frontend *fe,
dprintk("%s\n", __func__);
- if (stv0288_writeregI(state, 0x05, 0x16))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x03))/* burst mode, single shot */
return -EREMOTEIO;
if (stv0288_writeregI(state, 0x06, burst == SEC_MINI_A ? 0x00 : 0xff))
return -EREMOTEIO;
- if (stv0288_writeregI(state, 0x06, 0x12))
+ msleep(15);
+ if (stv0288_writeregI(state, 0x05, 0x12))
return -EREMOTEIO;
return 0;
@@ -192,18 +194,19 @@ static int stv0288_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
switch (tone) {
case SEC_TONE_ON:
- if (stv0288_writeregI(state, 0x05, 0x10))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x10))/* cont carrier */
return -EREMOTEIO;
- return stv0288_writeregI(state, 0x06, 0xff);
+ break;
case SEC_TONE_OFF:
- if (stv0288_writeregI(state, 0x05, 0x13))/* burst mode */
+ if (stv0288_writeregI(state, 0x05, 0x12))/* burst mode off*/
return -EREMOTEIO;
- return stv0288_writeregI(state, 0x06, 0x00);
+ break;
default:
return -EINVAL;
}
+ return 0;
}
static u8 stv0288_inittab[] = {
@@ -486,7 +489,7 @@ static int stv0288_set_frontend(struct dvb_frontend *fe,
tda[2] = 0x0; /* CFRL */
for (tm = -6; tm < 7;) {
/* Viterbi status */
- if (stv0288_readreg(state, 0x24) & 0x80)
+ if (stv0288_readreg(state, 0x24) & 0x8)
break;
tda[2] += 40;
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 968874469726..4e3db3a42e06 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -92,7 +92,7 @@ static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0299_write(struct dvb_frontend* fe, u8 *buf, int len)
+static int stv0299_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
struct stv0299_state* state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/stv0299.h b/drivers/media/dvb/frontends/stv0299.h
index 0fd96e22b650..ba219b767a69 100644
--- a/drivers/media/dvb/frontends/stv0299.h
+++ b/drivers/media/dvb/frontends/stv0299.h
@@ -65,7 +65,7 @@ struct stv0299_config
* First of each pair is the register, second is the value.
* List should be terminated with an 0xff, 0xff pair.
*/
- u8* inittab;
+ const u8* inittab;
/* master clock to use */
u32 mclk;
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index f2a8abe0a243..ea485d923550 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -598,7 +598,7 @@ static int tda1004x_decode_fec(int tdafec)
return -1;
}
-static int tda1004x_write(struct dvb_frontend* fe, u8 *buf, int len)
+static int tda1004x_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
struct tda1004x_state* state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index 8c612719adfc..adbbf6d3d044 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -64,7 +64,7 @@ static int zl10353_single_write(struct dvb_frontend *fe, u8 reg, u8 val)
return 0;
}
-static int zl10353_write(struct dvb_frontend *fe, u8 *ibuf, int ilen)
+static int zl10353_write(struct dvb_frontend *fe, const u8 ibuf[], int ilen)
{
int err, i;
for (i = 0; i < ilen - 1; i++)
diff --git a/drivers/media/dvb/mantis/mantis_core.c b/drivers/media/dvb/mantis/mantis_core.c
index 8113b23ce448..22524a8e6f61 100644
--- a/drivers/media/dvb/mantis/mantis_core.c
+++ b/drivers/media/dvb/mantis/mantis_core.c
@@ -91,10 +91,7 @@ static int get_mac_address(struct mantis_pci *mantis)
return err;
}
dprintk(verbose, MANTIS_ERROR, 0,
- " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
- mantis->mac_address[0], mantis->mac_address[1],
- mantis->mac_address[2], mantis->mac_address[3],
- mantis->mac_address[4], mantis->mac_address[5]);
+ " MAC Address=[%pM]\n", mantis->mac_address);
return 0;
}
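
Here, and in the mantis_ioc.c hunk below, six hand-rolled %02x conversions are replaced by the kernel's %pM printk extension, which formats a pointer to a 6-byte buffer as a colon-separated MAC address. A one-line illustration of the extension (kernel printk only, not C99 printf), using a made-up address:

u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };	/* example address */

pr_info("MAC Address=[%pM]\n", mac);	/* -> MAC Address=[00:11:22:33:44:55] */
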
diff --git a/drivers/media/dvb/mantis/mantis_i2c.c b/drivers/media/dvb/mantis/mantis_i2c.c
index 7870bcf9689a..e7794517fe26 100644
--- a/drivers/media/dvb/mantis/mantis_i2c.c
+++ b/drivers/media/dvb/mantis/mantis_i2c.c
@@ -229,7 +229,6 @@ int __devinit mantis_i2c_init(struct mantis_pci *mantis)
i2c_set_adapdata(i2c_adapter, mantis);
i2c_adapter->owner = THIS_MODULE;
- i2c_adapter->class = I2C_CLASS_TV_DIGITAL;
i2c_adapter->algo = &mantis_algo;
i2c_adapter->algo_data = NULL;
i2c_adapter->timeout = 500;
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
index de148ded52d8..fe31cfb0b158 100644
--- a/drivers/media/dvb/mantis/mantis_ioc.c
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -68,14 +68,7 @@ int mantis_get_mac(struct mantis_pci *mantis)
return err;
}
- dprintk(MANTIS_ERROR, 0,
- " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
- mac_addr[0],
- mac_addr[1],
- mac_addr[2],
- mac_addr[3],
- mac_addr[4],
- mac_addr[5]);
+ dprintk(MANTIS_ERROR, 0, " MAC Address=[%pM]\n", mac_addr);
return 0;
}
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c
index 477fe0aade86..c3ae956714e7 100644
--- a/drivers/media/dvb/ngene/ngene-i2c.c
+++ b/drivers/media/dvb/ngene/ngene-i2c.c
@@ -165,7 +165,6 @@ int ngene_i2c_init(struct ngene *dev, int dev_nr)
struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
- adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
strcpy(adap->name, "nGene");
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1c798219dc7c..6ca6713d527a 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -647,7 +647,6 @@ static int __devinit pluto2_probe(struct pci_dev *pdev,
i2c_set_adapdata(&pluto->i2c_adap, pluto);
strcpy(pluto->i2c_adap.name, DRIVER_NAME);
pluto->i2c_adap.owner = THIS_MODULE;
- pluto->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
pluto->i2c_adap.dev.parent = &pdev->dev;
pluto->i2c_adap.algo_data = &pluto->i2c_bit;
pluto->i2c_bit.data = pluto;
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 69ad94934ec2..0486919c1d0f 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -1087,7 +1087,6 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt1_update_power(pt1);
i2c_adap = &pt1->i2c_adap;
- i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = &pt1_i2c_algo;
i2c_adap->algo_data = NULL;
i2c_adap->dev.parent = &pdev->dev;
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index ff3b0fa901b3..135e45bd00c7 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1504,8 +1504,7 @@ int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
u32 msgData[3]; /* keep it 3 ! */
} *pMsg;
- if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER) ||
- (PinNum > MAX_GPIO_PIN_NUMBER))
+ if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER))
return -EINVAL;
totalLen = sizeof(struct SmsMsgHdr_ST) +
diff --git a/drivers/media/dvb/siano/smsir.c b/drivers/media/dvb/siano/smsir.c
index d0e4639ee9db..a27c44a8af5a 100644
--- a/drivers/media/dvb/siano/smsir.c
+++ b/drivers/media/dvb/siano/smsir.c
@@ -40,7 +40,7 @@ void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
const s32 *samples = (const void *)buf;
for (i = 0; i < len >> 2; i++) {
- struct ir_raw_event ev;
+ DEFINE_IR_RAW_EVENT(ev);
ev.duration = abs(samples[i]) * 1000; /* Convert to ns */
ev.pulse = (samples[i] > 0) ? false : true;
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index a12b88f53ed9..fc0a60f8a1e1 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -2472,7 +2472,6 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
get recognized before the main driver is fully loaded */
saa7146_write(dev, GPIO_CTRL, 0x500000);
- av7110->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
strlcpy(av7110->i2c_adap.name, pci_ext->ext_priv, sizeof(av7110->i2c_adap.name));
saa7146_i2c_adapter_prepare(dev, &av7110->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120); /* 275 kHz */
@@ -2886,7 +2885,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension av7110_extension_driver = {
- .name = "dvb",
+ .name = "av7110",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 244d5d51f5f9..952b33dbac4f 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -245,8 +245,11 @@ int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen)
return -1;
}
while (1) {
- if ((len = dvb_ringbuffer_avail(buf)) < 6)
+ len = dvb_ringbuffer_avail(buf);
+ if (len < 6) {
+ wake_up(&buf->queue);
return -1;
+ }
sync = DVB_RINGBUFFER_PEEK(buf, 0) << 24;
sync |= DVB_RINGBUFFER_PEEK(buf, 1) << 16;
sync |= DVB_RINGBUFFER_PEEK(buf, 2) << 8;
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 054661315311..37666d4edab6 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -495,8 +495,6 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
if (bi->type != BUDGET_FS_ACTIVY)
saa7146_write(dev, GPIO_CTRL, 0x500000); /* GPIO 3 = 1 */
- budget->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
-
strlcpy(budget->i2c_adap.name, budget->card->name, sizeof(budget->i2c_adap.name));
saa7146_i2c_adapter_prepare(dev, &budget->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120);
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 4a3f2b8ea37d..40625b26ac10 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1694,7 +1694,6 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
i2c_set_adapdata(&ttusb->i2c_adap, ttusb);
- ttusb->i2c_adap.class = I2C_CLASS_TV_DIGITAL;
ttusb->i2c_adap.algo = &ttusb_dec_algo;
ttusb->i2c_adap.algo_data = NULL;
ttusb->i2c_adap.dev.parent = &udev->dev;
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 482d0f3be5ff..b701ea6e7c73 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -374,7 +374,8 @@ static int vidioc_g_tuner(struct file *file, void *priv,
switch (v->index) {
case 0:
strlcpy(v->name, "FM", sizeof(v->name));
- v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS;
+ v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
+ V4L2_TUNER_CAP_RDS_BLOCK_IO;
v->rangelow = 1400; /* 87.5 MHz */
v->rangehigh = 1728; /* 108.0 MHz */
v->rxsubchans = cadet_getstereo(dev);
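
Several receivers in this series (cadet here, si470x further down) start advertising V4L2_TUNER_CAP_RDS_BLOCK_IO next to V4L2_TUNER_CAP_RDS, meaning raw RDS blocks can be read() from the radio node, while the si4713 transmitter advertises V4L2_TUNER_CAP_RDS_CONTROLS instead. A minimal user-space sketch of the block-I/O side, using the struct v4l2_rds_data layout from the V4L2 RDS interface; the device path is illustrative:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_rds_data blk;       /* 3 bytes: lsb, msb, block index + flags */
            int fd = open("/dev/radio0", O_RDONLY);

            if (fd < 0)
                    return 1;
            while (read(fd, &blk, sizeof(blk)) == (ssize_t)sizeof(blk)) {
                    if (blk.block & V4L2_RDS_BLOCK_ERROR)
                            continue;       /* skip blocks the tuner flagged as bad */
                    printf("block %u: %02x%02x\n",
                           blk.block & V4L2_RDS_BLOCK_MSK, blk.msb, blk.lsb);
            }
            close(fd);
            return 0;
    }
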
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 353b82855949..b540e8072e92 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -176,8 +176,6 @@ static int amradio_set_mute(struct amradio_device *radio, char argument)
int retval;
int size;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -207,8 +205,6 @@ static int amradio_setfreq(struct amradio_device *radio, int freq)
int size;
unsigned short freq_send = 0x10 + (freq >> 3) / 25;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -253,8 +249,6 @@ static int amradio_set_stereo(struct amradio_device *radio, char argument)
int retval;
int size;
- BUG_ON(!mutex_is_locked(&radio->lock));
-
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
@@ -290,11 +284,13 @@ static void usb_amradio_disconnect(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- radio->usbdev = NULL;
- mutex_unlock(&radio->lock);
-
+ /* increase the device node's refcount */
+ get_device(&radio->videodev.dev);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
+ mutex_unlock(&radio->lock);
+ /* decrease the device node's refcount, allowing it to be released */
+ put_device(&radio->videodev.dev);
}
/* vidioc_querycap - query device capabilities */
@@ -503,28 +499,18 @@ out:
static int usb_amradio_open(struct file *file)
{
struct amradio_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
-
- if (!radio->usbdev) {
- retval = -EIO;
- goto unlock;
- }
+ int retval;
file->private_data = radio;
retval = usb_autopm_get_interface(radio->intf);
if (retval)
- goto unlock;
+ return retval;
if (unlikely(!radio->initialized)) {
retval = usb_amradio_init(radio);
if (retval)
usb_autopm_put_interface(radio->intf);
}
-
-unlock:
- mutex_unlock(&radio->lock);
return retval;
}
@@ -532,37 +518,10 @@ unlock:
static int usb_amradio_close(struct file *file)
{
struct amradio_device *radio = file->private_data;
- int retval = 0;
-
- mutex_lock(&radio->lock);
- if (!radio->usbdev)
- retval = -EIO;
- else
+ if (video_is_registered(&radio->videodev))
usb_autopm_put_interface(radio->intf);
-
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-static long usb_amradio_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct amradio_device *radio = file->private_data;
- long retval = 0;
-
- mutex_lock(&radio->lock);
-
- if (!radio->usbdev) {
- retval = -EIO;
- goto unlock;
- }
-
- retval = video_ioctl2(file, cmd, arg);
-
-unlock:
- mutex_unlock(&radio->lock);
- return retval;
+ return 0;
}
/* Suspend device - stop device. Need to be checked and fixed */
@@ -571,15 +530,13 @@ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
-
if (!radio->muted && radio->initialized) {
amradio_set_mute(radio, AMRADIO_STOP);
radio->muted = 0;
}
+ mutex_unlock(&radio->lock);
dev_info(&intf->dev, "going into suspend..\n");
-
- mutex_unlock(&radio->lock);
return 0;
}
@@ -589,7 +546,6 @@ static int usb_amradio_resume(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
-
if (unlikely(!radio->initialized))
goto unlock;
@@ -604,9 +560,9 @@ static int usb_amradio_resume(struct usb_interface *intf)
amradio_set_mute(radio, AMRADIO_START);
unlock:
- dev_info(&intf->dev, "coming out of suspend..\n");
-
mutex_unlock(&radio->lock);
+
+ dev_info(&intf->dev, "coming out of suspend..\n");
return 0;
}
@@ -615,7 +571,7 @@ static const struct v4l2_file_operations usb_amradio_fops = {
.owner = THIS_MODULE,
.open = usb_amradio_open,
.release = usb_amradio_close,
- .ioctl = usb_amradio_ioctl,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
@@ -671,19 +627,20 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_v4l2;
}
+ mutex_init(&radio->lock);
+
strlcpy(radio->videodev.name, radio->v4l2_dev.name,
sizeof(radio->videodev.name));
radio->videodev.v4l2_dev = &radio->v4l2_dev;
radio->videodev.fops = &usb_amradio_fops;
radio->videodev.ioctl_ops = &usb_amradio_ioctl_ops;
radio->videodev.release = usb_amradio_video_device_release;
+ radio->videodev.lock = &radio->lock;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
radio->curfreq = 95.16 * FREQ_MUL;
- mutex_init(&radio->lock);
-
video_set_drvdata(&radio->videodev, radio);
retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
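
The radio-mr800 rework shows a pattern that recurs through the rest of this series: the driver stops open-coding usbdev checks and per-handler mutex juggling, points videodev.lock at its own mutex, and switches from .ioctl to .unlocked_ioctl = video_ioctl2 so the V4L2 core serialises every ioctl for it; disconnect is handled through video_is_registered()/video_unregister_device() plus a get_device()/put_device() pair on the device node. A condensed sketch of the registration side under the v4l2-dev API used in this tree; struct sketch_dev and the empty handlers are placeholders, not mr800 code:

    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <media/v4l2-dev.h>
    #include <media/v4l2-ioctl.h>

    struct sketch_dev {                     /* illustrative driver state */
            struct video_device vdev;
            struct mutex lock;
    };

    static int sketch_open(struct file *file)  { return 0; }
    static int sketch_close(struct file *file) { return 0; }

    static const struct v4l2_file_operations sketch_fops = {
            .owner          = THIS_MODULE,
            .open           = sketch_open,
            .release        = sketch_close,
            .unlocked_ioctl = video_ioctl2,         /* no BKL, no .ioctl */
    };

    static const struct v4l2_ioctl_ops sketch_ioctl_ops = {
            /* vidioc_* handlers would go here */
    };

    static int sketch_register(struct sketch_dev *dev)
    {
            mutex_init(&dev->lock);
            dev->vdev.fops      = &sketch_fops;
            dev->vdev.ioctl_ops = &sketch_ioctl_ops;
            dev->vdev.release   = video_device_release_empty;
            dev->vdev.lock      = &dev->lock;       /* core takes this around ioctls */
            return video_register_device(&dev->vdev, VFL_TYPE_RADIO, -1);
    }
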
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index 13554ab13f76..6a435786b63d 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -291,19 +291,19 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
goto unregister_v4l2_dev;
}
- sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, "si4713_i2c",
+ sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, NULL,
pdata->subdev_board_info, NULL);
if (!sd) {
dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
rval = -ENODEV;
- goto unregister_v4l2_dev;
+ goto put_adapter;
}
rsdev->radio_dev = video_device_alloc();
if (!rsdev->radio_dev) {
dev_err(&pdev->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
- goto unregister_v4l2_dev;
+ goto put_adapter;
}
memcpy(rsdev->radio_dev, &radio_si4713_vdev_template,
@@ -320,6 +320,8 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
free_vdev:
video_device_release(rsdev->radio_dev);
+put_adapter:
+ i2c_put_adapter(adapter);
unregister_v4l2_dev:
v4l2_device_unregister(&rsdev->v4l2_dev);
free_rsdev:
@@ -335,8 +337,12 @@ static int __exit radio_si4713_pdriver_remove(struct platform_device *pdev)
struct radio_si4713_device *rsdev = container_of(v4l2_dev,
struct radio_si4713_device,
v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
video_unregister_device(rsdev->radio_dev);
+ i2c_put_adapter(client->adapter);
v4l2_device_unregister(&rsdev->v4l2_dev);
kfree(rsdev);
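
In radio-si4713 the error paths gain a put_adapter label and the remove path digs the adapter out of the registered subdevice's i2c client, so the reference taken by i2c_get_adapter() earlier in probe (outside this hunk) is dropped on every exit path. A minimal sketch of the balanced pattern; sketch_attach_subdev() is a placeholder for the real subdevice setup:

    #include <linux/i2c.h>

    static int sketch_attach_subdev(struct i2c_adapter *adap) { return 0; }

    static int sketch_probe(int i2c_bus_nr)
    {
            struct i2c_adapter *adap = i2c_get_adapter(i2c_bus_nr); /* takes a ref */

            if (!adap)
                    return -ENODEV;
            if (sketch_attach_subdev(adap) < 0) {
                    i2c_put_adapter(adap);          /* drop the ref on failure */
                    return -ENODEV;
            }
            /* success: keep the ref; remove() must call i2c_put_adapter() */
            return 0;
    }
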
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 9927a595b426..ac76dfe5b3fa 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -408,17 +408,15 @@ done:
/*
* si470x_rds_on - switch on rds reception
*/
-int si470x_rds_on(struct si470x_device *radio)
+static int si470x_rds_on(struct si470x_device *radio)
{
int retval;
/* sysconfig 1 */
- mutex_lock(&radio->lock);
radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS;
retval = si470x_set_register(radio, SYSCONFIG1);
if (retval < 0)
radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
- mutex_unlock(&radio->lock);
return retval;
}
@@ -440,6 +438,7 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
unsigned int block_count = 0;
/* switch on rds reception */
+ mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
@@ -480,9 +479,9 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
buf += 3;
retval += 3;
}
- mutex_unlock(&radio->lock);
done:
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -497,8 +496,11 @@ static unsigned int si470x_fops_poll(struct file *file,
int retval = 0;
/* switch on rds reception */
+
+ mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
+ mutex_unlock(&radio->lock);
poll_wait(file, &radio->read_queue, pts);
@@ -516,7 +518,7 @@ static const struct v4l2_file_operations si470x_fops = {
.owner = THIS_MODULE,
.read = si470x_fops_read,
.poll = si470x_fops_poll,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.open = si470x_fops_open,
.release = si470x_fops_release,
};
@@ -572,6 +574,7 @@ static int si470x_vidioc_g_ctrl(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -594,6 +597,8 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get control failed with %d\n", retval);
+
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -607,6 +612,7 @@ static int si470x_vidioc_s_ctrl(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -633,6 +639,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set control failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -662,6 +669,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -681,7 +689,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->type = V4L2_TUNER_RADIO;
#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
- V4L2_TUNER_CAP_RDS;
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
#else
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
#endif
@@ -737,6 +745,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get tuner failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -750,6 +759,7 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -776,6 +786,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set tuner failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -790,6 +801,7 @@ static int si470x_vidioc_g_frequency(struct file *file, void *priv,
int retval = 0;
/* safety checks */
+ mutex_lock(&radio->lock);
retval = si470x_disconnect_check(radio);
if (retval)
goto done;
@@ -806,6 +818,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"get frequency failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -819,6 +832,7 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -835,6 +849,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set frequency failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -848,6 +863,7 @@ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
+ mutex_lock(&radio->lock);
/* safety checks */
retval = si470x_disconnect_check(radio);
if (retval)
@@ -864,6 +880,7 @@ done:
if (retval < 0)
dev_warn(&radio->videodev->dev,
"set hardware frequency seek failed with %d\n", retval);
+ mutex_unlock(&radio->lock);
return retval;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 5ec13e50a9f0..392e84fe90ef 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -517,7 +517,7 @@ int si470x_fops_open(struct file *file)
struct si470x_device *radio = video_drvdata(file);
int retval;
- lock_kernel();
+ mutex_lock(&radio->lock);
radio->users++;
retval = usb_autopm_get_interface(radio->intf);
@@ -558,7 +558,7 @@ int si470x_fops_open(struct file *file)
}
done:
- unlock_kernel();
+ mutex_unlock(&radio->lock);
return retval;
}
@@ -577,7 +577,7 @@ int si470x_fops_release(struct file *file)
goto done;
}
- mutex_lock(&radio->disconnect_lock);
+ mutex_lock(&radio->lock);
radio->users--;
if (radio->users == 0) {
/* shutdown interrupt handler */
@@ -591,7 +591,7 @@ int si470x_fops_release(struct file *file)
video_unregister_device(radio->videodev);
kfree(radio->int_in_buffer);
kfree(radio->buffer);
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
kfree(radio);
goto done;
}
@@ -603,7 +603,7 @@ int si470x_fops_release(struct file *file)
retval = si470x_stop(radio);
usb_autopm_put_interface(radio->intf);
}
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
done:
return retval;
}
@@ -661,7 +661,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->disconnected = 0;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
- mutex_init(&radio->disconnect_lock);
mutex_init(&radio->lock);
iface_desc = intf->cur_altsetting;
@@ -830,7 +829,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
{
struct si470x_device *radio = usb_get_intfdata(intf);
- mutex_lock(&radio->disconnect_lock);
+ mutex_lock(&radio->lock);
radio->disconnected = 1;
usb_set_intfdata(intf, NULL);
if (radio->users == 0) {
@@ -843,10 +842,10 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
kfree(radio->int_in_buffer);
video_unregister_device(radio->videodev);
kfree(radio->buffer);
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
kfree(radio);
} else {
- mutex_unlock(&radio->disconnect_lock);
+ mutex_unlock(&radio->lock);
}
}
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 3cd0a29cd6e7..ea12782359a0 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -177,7 +177,6 @@ struct si470x_device {
/* driver management */
unsigned char disconnected;
- struct mutex disconnect_lock;
#endif
#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
@@ -221,7 +220,6 @@ int si470x_disconnect_check(struct si470x_device *radio);
int si470x_set_freq(struct si470x_device *radio, unsigned int freq);
int si470x_start(struct si470x_device *radio);
int si470x_stop(struct si470x_device *radio);
-int si470x_rds_on(struct si470x_device *radio);
int si470x_fops_open(struct file *file);
int si470x_fops_release(struct file *file);
int si470x_vidioc_querycap(struct file *file, void *priv,
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index fc7f4b794649..a6e6f1987a3a 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1804,7 +1804,7 @@ static int si4713_g_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm)
strncpy(vm->name, "FM Modulator", 32);
vm->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW |
- V4L2_TUNER_CAP_RDS;
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_CONTROLS;
/* Report current frequency range limits */
vm->rangelow = si4713_to_v4l2(FREQ_RANGE_LOW);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 90cae90277e7..7c0d77751f6e 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index d000522cb0f4..ac16e815e275 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -539,7 +539,7 @@ config VIDEO_VIU
config VIDEO_VIVI
tristate "Virtual Video Driver"
depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
- depends on (FRAMEBUFFER_CONSOLE || STI_CONSOLE) && FONTS
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
select FONT_8x16
select VIDEOBUF_VMALLOC
default n
@@ -599,68 +599,8 @@ config VIDEO_W9966
Check out <file:Documentation/video4linux/w9966.txt> for more
information.
-config VIDEO_CPIA
- tristate "CPiA Video For Linux (DEPRECATED)"
- depends on VIDEO_V4L1
- default n
- ---help---
- This driver is DEPRECATED please use the gspca cpia1 module
- instead. Note that you need atleast version 0.6.4 of libv4l for
- the cpia1 gspca module.
-
- This is the video4linux driver for cameras based on Vision's CPiA
- (Colour Processor Interface ASIC), such as the Creative Labs Video
- Blaster Webcam II. If you have one of these cameras, say Y here
- and select parallel port and/or USB lowlevel support below,
- otherwise say N. This will not work with the Creative Webcam III.
-
- Please read <file:Documentation/video4linux/README.cpia> for more
- information.
-
- This driver is also available as a module (cpia).
-
-config VIDEO_CPIA_PP
- tristate "CPiA Parallel Port Lowlevel Support"
- depends on PARPORT_1284 && VIDEO_CPIA && PARPORT
- help
- This is the lowlevel parallel port support for cameras based on
- Vision's CPiA (Colour Processor Interface ASIC), such as the
- Creative Webcam II. If you have the parallel port version of one
- of these cameras, say Y here, otherwise say N. It is also available
- as a module (cpia_pp).
-
-config VIDEO_CPIA_USB
- tristate "CPiA USB Lowlevel Support"
- depends on VIDEO_CPIA && USB
- help
- This is the lowlevel USB support for cameras based on Vision's CPiA
- (Colour Processor Interface ASIC), such as the Creative Webcam II.
- If you have the USB version of one of these cameras, say Y here,
- otherwise say N. This will not work with the Creative Webcam III.
- It is also available as a module (cpia_usb).
-
source "drivers/media/video/cpia2/Kconfig"
-config VIDEO_SAA5246A
- tristate "SAA5246A, SAA5281 Teletext processor"
- depends on I2C && VIDEO_V4L2
- help
- Support for I2C bus based teletext using the SAA5246A or SAA5281
- chip. Useful only if you live in Europe.
-
- To compile this driver as a module, choose M here: the
- module will be called saa5246a.
-
-config VIDEO_SAA5249
- tristate "SAA5249 Teletext processor"
- depends on I2C && VIDEO_V4L2
- help
- Support for I2C bus based teletext using the SAA5249 chip. At the
- moment this is only useful on some European WinTV cards.
-
- To compile this driver as a module, choose M here: the
- module will be called saa5249.
-
config VIDEO_VINO
tristate "SGI Vino Video For Linux (EXPERIMENTAL)"
depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L2
@@ -669,14 +609,6 @@ config VIDEO_VINO
Say Y here to build in support for the Vino video input system found
on SGI Indy machines.
-config VIDEO_STRADIS
- tristate "Stradis 4:2:2 MPEG-2 video driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS
- help
- Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
- driver for PCI. There is a product page at
- <http://www.stradis.com/>.
-
source "drivers/media/video/zoran/Kconfig"
config VIDEO_MEYE
@@ -774,6 +706,22 @@ config VIDEO_CAFE_CCIC
CMOS camera controller. This is the controller found on first-
generation OLPC systems.
+config VIDEO_SR030PC30
+ tristate "SR030PC30 VGA camera sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This driver supports SR030PC30 VGA camera from Siliconfile
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on FB_VIA
+ select VIDEOBUF_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
+
config SOC_CAMERA
tristate "SoC camera support"
depends on VIDEO_V4L2 && HAS_DMA && I2C
@@ -783,6 +731,12 @@ config SOC_CAMERA
over a bus like PCI or USB. For example some i2c camera connected
directly to the data bus of an SoC.
+config SOC_CAMERA_IMX074
+ tristate "imx074 support"
+ depends on SOC_CAMERA && I2C
+ help
+ This driver supports IMX074 cameras from Sony
+
config SOC_CAMERA_MT9M001
tristate "mt9m001 support"
depends on SOC_CAMERA && I2C
@@ -835,6 +789,12 @@ config SOC_CAMERA_PLATFORM
help
This is a generic SoC camera platform driver, useful for testing
+config SOC_CAMERA_OV6650
+ tristate "ov6650 sensor support"
+ depends on SOC_CAMERA && I2C
+ ---help---
+ This is a V4L2 SoC camera driver for the OmniVision OV6650 sensor
+
config SOC_CAMERA_OV772X
tristate "ov772x camera support"
depends on SOC_CAMERA && I2C
@@ -890,6 +850,14 @@ config VIDEO_SH_MOBILE_CEU
---help---
This is a v4l2 driver for the SuperH Mobile CEU Interface
+config VIDEO_OMAP1
+ tristate "OMAP1 Camera Interface driver"
+ depends on VIDEO_DEV && ARCH_OMAP1 && SOC_CAMERA
+ select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF_DMA_SG
+ ---help---
+ This is a v4l2 driver for the TI OMAP1 camera interface
+
config VIDEO_OMAP2
tristate "OMAP2 Camera Capture Interface driver"
depends on VIDEO_DEV && ARCH_OMAP2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 40f98fba5f88..af79d476a4c8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -33,8 +33,6 @@ obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
-obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
-obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
@@ -73,12 +71,15 @@ obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
+obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
+obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
+obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
obj-$(CONFIG_SOC_CAMERA_RJ54N1) += rj54n1cb0c.o
@@ -93,10 +94,6 @@ obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
obj-$(CONFIG_VIDEO_W9966) += w9966.o
obj-$(CONFIG_VIDEO_PMS) += pms.o
obj-$(CONFIG_VIDEO_VINO) += vino.o
-obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
-obj-$(CONFIG_VIDEO_CPIA) += cpia.o
-obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
-obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
obj-$(CONFIG_VIDEO_MEYE) += meye.o
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_CX88) += cx88/
@@ -125,6 +122,8 @@ obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
+
obj-$(CONFIG_USB_DABUSB) += dabusb.o
obj-$(CONFIG_USB_SE401) += se401.o
obj-$(CONFIG_USB_ZR364XX) += zr364xx.o
@@ -163,6 +162,7 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 48e89fbf391b..23ba5c37c3e4 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
@@ -337,9 +335,25 @@ static const struct i2c_device_id adv7170_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "adv7170",
- .probe = adv7170_probe,
- .remove = adv7170_remove,
- .id_table = adv7170_id,
+static struct i2c_driver adv7170_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7170",
+ },
+ .probe = adv7170_probe,
+ .remove = adv7170_remove,
+ .id_table = adv7170_id,
};
+
+static __init int init_adv7170(void)
+{
+ return i2c_add_driver(&adv7170_driver);
+}
+
+static __exit void exit_adv7170(void)
+{
+ i2c_del_driver(&adv7170_driver);
+}
+
+module_init(init_adv7170);
+module_exit(exit_adv7170);
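
adv7170 is the first of several identical conversions in this diff (adv7175, bt819, bt856 and bt866 follow): the v4l2-i2c-drv.h wrapper goes away and the driver registers a plain struct i2c_driver with explicit module_init()/module_exit() glue. With the APIs in this tree that explicit pair is what is available; in later kernels the same boilerplate collapses into module_i2c_driver(). A sketch of that later shorthand for comparison; the probe/remove/id_table stubs are placeholders mirroring the adv7170 shape:

    #include <linux/module.h>
    #include <linux/i2c.h>

    static int sketch_probe(struct i2c_client *client,
                            const struct i2c_device_id *id) { return 0; }
    static int sketch_remove(struct i2c_client *client)      { return 0; }

    static const struct i2c_device_id sketch_id[] = {
            { "sketch", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, sketch_id);

    static struct i2c_driver sketch_driver = {
            .driver = {
                    .owner = THIS_MODULE,
                    .name  = "sketch",
            },
            .probe    = sketch_probe,
            .remove   = sketch_remove,
            .id_table = sketch_id,
    };

    /* Available in later kernels only; expands to the init/exit pair above. */
    module_i2c_driver(sketch_driver);

    MODULE_LICENSE("GPL");
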
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f1ba0d742c65..f318b51448b3 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -30,11 +30,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
MODULE_AUTHOR("Dave Perks");
@@ -376,9 +374,25 @@ static const struct i2c_device_id adv7175_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adv7175_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "adv7175",
- .probe = adv7175_probe,
- .remove = adv7175_remove,
- .id_table = adv7175_id,
+static struct i2c_driver adv7175_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7175",
+ },
+ .probe = adv7175_probe,
+ .remove = adv7175_remove,
+ .id_table = adv7175_id,
};
+
+static __init int init_adv7175(void)
+{
+ return i2c_add_driver(&adv7175_driver);
+}
+
+static __exit void exit_adv7175(void)
+{
+ i2c_del_driver(&adv7175_driver);
+}
+
+module_init(init_adv7175);
+module_exit(exit_adv7175);
diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
index 23e610f62736..d2138d06bcad 100644
--- a/drivers/media/video/adv7180.c
+++ b/drivers/media/video/adv7180.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 57dd9195daf5..0453816d4ec3 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev)
be abstracted out if we ever need to support a different
demod) */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "au8522", "au8522", 0x8e >> 1, NULL);
+ NULL, "au8522", 0x8e >> 1, NULL);
if (sd == NULL)
printk(KERN_ERR "analog subdev registration failed\n");
}
@@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev)
if (dev->board.tuner_type != TUNER_ABSENT) {
/* Load the tuner module, which does the attach */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->board.tuner_addr, NULL);
+ NULL, "tuner", dev->board.tuner_addr, NULL);
if (sd == NULL)
printk(KERN_ERR "tuner subdev registration fail\n");
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 7989a7ba7c40..162fd5f9d448 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -965,7 +965,7 @@ static int au0828_v4l2_open(struct file *filp)
NULL, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
- sizeof(struct au0828_buffer), fh);
+ sizeof(struct au0828_buffer), fh, NULL);
/* VBI Setup */
dev->vbi_width = 720;
@@ -974,7 +974,7 @@ static int au0828_v4l2_open(struct file *filp)
NULL, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
- sizeof(struct au0828_buffer), fh);
+ sizeof(struct au0828_buffer), fh, NULL);
return ret;
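
The extra NULL argument appended to the videobuf_queue_sg_init() calls here (and to the bttv calls and the videobuf_waiton() queue argument later in this diff) is the new ext_lock parameter introduced in this kernel cycle: a driver may hand videobuf its own mutex so the queue operations serialise against the driver lock, or pass NULL, as au0828 does, and keep doing its own locking. A minimal sketch of the call with both options spelled out; the wrapper function is illustrative:

    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <media/videobuf-dma-sg.h>

    static void sketch_queue_init(struct videobuf_queue *q,
                                  const struct videobuf_queue_ops *ops,
                                  struct device *dev, spinlock_t *slock,
                                  void *priv, struct mutex *drv_lock)
    {
            /* drv_lock != NULL: videobuf uses the driver mutex for its own
             * locking; drv_lock == NULL: the driver serialises everything
             * itself, which is the choice au0828 makes above. */
            videobuf_queue_sg_init(q, ops, dev, slock,
                                   V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                   V4L2_FIELD_INTERLACED,
                                   sizeof(struct videobuf_buffer),
                                   priv, drv_lock);
    }
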
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 770cb9accf81..c38300fc0b1d 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -33,12 +33,10 @@
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/bt819.h>
MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
@@ -537,9 +535,25 @@ static const struct i2c_device_id bt819_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt819_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt819",
- .probe = bt819_probe,
- .remove = bt819_remove,
- .id_table = bt819_id,
+static struct i2c_driver bt819_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt819",
+ },
+ .probe = bt819_probe,
+ .remove = bt819_remove,
+ .id_table = bt819_id,
};
+
+static __init int init_bt819(void)
+{
+ return i2c_add_driver(&bt819_driver);
+}
+
+static __exit void exit_bt819(void)
+{
+ i2c_del_driver(&bt819_driver);
+}
+
+module_init(init_bt819);
+module_exit(exit_bt819);
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index ae3337392505..a43059d4c799 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Brooktree-856A video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
@@ -262,9 +260,25 @@ static const struct i2c_device_id bt856_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt856_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt856",
- .probe = bt856_probe,
- .remove = bt856_remove,
- .id_table = bt856_id,
+static struct i2c_driver bt856_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt856",
+ },
+ .probe = bt856_probe,
+ .remove = bt856_remove,
+ .id_table = bt856_id,
};
+
+static __init int init_bt856(void)
+{
+ return i2c_add_driver(&bt856_driver);
+}
+
+static __exit void exit_bt856(void)
+{
+ i2c_del_driver(&bt856_driver);
+}
+
+module_init(init_bt856);
+module_exit(exit_bt856);
diff --git a/drivers/media/video/bt866.c b/drivers/media/video/bt866.c
index 62ac422bb159..4e5dcea0501d 100644
--- a/drivers/media/video/bt866.c
+++ b/drivers/media/video/bt866.c
@@ -34,11 +34,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Brooktree-866 video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
@@ -232,9 +230,25 @@ static const struct i2c_device_id bt866_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bt866_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "bt866",
- .probe = bt866_probe,
- .remove = bt866_remove,
- .id_table = bt866_id,
+static struct i2c_driver bt866_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "bt866",
+ },
+ .probe = bt866_probe,
+ .remove = bt866_remove,
+ .id_table = bt866_id,
};
+
+static __init int init_bt866(void)
+{
+ return i2c_add_driver(&bt866_driver);
+}
+
+static __exit void exit_bt866(void)
+{
+ i2c_del_driver(&bt866_driver);
+}
+
+module_init(init_bt866);
+module_exit(exit_bt866);
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 7af56cde0c79..87d8b006ef77 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -3529,7 +3529,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "saa6588", "saa6588", 0, addrs);
+ &btv->c.i2c_adap, NULL, "saa6588", 0, addrs);
btv->has_saa6588 = (sd != NULL);
}
@@ -3554,7 +3554,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400", 0, addrs);
+ &btv->c.i2c_adap, NULL, "msp3400", 0, addrs);
if (btv->sd_msp34xx)
return;
goto no_audio;
@@ -3568,7 +3568,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs))
+ &btv->c.i2c_adap, NULL, "tda7432", 0, addrs))
return;
goto no_audio;
}
@@ -3576,7 +3576,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
case 3: {
/* The user specified that we should probe for tvaudio */
btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs());
+ &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs());
if (btv->sd_tvaudio)
return;
goto no_audio;
@@ -3596,11 +3596,11 @@ void __devinit bttv_init_card2(struct bttv *btv)
found is really something else (e.g. a tea6300). */
if (!bttv_tvcards[btv->c.type].no_msp34xx) {
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400",
+ &btv->c.i2c_adap, NULL, "msp3400",
0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1));
} else if (bttv_tvcards[btv->c.type].msp34xx_alt) {
btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "msp3400", "msp3400",
+ &btv->c.i2c_adap, NULL, "msp3400",
0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1));
}
@@ -3616,13 +3616,13 @@ void __devinit bttv_init_card2(struct bttv *btv)
};
if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tda7432", "tda7432", 0, addrs))
+ &btv->c.i2c_adap, NULL, "tda7432", 0, addrs))
return;
}
/* Now see if we can find one of the tvaudio devices. */
btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tvaudio", "tvaudio", 0, tvaudio_addrs());
+ &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs());
if (btv->sd_tvaudio)
return;
@@ -3646,13 +3646,13 @@ void __devinit bttv_init_tuner(struct bttv *btv)
/* Load tuner module before issuing tuner config call! */
if (bttv_tvcards[btv->c.type].has_radio)
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
- &btv->c.i2c_adap, "tuner", "tuner",
+ &btv->c.i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));
tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 38c7f78ad9cf..3da6e80e1041 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -842,7 +842,7 @@ static const struct v4l2_queryctrl *ctrl_by_id(int id)
RESOURCE_OVERLAY)
static
-int check_alloc_btres(struct bttv *btv, struct bttv_fh *fh, int bit)
+int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
{
int xbits; /* mutual exclusive resources */
@@ -935,7 +935,7 @@ disclaim_video_lines(struct bttv *btv)
}
static
-void free_btres(struct bttv *btv, struct bttv_fh *fh, int bits)
+void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits)
{
if ((fh->resources & bits) != bits) {
/* trying to free ressources not allocated by us ... */
@@ -1682,7 +1682,7 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
kfree(old);
}
if (NULL == new)
- free_btres(btv,fh,RESOURCE_OVERLAY);
+ free_btres_lock(btv,fh,RESOURCE_OVERLAY);
dprintk("switch_overlay: done\n");
return retval;
}
@@ -1859,21 +1859,25 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id)
unsigned int i;
int err;
+ mutex_lock(&btv->lock);
err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
+ if (err)
+ goto err;
for (i = 0; i < BTTV_TVNORMS; i++)
if (*id & bttv_tvnorms[i].v4l2_id)
break;
- if (i == BTTV_TVNORMS)
- return -EINVAL;
+ if (i == BTTV_TVNORMS) {
+ err = -EINVAL;
+ goto err;
+ }
- mutex_lock(&btv->lock);
set_tvnorm(btv, i);
+
+err:
mutex_unlock(&btv->lock);
- return 0;
+ return err;
}
static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
@@ -1893,10 +1897,13 @@ static int bttv_enum_input(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- int n;
+ int rc = 0;
- if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
- return -EINVAL;
+ mutex_lock(&btv->lock);
+ if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
+ rc = -EINVAL;
+ goto err;
+ }
i->type = V4L2_INPUT_TYPE_CAMERA;
i->audioset = 1;
@@ -1919,10 +1926,12 @@ static int bttv_enum_input(struct file *file, void *priv,
i->status |= V4L2_IN_ST_NO_H_LOCK;
}
- for (n = 0; n < BTTV_TVNORMS; n++)
- i->std |= bttv_tvnorms[n].v4l2_id;
+ i->std = BTTV_NORMS;
- return 0;
+err:
+ mutex_unlock(&btv->lock);
+
+ return rc;
}
static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
@@ -1930,7 +1939,10 @@ static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
*i = btv->input;
+ mutex_unlock(&btv->lock);
+
return 0;
}
@@ -1941,15 +1953,19 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i)
int err;
+ mutex_lock(&btv->lock);
err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
+ if (unlikely(err))
+ goto err;
- if (i > bttv_tvcards[btv->c.type].video_inputs)
- return -EINVAL;
+ if (i > bttv_tvcards[btv->c.type].video_inputs) {
+ err = -EINVAL;
+ goto err;
+ }
- mutex_lock(&btv->lock);
set_input(btv, i, btv->tvnorm);
+
+err:
mutex_unlock(&btv->lock);
return 0;
}
@@ -1961,22 +1977,25 @@ static int bttv_s_tuner(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
-
- if (btv->tuner_type == TUNER_ABSENT)
- return -EINVAL;
-
- if (0 != t->index)
+ if (unlikely(0 != t->index))
return -EINVAL;
mutex_lock(&btv->lock);
+ if (unlikely(btv->tuner_type == TUNER_ABSENT)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = v4l2_prio_check(&btv->prio, fh->prio);
+ if (unlikely(err))
+ goto err;
+
bttv_call_all(btv, tuner, s_tuner, t);
if (btv->audio_mode_gpio)
btv->audio_mode_gpio(btv, t, 1);
+err:
mutex_unlock(&btv->lock);
return 0;
@@ -1988,8 +2007,10 @@ static int bttv_g_frequency(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = btv->freq;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2001,21 +2022,26 @@ static int bttv_s_frequency(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != err)
- return err;
-
if (unlikely(f->tuner != 0))
return -EINVAL;
- if (unlikely(f->type != (btv->radio_user
- ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV)))
- return -EINVAL;
+
mutex_lock(&btv->lock);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
+ if (unlikely(err))
+ goto err;
+
+ if (unlikely(f->type != (btv->radio_user
+ ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV))) {
+ err = -EINVAL;
+ goto err;
+ }
btv->freq = f->frequency;
bttv_call_all(btv, tuner, s_frequency, f);
if (btv->has_matchbox && btv->radio_user)
tea5757_set_freq(btv, btv->freq);
+err:
mutex_unlock(&btv->lock);
+
return 0;
}
@@ -2124,7 +2150,7 @@ bttv_crop_adjust (struct bttv_crop * c,
also adjust the current cropping parameters to get closer to the
desired image size. */
static int
-limit_scaled_size (struct bttv_fh * fh,
+limit_scaled_size_lock (struct bttv_fh * fh,
__s32 * width,
__s32 * height,
enum v4l2_field field,
@@ -2238,7 +2264,7 @@ limit_scaled_size (struct bttv_fh * fh,
may also adjust the current cropping parameters to get closer
to the desired window size. */
static int
-verify_window (struct bttv_fh * fh,
+verify_window_lock (struct bttv_fh * fh,
struct v4l2_window * win,
int adjust_size,
int adjust_crop)
@@ -2257,7 +2283,9 @@ verify_window (struct bttv_fh * fh,
if (V4L2_FIELD_ANY == field) {
__s32 height2;
+ mutex_lock(&fh->btv->lock);
height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1;
+ mutex_unlock(&fh->btv->lock);
field = (win->w.height > height2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_TOP;
@@ -2292,7 +2320,7 @@ verify_window (struct bttv_fh * fh,
win->w.width -= win->w.left & ~width_mask;
win->w.left = (win->w.left - width_mask - 1) & width_mask;
- rc = limit_scaled_size(fh, &win->w.width, &win->w.height,
+ rc = limit_scaled_size_lock(fh, &win->w.width, &win->w.height,
field, width_mask,
/* width_bias: round down */ 0,
adjust_size, adjust_crop);
@@ -2303,7 +2331,7 @@ verify_window (struct bttv_fh * fh,
return 0;
}
-static int setup_window(struct bttv_fh *fh, struct bttv *btv,
+static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv,
struct v4l2_window *win, int fixup)
{
struct v4l2_clip *clips = NULL;
@@ -2313,7 +2341,7 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
return -EINVAL;
if (!(fh->ovfmt->flags & FORMAT_FLAGS_PACKED))
return -EINVAL;
- retval = verify_window(fh, win,
+ retval = verify_window_lock(fh, win,
/* adjust_size */ fixup,
/* adjust_crop */ fixup);
if (0 != retval)
@@ -2332,6 +2360,8 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
return -EFAULT;
}
}
+
+ mutex_lock(&fh->cap.vb_lock);
/* clip against screen */
if (NULL != btv->fbuf.base)
n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height,
@@ -2354,7 +2384,6 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
BUG();
}
- mutex_lock(&fh->cap.vb_lock);
kfree(fh->ov.clips);
fh->ov.clips = clips;
fh->ov.nclips = n;
@@ -2362,6 +2391,14 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
fh->ov.w = win->w;
fh->ov.field = win->field;
fh->ov.setup_ok = 1;
+
+ /*
+ * FIXME: btv is protected by btv->lock mutex, while btv->init
+ * is protected by fh->cap.vb_lock. This seems to open the
+ * possibility for some race situations. Maybe the better would
+ * be to unify those locks or to use another way to store the
+ * init values that will be consumed by videobuf callbacks
+ */
btv->init.ov.w.width = win->w.width;
btv->init.ov.w.height = win->w.height;
btv->init.ov.field = win->field;
@@ -2490,7 +2527,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
if (V4L2_FIELD_ANY == field) {
__s32 height2;
+ mutex_lock(&btv->lock);
height2 = btv->crop[!!fh->do_crop].rect.height >> 1;
+ mutex_unlock(&btv->lock);
field = (f->fmt.pix.height > height2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_BOTTOM;
@@ -2516,7 +2555,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
- rc = limit_scaled_size(fh, &width, &height, field,
+ rc = limit_scaled_size_lock(fh, &width, &height, field,
/* width_mask: 4 pixels */ ~3,
/* width_bias: nearest */ 2,
/* adjust_size */ 1,
@@ -2536,7 +2575,7 @@ static int bttv_try_fmt_vid_overlay(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
- return verify_window(fh, &f->fmt.win,
+ return verify_window_lock(fh, &f->fmt.win,
/* adjust_size */ 1,
/* adjust_crop */ 0);
}
@@ -2563,7 +2602,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
- retval = limit_scaled_size(fh, &width, &height, f->fmt.pix.field,
+ retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
/* width_mask: 4 pixels */ ~3,
/* width_bias: nearest */ 2,
/* adjust_size */ 1,
@@ -2601,7 +2640,7 @@ static int bttv_s_fmt_vid_overlay(struct file *file, void *priv,
return -EINVAL;
}
- return setup_window(fh, btv, &f->fmt.win, 1);
+ return setup_window_lock(fh, btv, &f->fmt.win, 1);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
@@ -2651,11 +2690,15 @@ static int bttv_querycap(struct file *file, void *priv,
V4L2_CAP_VBI_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
- if (btv->has_saa6588)
- cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
if (no_overlay <= 0)
cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
+ /*
+ * No need to lock here: those vars are initialized during board
+ * probe and remains untouched during the rest of the driver lifecycle
+ */
+ if (btv->has_saa6588)
+ cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
if (btv->tuner_type != TUNER_ABSENT)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
@@ -2730,19 +2773,25 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on)
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
struct bttv_buffer *new;
- int retval;
+ int retval = 0;
if (on) {
+ mutex_lock(&fh->cap.vb_lock);
/* verify args */
- if (NULL == btv->fbuf.base)
+ if (unlikely(!btv->fbuf.base)) {
+ mutex_unlock(&fh->cap.vb_lock);
return -EINVAL;
- if (!fh->ov.setup_ok) {
+ }
+ if (unlikely(!fh->ov.setup_ok)) {
dprintk("bttv%d: overlay: !setup_ok\n", btv->c.nr);
- return -EINVAL;
+ retval = -EINVAL;
}
+ if (retval)
+ return retval;
+ mutex_unlock(&fh->cap.vb_lock);
}
- if (!check_alloc_btres(btv, fh, RESOURCE_OVERLAY))
+ if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY))
return -EBUSY;
mutex_lock(&fh->cap.vb_lock);
@@ -2785,7 +2834,7 @@ static int bttv_s_fbuf(struct file *file, void *f,
__s32 width = fb->fmt.width;
__s32 height = fb->fmt.height;
- retval = limit_scaled_size(fh, &width, &height,
+ retval = limit_scaled_size_lock(fh, &width, &height,
V4L2_FIELD_INTERLACED,
/* width_mask */ ~3,
/* width_bias */ 2,
@@ -2852,7 +2901,7 @@ static int bttv_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
struct bttv *btv = fh->btv;
int res = bttv_resource(fh);
- if (!check_alloc_btres(btv, fh, res))
+ if (!check_alloc_btres_lock(btv, fh, res))
return -EBUSY;
return videobuf_qbuf(bttv_queue(fh), b);
@@ -2872,7 +2921,7 @@ static int bttv_streamon(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int res = bttv_resource(fh);
- if (!check_alloc_btres(btv, fh, res))
+ if (!check_alloc_btres_lock(btv, fh, res))
return -EBUSY;
return videobuf_streamon(bttv_queue(fh));
}
@@ -2890,7 +2939,7 @@ static int bttv_streamoff(struct file *file, void *priv,
retval = videobuf_streamoff(bttv_queue(fh));
if (retval < 0)
return retval;
- free_btres(btv, fh, res);
+ free_btres_lock(btv, fh, res);
return 0;
}
@@ -2907,6 +2956,7 @@ static int bttv_queryctrl(struct file *file, void *priv,
c->id >= V4L2_CID_PRIVATE_LASTP1))
return -EINVAL;
+ mutex_lock(&btv->lock);
if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME))
*c = no_ctl;
else {
@@ -2914,6 +2964,7 @@ static int bttv_queryctrl(struct file *file, void *priv,
*c = (NULL != ctrl) ? *ctrl : no_ctl;
}
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2924,8 +2975,11 @@ static int bttv_g_parm(struct file *file, void *f,
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id,
&parm->parm.capture.timeperframe);
+ mutex_unlock(&btv->lock);
+
return 0;
}
@@ -2961,7 +3015,9 @@ static int bttv_g_priority(struct file *file, void *f, enum v4l2_priority *p)
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
*p = v4l2_prio_max(&btv->prio);
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -2971,8 +3027,13 @@ static int bttv_s_priority(struct file *file, void *f,
{
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
+ int rc;
- return v4l2_prio_change(&btv->prio, &fh->prio, prio);
+ mutex_lock(&btv->lock);
+ rc = v4l2_prio_change(&btv->prio, &fh->prio, prio);
+ mutex_unlock(&btv->lock);
+
+ return rc;
}
static int bttv_cropcap(struct file *file, void *priv,
@@ -2985,7 +3046,9 @@ static int bttv_cropcap(struct file *file, void *priv,
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
+ mutex_lock(&btv->lock);
*cap = bttv_tvnorms[btv->tvnorm].cropcap;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3003,7 +3066,9 @@ static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
inconsistent with fh->width or fh->height and apps
do not expect a change here. */
+ mutex_lock(&btv->lock);
crop->c = btv->crop[!!fh->do_crop].rect;
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3024,14 +3089,15 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- retval = v4l2_prio_check(&btv->prio, fh->prio);
- if (0 != retval)
- return retval;
-
/* Make sure tvnorm, vbi_end and the current cropping
parameters remain consistent until we're done. Note
- read() may change vbi_end in check_alloc_btres(). */
+ read() may change vbi_end in check_alloc_btres_lock(). */
mutex_lock(&btv->lock);
+ retval = v4l2_prio_check(&btv->prio, fh->prio);
+ if (0 != retval) {
+ mutex_unlock(&btv->lock);
+ return retval;
+ }
retval = -EBUSY;
@@ -3128,17 +3194,17 @@ static ssize_t bttv_read(struct file *file, char __user *data,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (!check_alloc_btres(fh->btv, fh, RESOURCE_VIDEO_READ)) {
+ if (!check_alloc_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ)) {
/* VIDEO_READ in use by another fh,
or VIDEO_STREAM by any fh. */
return -EBUSY;
}
retval = videobuf_read_one(&fh->cap, data, count, ppos,
file->f_flags & O_NONBLOCK);
- free_btres(fh->btv, fh, RESOURCE_VIDEO_READ);
+ free_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
+ if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
return -EBUSY;
retval = videobuf_read_stream(&fh->vbi, data, count, ppos, 1,
file->f_flags & O_NONBLOCK);
@@ -3157,20 +3223,19 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
- if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
+ if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
return POLLERR;
return videobuf_poll_stream(file, &fh->vbi, wait);
}
+ mutex_lock(&fh->cap.vb_lock);
if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
- mutex_lock(&fh->cap.vb_lock);
/* streaming capture */
if (list_empty(&fh->cap.stream))
goto err;
buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
} else {
/* read() capture */
- mutex_lock(&fh->cap.vb_lock);
if (NULL == fh->cap.read_buf) {
/* need to capture a new frame */
if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM))
@@ -3188,7 +3253,6 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
fh->cap.read_off = 0;
}
- mutex_unlock(&fh->cap.vb_lock);
buf = (struct bttv_buffer*)fh->cap.read_buf;
}
@@ -3221,21 +3285,32 @@ static int bttv_open(struct file *file)
return -ENODEV;
}
- lock_kernel();
-
dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n",
btv->c.nr,v4l2_type_names[type]);
/* allocate per filehandle data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+ if (unlikely(!fh))
return -ENOMEM;
- }
file->private_data = fh;
+
+ /*
+ * btv is protected by btv->lock mutex, while btv->init and other
+ * streaming vars are protected by fh->cap.vb_lock. We need to take
+ * care of both locks to avoid troubles. However, vb_lock is used also
+ * inside videobuf, without calling buf->lock. So, it is a very bad
+ * idea to hold both locks at the same time.
+ * Let's first copy btv->init at fh, holding cap.vb_lock, and then work
+ * with the rest of init, holding btv->lock.
+ */
+ mutex_lock(&fh->cap.vb_lock);
*fh = btv->init;
+ mutex_unlock(&fh->cap.vb_lock);
+
fh->type = type;
fh->ov.setup_ok = 0;
+
+ mutex_lock(&btv->lock);
v4l2_prio_open(&btv->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
@@ -3243,13 +3318,13 @@ static int bttv_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct bttv_buffer),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops,
&btv->c.pci->dev, &btv->s_lock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct bttv_buffer),
- fh);
+ fh, NULL);
set_tvnorm(btv,btv->tvnorm);
set_input(btv, btv->input, btv->tvnorm);
@@ -3272,7 +3347,7 @@ static int bttv_open(struct file *file)
bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm);
bttv_field_count(btv);
- unlock_kernel();
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3281,6 +3356,7 @@ static int bttv_release(struct file *file)
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
+ mutex_lock(&btv->lock);
/* turn off overlay */
if (check_btres(fh, RESOURCE_OVERLAY))
bttv_switch_overlay(btv,fh,NULL);
@@ -3288,25 +3364,32 @@ static int bttv_release(struct file *file)
/* stop video capture */
if (check_btres(fh, RESOURCE_VIDEO_STREAM)) {
videobuf_streamoff(&fh->cap);
- free_btres(btv,fh,RESOURCE_VIDEO_STREAM);
+ free_btres_lock(btv,fh,RESOURCE_VIDEO_STREAM);
}
if (fh->cap.read_buf) {
buffer_release(&fh->cap,fh->cap.read_buf);
kfree(fh->cap.read_buf);
}
if (check_btres(fh, RESOURCE_VIDEO_READ)) {
- free_btres(btv, fh, RESOURCE_VIDEO_READ);
+ free_btres_lock(btv, fh, RESOURCE_VIDEO_READ);
}
/* stop vbi capture */
if (check_btres(fh, RESOURCE_VBI)) {
videobuf_stop(&fh->vbi);
- free_btres(btv,fh,RESOURCE_VBI);
+ free_btres_lock(btv,fh,RESOURCE_VBI);
}
/* free stuff */
+
+ /*
+ * videobuf uses cap.vb_lock - we should avoid holding btv->lock,
+ * otherwise we may have dead lock conditions
+ */
+ mutex_unlock(&btv->lock);
videobuf_mmap_free(&fh->cap);
videobuf_mmap_free(&fh->vbi);
+ mutex_lock(&btv->lock);
v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3316,6 +3399,7 @@ static int bttv_release(struct file *file)
if (!btv->users)
audio_mute(btv, 1);
+ mutex_unlock(&btv->lock);
return 0;
}
@@ -3333,13 +3417,13 @@ bttv_mmap(struct file *file, struct vm_area_struct *vma)
static const struct v4l2_file_operations bttv_fops =
{
- .owner = THIS_MODULE,
- .open = bttv_open,
- .release = bttv_release,
- .ioctl = video_ioctl2,
- .read = bttv_read,
- .mmap = bttv_mmap,
- .poll = bttv_poll,
+ .owner = THIS_MODULE,
+ .open = bttv_open,
+ .release = bttv_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = bttv_read,
+ .mmap = bttv_mmap,
+ .poll = bttv_poll,
};
static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
@@ -3412,21 +3496,19 @@ static int radio_open(struct file *file)
dprintk("bttv: open dev=%s\n", video_device_node_name(vdev));
- lock_kernel();
-
dprintk("bttv%d: open called (radio)\n",btv->c.nr);
/* allocate per filehandle data */
fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (unlikely(!fh))
return -ENOMEM;
- }
file->private_data = fh;
+ mutex_lock(&fh->cap.vb_lock);
*fh = btv->init;
- v4l2_prio_open(&btv->prio, &fh->prio);
+ mutex_unlock(&fh->cap.vb_lock);
mutex_lock(&btv->lock);
+ v4l2_prio_open(&btv->prio, &fh->prio);
btv->radio_user++;
@@ -3434,7 +3516,6 @@ static int radio_open(struct file *file)
audio_input(btv,TVAUDIO_INPUT_RADIO);
mutex_unlock(&btv->lock);
- unlock_kernel();
return 0;
}
@@ -3444,6 +3525,7 @@ static int radio_release(struct file *file)
struct bttv *btv = fh->btv;
struct rds_command cmd;
+ mutex_lock(&btv->lock);
v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3451,6 +3533,7 @@ static int radio_release(struct file *file)
btv->radio_user--;
bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd);
+ mutex_unlock(&btv->lock);
return 0;
}
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 685d6597ee79..d49b675045fe 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -121,9 +121,8 @@ bttv_i2c_wait_done(struct bttv *btv)
/* timeout */
if (wait_event_interruptible_timeout(btv->i2c_queue,
- btv->i2c_done, msecs_to_jiffies(85)) == -ERESTARTSYS)
-
- rc = -EIO;
+ btv->i2c_done, msecs_to_jiffies(85)) == -ERESTARTSYS)
+ rc = -EIO;
if (btv->i2c_done & BT848_INT_RACK)
rc = 1;
@@ -390,41 +389,3 @@ int __devinit init_bttv_i2c(struct bttv *btv)
return btv->i2c_rc;
}
-
-/* Instantiate the I2C IR receiver device, if present */
-void __devinit init_bttv_i2c_ir(struct bttv *btv)
-{
- if (0 == btv->i2c_rc) {
- struct i2c_board_info info;
- /* The external IR receiver is at i2c address 0x34 (0x35 for
- reads). Future Hauppauge cards will have an internal
- receiver at 0x30 (0x31 for reads). In theory, both can be
- fitted, and Hauppauge suggest an external overrides an
- internal.
-
- That's why we probe 0x1a (~0x34) first. CB
- */
- const unsigned short addr_list[] = {
- 0x1a, 0x18, 0x4b, 0x64, 0x30, 0x71,
- I2C_CLIENT_END
- };
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
- }
-}
-
-int __devexit fini_bttv_i2c(struct bttv *btv)
-{
- if (0 != btv->i2c_rc)
- return 0;
-
- return i2c_del_adapter(&btv->c.i2c_adap);
-}
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
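
The functional piece left in bttv-i2c.c is the completion wait; the reindentation above makes it clearer that -EIO is set only when the sleep is interrupted. The call relies on wait_event_interruptible_timeout() returning -ERESTARTSYS on a signal, 0 on timeout, and the remaining jiffies (greater than zero) once the condition becomes true. A hedged sketch that handles all three cases; my_wait, wq and done are illustrative names, not bttv code:

/* Illustrative three-way handling of wait_event_interruptible_timeout(). */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int my_wait(wait_queue_head_t *wq, int *done)
{
	long ret = wait_event_interruptible_timeout(*wq, *done,
						    msecs_to_jiffies(85));

	if (ret == -ERESTARTSYS)
		return -EIO;            /* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;      /* timed out, condition never true */
	return 0;                       /* condition became true in time */
}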
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index f68717a4bdec..6bf05a7dc5f9 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -245,6 +245,83 @@ static void bttv_ir_stop(struct bttv *btv)
}
}
+/*
+ * Get_key functions used by I2C remotes
+ */
+
+static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+ unsigned char b;
+
+ /* poll IR chip */
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
+ dprintk(KERN_INFO DEVNAME ": read error\n");
+ return -EIO;
+ }
+
+ /* ignore 0xaa */
+ if (b==0xaa)
+ return 0;
+ dprintk(KERN_INFO DEVNAME ": key %02x\n", b);
+
+ *ir_key = b;
+ *ir_raw = b;
+ return 1;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
+ const unsigned short addr_list[] = {
+ 0x1a, 0x18, 0x64, 0x30, 0x71,
+ I2C_CLIENT_END
+ };
+ struct i2c_board_info info;
+
+ if (0 != btv->i2c_rc)
+ return;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&btv->init_data, 0, sizeof(btv->init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ switch (btv->c.type) {
+ case BTTV_BOARD_PV951:
+ btv->init_data.name = "PV951";
+ btv->init_data.get_key = get_key_pv951;
+ btv->init_data.ir_codes = RC_MAP_PV951;
+ btv->init_data.type = IR_TYPE_OTHER;
+ info.addr = 0x4b;
+ break;
+ default:
+ /*
+ * The external IR receiver is at i2c address 0x34 (0x35 for
+ * reads). Future Hauppauge cards will have an internal
+ * receiver at 0x30 (0x31 for reads). In theory, both can be
+ * fitted, and Hauppauge suggests that an external receiver
+ * overrides an internal one.
+ * That's why we probe 0x1a (~0x34) first. CB
+ */
+
+ i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL);
+ return;
+ }
+
+ if (btv->init_data.name)
+ info.platform_data = &btv->init_data;
+ i2c_new_device(&btv->c.i2c_adap, &info);
+
+ return;
+}
+
+int __devexit fini_bttv_i2c(struct bttv *btv)
+{
+ if (0 != btv->i2c_rc)
+ return 0;
+
+ return i2c_del_adapter(&btv->c.i2c_adap);
+}
+
int bttv_input_init(struct bttv *btv)
{
struct card_ir *ir;
@@ -420,10 +497,3 @@ void bttv_input_fini(struct bttv *btv)
kfree(btv->remote);
btv->remote = NULL;
}
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
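
init_bttv_i2c_ir() above now distinguishes two instantiation styles: a board with a known receiver (BTTV_BOARD_PV951) is bound directly with i2c_new_device(), passing the board-specific get_key handler through info.platform_data, while every other board keeps probing the historical address list via i2c_new_probed_device(). A compact sketch of that decision, assuming the illustrative names instantiate_ir and known_board and reusing only the addresses shown in the patch:

/* Illustrative sketch of the two I2C instantiation styles used above. */
#include <linux/i2c.h>
#include <linux/string.h>

static void instantiate_ir(struct i2c_adapter *adap, bool known_board,
			   void *init_data)
{
	static const unsigned short addr_list[] = {
		0x1a, 0x18, 0x64, 0x30, 0x71, I2C_CLIENT_END
	};
	struct i2c_board_info info;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);

	if (known_board) {
		/* Known receiver: bind it directly and hand the per-board
		 * get_key hook over via platform_data. */
		info.addr = 0x4b;
		info.platform_data = init_data;
		i2c_new_device(adap, &info);
	} else {
		/* Unknown board: probe the usual candidate addresses. */
		i2c_new_probed_device(adap, &info, addr_list, NULL);
	}
}

ir-kbd-i2c then picks the platform_data up as a struct IR_i2c_init_data, which is why bttvp.h below gains an init_data member and the ir-kbd-i2c.h include moves there.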
diff --git a/drivers/media/video/bt8xx/bttv-risc.c b/drivers/media/video/bt8xx/bttv-risc.c
index 0fa9f39f37a3..9b57d091da48 100644
--- a/drivers/media/video/bt8xx/bttv-risc.c
+++ b/drivers/media/video/bt8xx/bttv-risc.c
@@ -582,7 +582,7 @@ bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(btv->c.pci,&buf->bottom);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index 3ec2402c6b4a..6fd2a8ebda1e 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -18,7 +18,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/ir-common.h>
-#include <media/ir-kbd-i2c.h>
#include <media/i2c-addr.h>
#include <media/tuner.h>
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 6cccc2a17eee..d1e26a448ed2 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -42,7 +42,7 @@
#include <media/videobuf-dma-sg.h>
#include <media/tveeprom.h>
#include <media/ir-common.h>
-
+#include <media/ir-kbd-i2c.h>
#include "bt848.h"
#include "bttv.h"
@@ -271,6 +271,12 @@ int bttv_sub_del_devices(struct bttv_core *core);
extern int no_overlay;
/* ---------------------------------------------------------- */
+/* bttv-input.c */
+
+extern void init_bttv_i2c_ir(struct bttv *btv);
+extern int fini_bttv_i2c(struct bttv *btv);
+
+/* ---------------------------------------------------------- */
/* bttv-driver.c */
/* insmod options */
@@ -279,8 +285,6 @@ extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
-extern void init_bttv_i2c_ir(struct bttv *btv);
-extern int fini_bttv_i2c(struct bttv *btv);
#define bttv_printk if (bttv_verbose) printk
#define dprintk if (bttv_debug >= 1) printk
@@ -366,6 +370,9 @@ struct bttv {
int has_remote;
struct card_ir *remote;
+ /* I2C remote data */
+ struct IR_i2c_init_data init_data;
+
/* locking */
spinlock_t s_lock;
struct mutex lock;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 9536f1a40dd2..2934770dacc3 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/dmi.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/i2c.h>
@@ -46,6 +47,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
+#include "ov7670.h"
#include "cafe_ccic-regs.h"
#define CAFE_VERSION 0x000002
@@ -180,6 +182,7 @@ struct cafe_camera
/* Current operating parameters */
u32 sensor_type; /* Currently ov7670 only */
struct v4l2_pix_format pix_format;
+ enum v4l2_mbus_pixelcode mbus_code;
/* Locks */
struct mutex s_mutex; /* Access to this structure */
@@ -207,6 +210,49 @@ static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
return container_of(dev, struct cafe_camera, v4l2_dev);
}
+static struct cafe_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} cafe_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .bpp = 1
+ },
+};
+#define N_CAFE_FMTS ARRAY_SIZE(cafe_formats)
+
+static struct cafe_format_struct *cafe_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_CAFE_FMTS; i++)
+ if (cafe_formats[i].pixelformat == pixelformat)
+ return cafe_formats + i;
+ /* Not found? Then return the first format. */
+ return cafe_formats;
+}
/*
* Start over with DMA buffers - dev_lock needed.
@@ -319,7 +365,6 @@ static int cafe_smbus_write_data(struct cafe_camera *cam,
{
unsigned int rval;
unsigned long flags;
- DEFINE_WAIT(the_wait);
spin_lock_irqsave(&cam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
@@ -334,28 +379,27 @@ static int cafe_smbus_write_data(struct cafe_camera *cam,
cafe_reg_write(cam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&cam->dev_lock, flags);
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
/*
- * Time to wait for the write to complete. THIS IS A RACY
- * WAY TO DO IT, but the sad fact is that reading the TWSIC1
- * register too quickly after starting the operation sends
- * the device into a place that may be kinder and better, but
- * which is absolutely useless for controlling the sensor. In
- * practice we have plenty of time to get into our sleep state
- * before the interrupt hits, and the worst case is that we
- * time out and then see that things completed, so this seems
- * the best way for now.
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+ * This happens at random and can apparently occur with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
*/
- do {
- prepare_to_wait(&cam->smbus_wait, &the_wait,
- TASK_UNINTERRUPTIBLE);
- schedule_timeout(1); /* even 1 jiffy is too long */
- finish_wait(&cam->smbus_wait, &the_wait);
- } while (!cafe_smbus_write_done(cam));
-
-#ifdef IF_THE_CAFE_HARDWARE_WORKED_RIGHT
wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
CAFE_SMBUS_TIMEOUT);
-#endif
+
spin_lock_irqsave(&cam->dev_lock, flags);
rval = cafe_reg_read(cam, REG_TWSIC1);
spin_unlock_irqrestore(&cam->dev_lock, flags);
@@ -812,15 +856,15 @@ static int cafe_cam_set_flip(struct cafe_camera *cam)
static int cafe_cam_configure(struct cafe_camera *cam)
{
- struct v4l2_format fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
if (cam->state != S_IDLE)
return -EINVAL;
- fmt.fmt.pix = cam->pix_format;
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
ret = sensor_call(cam, core, init, 0);
if (ret == 0)
- ret = sensor_call(cam, video, s_fmt, &fmt);
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
/*
* OV7670 does weird things if flip is set *before* format...
*/
@@ -1481,7 +1525,7 @@ static int cafe_vidioc_querycap(struct file *file, void *priv,
/*
* The default format we use until somebody says otherwise.
*/
-static struct v4l2_pix_format cafe_def_pix_format = {
+static const struct v4l2_pix_format cafe_def_pix_format = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.pixelformat = V4L2_PIX_FMT_YUYV,
@@ -1490,28 +1534,38 @@ static struct v4l2_pix_format cafe_def_pix_format = {
.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
};
+static const enum v4l2_mbus_pixelcode cafe_def_mbus_code =
+ V4L2_MBUS_FMT_YUYV8_2X8;
+
static int cafe_vidioc_enum_fmt_vid_cap(struct file *filp,
void *priv, struct v4l2_fmtdesc *fmt)
{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_fmt, fmt);
- mutex_unlock(&cam->s_mutex);
- return ret;
+ if (fmt->index >= N_CAFE_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, cafe_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = cafe_formats[fmt->index].pixelformat;
+ return 0;
}
-
static int cafe_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct cafe_camera *cam = priv;
+ struct cafe_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
+ f = cafe_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, try_fmt, fmt);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
mutex_unlock(&cam->s_mutex);
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * f->bpp;
+ pix->sizeimage = pix->height * pix->bytesperline;
return ret;
}
@@ -1519,6 +1573,7 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct cafe_camera *cam = priv;
+ struct cafe_format_struct *f;
int ret;
/*
@@ -1527,6 +1582,9 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
*/
if (cam->state != S_IDLE || cam->n_sbufs > 0)
return -EBUSY;
+
+ f = cafe_find_format(fmt->fmt.pix.pixelformat);
+
/*
* See if the formatting works in principle.
*/
@@ -1539,6 +1597,8 @@ static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
*/
mutex_lock(&cam->s_mutex);
cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
/*
* Make sure we have appropriate DMA buffers.
*/
@@ -1652,6 +1712,30 @@ static int cafe_vidioc_g_chip_ident(struct file *file, void *priv,
return sensor_call(cam, core, g_chip_ident, chip);
}
+static int cafe_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct cafe_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_framesizes, sizes);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int cafe_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct cafe_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int cafe_vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
@@ -1715,6 +1799,8 @@ static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
.vidioc_s_ctrl = cafe_vidioc_s_ctrl,
.vidioc_g_parm = cafe_vidioc_g_parm,
.vidioc_s_parm = cafe_vidioc_s_parm,
+ .vidioc_enum_framesizes = cafe_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = cafe_vidioc_enum_frameintervals,
.vidioc_g_chip_ident = cafe_vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cafe_vidioc_g_register,
@@ -1890,11 +1976,33 @@ static irqreturn_t cafe_irq(int irq, void *data)
* PCI interface stuff.
*/
+static const struct dmi_system_id olpc_xo1_dmi[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ },
+ { }
+};
+
static int cafe_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct cafe_camera *cam;
+ struct ov7670_config sensor_cfg = {
+ /* This controller only does SMBUS */
+ .use_smbus = true,
+
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+ };
/*
* Start putting together one of our big camera structures.
@@ -1915,6 +2023,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
init_waitqueue_head(&cam->iowait);
cam->pdev = pdev;
cam->pix_format = cafe_def_pix_format;
+ cam->mbus_code = cafe_def_mbus_code;
INIT_LIST_HEAD(&cam->dev_list);
INIT_LIST_HEAD(&cam->sb_avail);
INIT_LIST_HEAD(&cam->sb_full);
@@ -1951,13 +2060,18 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_freeirq;
+ /* Apply XO-1 clock speed */
+ if (dmi_check_system(olpc_xo1_dmi))
+ sensor_cfg.clock_speed = 45;
+
cam->sensor_addr = 0x42;
cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter,
- "ov7670", "ov7670", cam->sensor_addr, NULL);
+ NULL, "ov7670", cam->sensor_addr, NULL);
if (cam->sensor == NULL) {
ret = -ENODEV;
goto out_smbus;
}
+
ret = cafe_cam_init(cam);
if (ret)
goto out_smbus;
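
The cafe_ccic.c changes move format knowledge into the driver: the new cafe_formats[] table maps each supported fourcc to a media-bus code and a bytes-per-pixel value, cafe_find_format() falls back to the first entry for anything unknown, and the TRY_FMT/S_FMT paths translate through v4l2_fill_mbus_format()/v4l2_fill_pix_format() before calling the sensor's try_mbus_fmt/s_mbus_fmt operations. The table-plus-default lookup is the load-bearing idiom; a stripped-down sketch of it, with illustrative my_fmt/my_find_format names:

/* Illustrative table-plus-default format lookup, as used above. */
#include <linux/kernel.h>
#include <linux/videodev2.h>

struct my_fmt {
	u32 pixelformat;        /* V4L2 fourcc */
	int bpp;                /* bytes per pixel */
};

static const struct my_fmt my_fmts[] = {
	{ V4L2_PIX_FMT_YUYV,   2 },
	{ V4L2_PIX_FMT_SBGGR8, 1 },
};

static const struct my_fmt *my_find_format(u32 pixelformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(my_fmts); i++)
		if (my_fmts[i].pixelformat == pixelformat)
			return &my_fmts[i];
	/* Unknown fourcc: fall back to the first entry, so TRY_FMT and
	 * S_FMT always end up with a format the hardware supports. */
	return &my_fmts[0];
}

Callers then derive the layout from the entry, e.g. pix->bytesperline = pix->width * f->bpp and pix->sizeimage = pix->height * pix->bytesperline, exactly as the try_fmt handler above does.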
diff --git a/drivers/media/video/cpia2/Kconfig b/drivers/media/video/cpia2/Kconfig
index e39a96152004..66e9283f5993 100644
--- a/drivers/media/video/cpia2/Kconfig
+++ b/drivers/media/video/cpia2/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_CPIA2
tristate "CPiA2 Video For Linux"
- depends on VIDEO_DEV && USB && VIDEO_V4L1
+ depends on VIDEO_DEV && USB && VIDEO_V4L2
---help---
This is the video4linux driver for cameras based on Vision's CPiA2
(Colour Processor Interface ASIC), such as the Digital Blue QX5
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 8d2dfc128821..916c13d5cf7d 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -32,7 +32,7 @@
#define __CPIA2_H__
#include <linux/version.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/usb.h>
#include <linux/poll.h>
@@ -43,7 +43,7 @@
/* define for verbose debug output */
//#define _CPIA2_DEBUG_
-#define CPIA2_MAJ_VER 2
+#define CPIA2_MAJ_VER 3
#define CPIA2_MIN_VER 0
#define CPIA2_PATCH_VER 0
@@ -396,8 +396,8 @@ struct camera_data {
/* v4l */
int video_size; /* VIDEO_SIZE_ */
struct video_device *vdev; /* v4l videodev */
- struct video_picture vp; /* v4l camera settings */
- struct video_window vw; /* v4l capture area */
+ u32 width;
+ u32 height; /* Its size */
__u32 pixelformat; /* Format fourcc */
/* USB */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 1cc0df8befff..9606bc01b803 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -1058,44 +1058,44 @@ static int set_vw_size(struct camera_data *cam, int size)
DBG("Setting size to VGA\n");
cam->params.roi.width = STV_IMAGE_VGA_COLS;
cam->params.roi.height = STV_IMAGE_VGA_ROWS;
- cam->vw.width = STV_IMAGE_VGA_COLS;
- cam->vw.height = STV_IMAGE_VGA_ROWS;
+ cam->width = STV_IMAGE_VGA_COLS;
+ cam->height = STV_IMAGE_VGA_ROWS;
break;
case VIDEOSIZE_CIF:
DBG("Setting size to CIF\n");
cam->params.roi.width = STV_IMAGE_CIF_COLS;
cam->params.roi.height = STV_IMAGE_CIF_ROWS;
- cam->vw.width = STV_IMAGE_CIF_COLS;
- cam->vw.height = STV_IMAGE_CIF_ROWS;
+ cam->width = STV_IMAGE_CIF_COLS;
+ cam->height = STV_IMAGE_CIF_ROWS;
break;
case VIDEOSIZE_QVGA:
DBG("Setting size to QVGA\n");
cam->params.roi.width = STV_IMAGE_QVGA_COLS;
cam->params.roi.height = STV_IMAGE_QVGA_ROWS;
- cam->vw.width = STV_IMAGE_QVGA_COLS;
- cam->vw.height = STV_IMAGE_QVGA_ROWS;
+ cam->width = STV_IMAGE_QVGA_COLS;
+ cam->height = STV_IMAGE_QVGA_ROWS;
break;
case VIDEOSIZE_288_216:
cam->params.roi.width = 288;
cam->params.roi.height = 216;
- cam->vw.width = 288;
- cam->vw.height = 216;
+ cam->width = 288;
+ cam->height = 216;
break;
case VIDEOSIZE_256_192:
- cam->vw.width = 256;
- cam->vw.height = 192;
+ cam->width = 256;
+ cam->height = 192;
cam->params.roi.width = 256;
cam->params.roi.height = 192;
break;
case VIDEOSIZE_224_168:
- cam->vw.width = 224;
- cam->vw.height = 168;
+ cam->width = 224;
+ cam->height = 168;
cam->params.roi.width = 224;
cam->params.roi.height = 168;
break;
case VIDEOSIZE_192_144:
- cam->vw.width = 192;
- cam->vw.height = 144;
+ cam->width = 192;
+ cam->height = 144;
cam->params.roi.width = 192;
cam->params.roi.height = 144;
break;
@@ -1103,8 +1103,8 @@ static int set_vw_size(struct camera_data *cam, int size)
DBG("Setting size to QCIF\n");
cam->params.roi.width = STV_IMAGE_QCIF_COLS;
cam->params.roi.height = STV_IMAGE_QCIF_ROWS;
- cam->vw.width = STV_IMAGE_QCIF_COLS;
- cam->vw.height = STV_IMAGE_QCIF_ROWS;
+ cam->width = STV_IMAGE_QCIF_COLS;
+ cam->height = STV_IMAGE_QCIF_ROWS;
break;
default:
retval = -EINVAL;
@@ -2224,23 +2224,8 @@ static void reset_camera_struct(struct camera_data *cam)
cam->params.roi.height = STV_IMAGE_CIF_ROWS;
}
- /***
- * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP
- * Ioctl. Here, just do the window and picture stucts.
- ***/
- cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
- cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
- cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
- cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
-
- cam->vw.x = 0;
- cam->vw.y = 0;
- cam->vw.width = cam->params.roi.width;
- cam->vw.height = cam->params.roi.height;
- cam->vw.flags = 0;
- cam->vw.clipcount = 0;
-
- return;
+ cam->width = cam->params.roi.width;
+ cam->height = cam->params.roi.height;
}
/******************************************************************************
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 5520789854da..46b433bbf2c1 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -37,7 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <linux/stringify.h>
#include <media/v4l2-ioctl.h>
@@ -391,113 +391,6 @@ static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *
}
-/******************************************************************************
- *
- * ioctl_cap_query
- *
- *****************************************************************************/
-static int ioctl_cap_query(void *arg, struct camera_data *cam)
-{
- struct video_capability *vc;
- int retval = 0;
- vc = arg;
-
- if (cam->params.pnp_id.product == 0x151)
- strcpy(vc->name, "QX5 Microscope");
- else
- strcpy(vc->name, "CPiA2 Camera");
-
- vc->type = VID_TYPE_CAPTURE | VID_TYPE_MJPEG_ENCODER;
- vc->channels = 1;
- vc->audios = 0;
- vc->minwidth = 176; /* VIDEOSIZE_QCIF */
- vc->minheight = 144;
- switch (cam->params.version.sensor_flags) {
- case CPIA2_VP_SENSOR_FLAGS_500:
- vc->maxwidth = STV_IMAGE_VGA_COLS;
- vc->maxheight = STV_IMAGE_VGA_ROWS;
- break;
- case CPIA2_VP_SENSOR_FLAGS_410:
- vc->maxwidth = STV_IMAGE_CIF_COLS;
- vc->maxheight = STV_IMAGE_CIF_ROWS;
- break;
- default:
- return -EINVAL;
- }
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_get_channel
- *
- *****************************************************************************/
-static int ioctl_get_channel(void *arg)
-{
- int retval = 0;
- struct video_channel *v;
- v = arg;
-
- if (v->channel != 0)
- return -EINVAL;
-
- v->channel = 0;
- strcpy(v->name, "Camera");
- v->tuners = 0;
- v->flags = 0;
- v->type = VIDEO_TYPE_CAMERA;
- v->norm = 0;
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_set_channel
- *
- *****************************************************************************/
-static int ioctl_set_channel(void *arg)
-{
- struct video_channel *v;
- int retval = 0;
- v = arg;
-
- if (retval == 0 && v->channel != 0)
- retval = -EINVAL;
-
- return retval;
-}
-
-/******************************************************************************
- *
- * ioctl_set_image_prop
- *
- *****************************************************************************/
-static int ioctl_set_image_prop(void *arg, struct camera_data *cam)
-{
- struct video_picture *vp;
- int retval = 0;
- vp = arg;
-
- /* brightness, color, contrast need no check 0-65535 */
- memcpy(&cam->vp, vp, sizeof(*vp));
-
- /* update cam->params.colorParams */
- cam->params.color_params.brightness = vp->brightness / 256;
- cam->params.color_params.saturation = vp->colour / 256;
- cam->params.color_params.contrast = vp->contrast / 256;
-
- DBG("Requested params: bright 0x%X, sat 0x%X, contrast 0x%X\n",
- cam->params.color_params.brightness,
- cam->params.color_params.saturation,
- cam->params.color_params.contrast);
-
- cpia2_set_color_params(cam);
-
- return retval;
-}
-
static int sync(struct camera_data *cam, int frame_nr)
{
struct framebuf *frame = &cam->buffers[frame_nr];
@@ -526,61 +419,10 @@ static int sync(struct camera_data *cam, int frame_nr)
/******************************************************************************
*
- * ioctl_set_window_size
- *
- *****************************************************************************/
-static int ioctl_set_window_size(void *arg, struct camera_data *cam,
- struct cpia2_fh *fh)
-{
- /* copy_from_user, check validity, copy to internal structure */
- struct video_window *vw;
- int frame, err;
- vw = arg;
-
- if (vw->clipcount != 0) /* clipping not supported */
- return -EINVAL;
-
- if (vw->clips != NULL) /* clipping not supported */
- return -EINVAL;
-
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0)
- return err;
-
- cam->pixelformat = V4L2_PIX_FMT_JPEG;
-
- /* Be sure to supply the Huffman tables, this isn't MJPEG */
- cam->params.compression.inhibit_htables = 0;
-
- /* we set the video window to something smaller or equal to what
- * is requested by the user???
- */
- DBG("Requested width = %d, height = %d\n", vw->width, vw->height);
- if (vw->width != cam->vw.width || vw->height != cam->vw.height) {
- cam->vw.width = vw->width;
- cam->vw.height = vw->height;
- cam->params.roi.width = vw->width;
- cam->params.roi.height = vw->height;
- cpia2_set_format(cam);
- }
-
- for (frame = 0; frame < cam->num_frames; ++frame) {
- if (cam->buffers[frame].status == FRAME_READING)
- if ((err = sync(cam, frame)) < 0)
- return err;
-
- cam->buffers[frame].status = FRAME_EMPTY;
- }
-
- return 0;
-}
-
-/******************************************************************************
- *
* ioctl_get_mbuf
*
*****************************************************************************/
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int ioctl_get_mbuf(void *arg, struct camera_data *cam)
{
struct video_mbuf *vm;
@@ -595,66 +437,7 @@ static int ioctl_get_mbuf(void *arg, struct camera_data *cam)
return 0;
}
-
-/******************************************************************************
- *
- * ioctl_mcapture
- *
- *****************************************************************************/
-static int ioctl_mcapture(void *arg, struct camera_data *cam,
- struct cpia2_fh *fh)
-{
- struct video_mmap *vm;
- int video_size, err;
- vm = arg;
-
- if (vm->frame < 0 || vm->frame >= cam->num_frames)
- return -EINVAL;
-
- /* set video size */
- video_size = cpia2_match_video_size(vm->width, vm->height);
- if (cam->video_size < 0) {
- return -EINVAL;
- }
-
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0)
- return err;
-
- if (video_size != cam->video_size) {
- cam->video_size = video_size;
- cam->params.roi.width = vm->width;
- cam->params.roi.height = vm->height;
- cpia2_set_format(cam);
- }
-
- if (cam->buffers[vm->frame].status == FRAME_READING)
- if ((err=sync(cam, vm->frame)) < 0)
- return err;
-
- cam->buffers[vm->frame].status = FRAME_EMPTY;
-
- return cpia2_usb_stream_start(cam,cam->params.camera_state.stream_mode);
-}
-
-/******************************************************************************
- *
- * ioctl_sync
- *
- *****************************************************************************/
-static int ioctl_sync(void *arg, struct camera_data *cam)
-{
- int frame;
-
- frame = *(int*)arg;
-
- if (frame < 0 || frame >= cam->num_frames)
- return -EINVAL;
-
- return sync(cam, frame);
-}
-
+#endif
/******************************************************************************
*
@@ -897,10 +680,10 @@ static int ioctl_set_fmt(void *arg,struct camera_data *cam, struct cpia2_fh *fh)
*/
DBG("Requested width = %d, height = %d\n",
f->fmt.pix.width, f->fmt.pix.height);
- if (f->fmt.pix.width != cam->vw.width ||
- f->fmt.pix.height != cam->vw.height) {
- cam->vw.width = f->fmt.pix.width;
- cam->vw.height = f->fmt.pix.height;
+ if (f->fmt.pix.width != cam->width ||
+ f->fmt.pix.height != cam->height) {
+ cam->width = f->fmt.pix.width;
+ cam->height = f->fmt.pix.height;
cam->params.roi.width = f->fmt.pix.width;
cam->params.roi.height = f->fmt.pix.height;
cpia2_set_format(cam);
@@ -932,8 +715,8 @@ static int ioctl_get_fmt(void *arg,struct camera_data *cam)
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- f->fmt.pix.width = cam->vw.width;
- f->fmt.pix.height = cam->vw.height;
+ f->fmt.pix.width = cam->width;
+ f->fmt.pix.height = cam->height;
f->fmt.pix.pixelformat = cam->pixelformat;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.bytesperline = 0;
@@ -962,12 +745,12 @@ static int ioctl_cropcap(void *arg,struct camera_data *cam)
c->bounds.left = 0;
c->bounds.top = 0;
- c->bounds.width = cam->vw.width;
- c->bounds.height = cam->vw.height;
+ c->bounds.width = cam->width;
+ c->bounds.height = cam->height;
c->defrect.left = 0;
c->defrect.top = 0;
- c->defrect.width = cam->vw.width;
- c->defrect.height = cam->vw.height;
+ c->defrect.width = cam->width;
+ c->defrect.height = cam->height;
c->pixelaspect.numerator = 1;
c->pixelaspect.denominator = 1;
@@ -1587,8 +1370,6 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Priority check */
switch (cmd) {
- case VIDIOCSWIN:
- case VIDIOCMCAPTURE:
case VIDIOC_S_FMT:
{
struct cpia2_fh *fh = file->private_data;
@@ -1599,8 +1380,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
break;
}
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
case VIDIOCGMBUF:
- case VIDIOCSYNC:
{
struct cpia2_fh *fh = file->private_data;
if(fh->prio != V4L2_PRIORITY_RECORD) {
@@ -1609,68 +1390,21 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
break;
}
+#endif
default:
break;
}
switch (cmd) {
- case VIDIOCGCAP: /* query capabilities */
- retval = ioctl_cap_query(arg, cam);
- break;
-
- case VIDIOCGCHAN: /* get video source - we are a camera, nothing else */
- retval = ioctl_get_channel(arg);
- break;
- case VIDIOCSCHAN: /* set video source - we are a camera, nothing else */
- retval = ioctl_set_channel(arg);
- break;
- case VIDIOCGPICT: /* image properties */
- memcpy(arg, &cam->vp, sizeof(struct video_picture));
- break;
- case VIDIOCSPICT:
- retval = ioctl_set_image_prop(arg, cam);
- break;
- case VIDIOCGWIN: /* get/set capture window */
- memcpy(arg, &cam->vw, sizeof(struct video_window));
- break;
- case VIDIOCSWIN:
- retval = ioctl_set_window_size(arg, cam, file->private_data);
- break;
- case VIDIOCGMBUF: /* mmap interface */
- retval = ioctl_get_mbuf(arg, cam);
- break;
- case VIDIOCMCAPTURE:
- retval = ioctl_mcapture(arg, cam, file->private_data);
- break;
- case VIDIOCSYNC:
- retval = ioctl_sync(arg, cam);
- break;
- /* pointless to implement overlay with this camera */
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCKEY:
- retval = -EINVAL;
- break;
-
- /* tuner interface - we have none */
- case VIDIOCGTUNER:
- case VIDIOCSTUNER:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- retval = -EINVAL;
- break;
-
- /* audio interface - we have none */
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- retval = -EINVAL;
- break;
-
/* CPIA2 extension to Video4Linux API */
case CPIA2_IOC_SET_GPIO:
retval = ioctl_set_gpio(arg, cam);
break;
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ case VIDIOCGMBUF: /* mmap interface */
+ retval = ioctl_get_mbuf(arg, cam);
+ break;
+#endif
case VIDIOC_QUERYCAP:
retval = ioctl_querycap(arg,cam);
break;
@@ -1874,21 +1608,8 @@ static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
*****************************************************************************/
static void reset_camera_struct_v4l(struct camera_data *cam)
{
- /***
- * Fill in the v4l structures. video_cap is filled in inside the VIDIOCCAP
- * Ioctl. Here, just do the window and picture stucts.
- ***/
- cam->vp.palette = (u16) VIDEO_PALETTE_RGB24; /* Is this right? */
- cam->vp.brightness = (u16) cam->params.color_params.brightness * 256;
- cam->vp.colour = (u16) cam->params.color_params.saturation * 256;
- cam->vp.contrast = (u16) cam->params.color_params.contrast * 256;
-
- cam->vw.x = 0;
- cam->vw.y = 0;
- cam->vw.width = cam->params.roi.width;
- cam->vw.height = cam->params.roi.height;
- cam->vw.flags = 0;
- cam->vw.clipcount = 0;
+ cam->width = cam->params.roi.width;
+ cam->height = cam->params.roi.height;
cam->frame_size = buffer_size;
cam->num_frames = num_buffers;
@@ -1902,13 +1623,12 @@ static void reset_camera_struct_v4l(struct camera_data *cam)
cam->pixelformat = V4L2_PIX_FMT_JPEG;
v4l2_prio_init(&cam->prio);
- return;
}
/***
* The v4l video device structure initialized for this device
***/
-static const struct v4l2_file_operations fops_template = {
+static const struct v4l2_file_operations cpia2_fops = {
.owner = THIS_MODULE,
.open = cpia2_open,
.release = cpia2_close,
@@ -1920,9 +1640,9 @@ static const struct v4l2_file_operations fops_template = {
static struct video_device cpia2_template = {
/* I could not find any place for the old .initialize initializer?? */
- .name= "CPiA2 Camera",
- .fops= &fops_template,
- .release= video_device_release,
+ .name = "CPiA2 Camera",
+ .fops = &cpia2_fops,
+ .release = video_device_release,
};
/******************************************************************************
diff --git a/drivers/media/video/cpia2/cpia2dev.h b/drivers/media/video/cpia2/cpia2dev.h
index d58097ce0d5e..f66691fe5a35 100644
--- a/drivers/media/video/cpia2/cpia2dev.h
+++ b/drivers/media/video/cpia2/cpia2dev.h
@@ -29,14 +29,14 @@
#ifndef CPIA2_DEV_HEADER
#define CPIA2_DEV_HEADER
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
/***
* The following defines are ioctl numbers based on video4linux private ioctls,
* which can range from 192 (BASE_VIDIOCPRIVATE) to 255. All of these take int
* args
*/
-#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOCPRIVATE + 17, __u32)
+#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOC_PRIVATE + 17, __u32)
/* V4L2 driver specific controls */
#define CPIA2_CID_TARGET_KB (V4L2_CID_PRIVATE_BASE+0)
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 8362db509e2c..9358fe77e562 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
MODULE_AUTHOR("Hans Verkuil");
@@ -209,9 +208,25 @@ static const struct i2c_device_id cs5345_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs5345_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cs5345",
- .probe = cs5345_probe,
- .remove = cs5345_remove,
- .id_table = cs5345_id,
+static struct i2c_driver cs5345_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cs5345",
+ },
+ .probe = cs5345_probe,
+ .remove = cs5345_remove,
+ .id_table = cs5345_id,
};
+
+static __init int init_cs5345(void)
+{
+ return i2c_add_driver(&cs5345_driver);
+}
+
+static __exit void exit_cs5345(void)
+{
+ i2c_del_driver(&cs5345_driver);
+}
+
+module_init(init_cs5345);
+module_exit(exit_cs5345);
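
cs5345.c here and cs53l32a.c just below drop the v4l2-i2c-drv.h wrapper and register a plain struct i2c_driver themselves; the wrapper's name/probe/remove/id_table fields map one-to-one onto the open-coded driver plus module_init()/module_exit() boilerplate. A self-contained sketch of that shape with hypothetical mychip_* names (the stub probe/remove stand in for the unchanged driver callbacks):

/* Illustrative replacement for a v4l2_i2c_driver_data wrapper. */
#include <linux/module.h>
#include <linux/i2c.h>

static int mychip_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	return 0;       /* real drivers register their v4l2_subdev here */
}

static int mychip_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id mychip_id[] = {
	{ "mychip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, mychip_id);

static struct i2c_driver mychip_driver = {
	.driver = {
		.owner  = THIS_MODULE,
		.name   = "mychip",
	},
	.probe          = mychip_probe,
	.remove         = mychip_remove,
	.id_table       = mychip_id,
};

static int __init mychip_init(void)
{
	return i2c_add_driver(&mychip_driver);
}

static void __exit mychip_exit(void)
{
	i2c_del_driver(&mychip_driver);
}

module_init(mychip_init);
module_exit(mychip_exit);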
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index cc9e84d75ea7..d93e5ab45fd3 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -30,7 +30,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
@@ -239,9 +238,25 @@ static const struct i2c_device_id cs53l32a_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cs53l32a",
- .remove = cs53l32a_remove,
- .probe = cs53l32a_probe,
- .id_table = cs53l32a_id,
+static struct i2c_driver cs53l32a_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cs53l32a",
+ },
+ .probe = cs53l32a_probe,
+ .remove = cs53l32a_remove,
+ .id_table = cs53l32a_id,
};
+
+static __init int init_cs53l32a(void)
+{
+ return i2c_add_driver(&cs53l32a_driver);
+}
+
+static __exit void exit_cs53l32a(void)
+{
+ i2c_del_driver(&cs53l32a_driver);
+}
+
+module_init(init_cs53l32a);
+module_exit(exit_cs53l32a);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 9bc51a99376b..77be58c1096b 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -674,18 +674,25 @@ static inline int cx18_raw_vbi(const struct cx18 *cx)
/* Call the specified callback for all subdevs with a grp_id bit matching the
* mask in hw (if 0, then match them all). Ignore any errors. */
-#define cx18_call_hw(cx, hw, o, f, args...) \
- __v4l2_device_call_subdevs(&(cx)->v4l2_dev, \
- !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define cx18_call_hw(cx, hw, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_p(&(cx)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+ } while (0)
#define cx18_call_all(cx, o, f, args...) cx18_call_hw(cx, 0, o, f , ##args)
/* Call the specified callback for all subdevs with a grp_id bit matching the
* mask in hw (if 0, then match them all). If the callback returns an error
* other than 0 or -ENOIOCTLCMD, then return with that error code. */
-#define cx18_call_hw_err(cx, hw, o, f, args...) \
- __v4l2_device_call_subdevs_until_err( \
- &(cx)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define cx18_call_hw_err(cx, hw, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(&(cx)->v4l2_dev, \
+ __sd, !(hw) || (__sd->grp_id & (hw)), o, f, \
+ ##args); \
+})
#define cx18_call_all_err(cx, o, f, args...) \
cx18_call_hw_err(cx, 0, o, f , ##args)
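
The rewritten cx18_call_hw()/cx18_call_hw_err() macros stop depending on a variable named sd leaking out of the v4l2_device helpers; each declares its own __sd and hands it to the _p helper variants. The C idioms carrying the change are the do { } while (0) wrapper, which makes the void macro behave like a single statement, and the GCC statement expression ({ ... }), which lets the error-returning macro yield a value. A self-contained userspace-C sketch of both idioms, with illustrative LOG_ALL/MAX_OF macros unrelated to the kernel helpers:

/*
 * Self-contained userspace sketch of the two macro shapes used above:
 * do { } while (0) for statement-like macros, ({ ... }) when a value
 * must be returned.  LOG_ALL and MAX_OF are illustrative only.
 */
#include <stdio.h>

#define LOG_ALL(arr, n)                                 \
	do {                                            \
		int __i;                                \
		for (__i = 0; __i < (n); __i++)         \
			printf("%d\n", (arr)[__i]);     \
	} while (0)

#define MAX_OF(arr, n)                                  \
({                                                      \
	int __i, __max = (arr)[0];                      \
	for (__i = 1; __i < (n); __i++)                 \
		if ((arr)[__i] > __max)                 \
			__max = (arr)[__i];             \
	__max;                                          \
})

int main(void)
{
	int v[] = { 3, 1, 4 };

	LOG_ALL(v, 3);                          /* prints 3, 1, 4 */
	printf("max=%d\n", MAX_OF(v, 3));       /* prints max=4 */
	return 0;
}

The double-underscore locals mirror the kernel convention for macro-internal identifiers, which keeps them from shadowing variables at the call site.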
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index 73ce90c2f577..a09caf883170 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -71,19 +71,6 @@ static const u8 hw_bus[] = {
};
/* This array should match the CX18_HW_ defines */
-static const char * const hw_modules[] = {
- "tuner", /* CX18_HW_TUNER */
- NULL, /* CX18_HW_TVEEPROM */
- "cs5345", /* CX18_HW_CS5345 */
- NULL, /* CX18_HW_DVB */
- NULL, /* CX18_HW_418_AV */
- NULL, /* CX18_HW_GPIO_MUX */
- NULL, /* CX18_HW_GPIO_RESET_CTRL */
- NULL, /* CX18_HW_Z8F0811_IR_TX_HAUP */
- NULL, /* CX18_HW_Z8F0811_IR_RX_HAUP */
-};
-
-/* This array should match the CX18_HW_ defines */
static const char * const hw_devicenames[] = {
"tuner",
"tveeprom",
@@ -126,7 +113,6 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
struct v4l2_subdev *sd;
int bus = hw_bus[idx];
struct i2c_adapter *adap = &cx->i2c_adap[bus];
- const char *mod = hw_modules[idx];
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
@@ -136,15 +122,15 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
if (hw == CX18_HW_TUNER) {
/* special tuner group handling */
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->radio);
+ adap, NULL, type, 0, cx->card_i2c->radio);
if (sd != NULL)
sd->grp_id = hw;
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->demod);
+ adap, NULL, type, 0, cx->card_i2c->demod);
if (sd != NULL)
sd->grp_id = hw;
sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
- adap, mod, type, 0, cx->card_i2c->tv);
+ adap, NULL, type, 0, cx->card_i2c->tv);
if (sd != NULL)
sd->grp_id = hw;
return sd != NULL ? 0 : -1;
@@ -158,7 +144,8 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
return -1;
/* It's an I2C device other than an analog tuner or IR chip */
- sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, mod, type, hw_addrs[idx], NULL);
+ sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, NULL, type, hw_addrs[idx],
+ NULL);
if (sd != NULL)
sd->grp_id = hw;
return sd != NULL ? 0 : -1;
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index d6792405f8d3..7150195740dc 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -40,7 +40,6 @@
#include "cx18-av-core.h"
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
-#include <linux/i2c-id.h>
u16 cx18_service2vbi(int type)
{
diff --git a/drivers/media/video/cx231xx/Kconfig b/drivers/media/video/cx231xx/Kconfig
index 5ac7eceececa..bb04914983fd 100644
--- a/drivers/media/video/cx231xx/Kconfig
+++ b/drivers/media/video/cx231xx/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_CX231XX
depends on VIDEO_IR
select VIDEOBUF_VMALLOC
select VIDEO_CX25840
+ select VIDEO_CX2341X
---help---
This is a video4linux driver for Conexant 231xx USB based TV cards.
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 6f2b57384488..a6bc4cc54677 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -1,5 +1,5 @@
cx231xx-objs := cx231xx-video.o cx231xx-i2c.o cx231xx-cards.o cx231xx-core.o \
- cx231xx-avcore.o cx231xx-pcb-cfg.o cx231xx-vbi.o
+ cx231xx-avcore.o cx231xx-417.o cx231xx-pcb-cfg.o cx231xx-vbi.o
cx231xx-alsa-objs := cx231xx-audio.o
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c
new file mode 100644
index 000000000000..aab21f3ce472
--- /dev/null
+++ b/drivers/media/video/cx231xx/cx231xx-417.c
@@ -0,0 +1,2194 @@
+/*
+ *
+ * Support for a cx23417 mpeg encoder via cx231xx host port.
+ *
+ * (c) 2004 Jelle Foks <jelle@foks.us>
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2008 Steven Toth <stoth@linuxtv.org>
+ * - CX23885/7/8 support
+ *
+ * Includes parts from the ivtv driver (http://ivtv.sourceforge.net/),
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cx2341x.h>
+#include <linux/usb.h>
+
+#include "cx231xx.h"
+/*#include "cx23885-ioctl.h"*/
+
+#define CX231xx_FIRM_IMAGE_SIZE 376836
+#define CX231xx_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
+
+/* for polaris ITVC */
+#define ITVC_WRITE_DIR 0x03FDFC00
+#define ITVC_READ_DIR 0x0001FC00
+
+#define MCI_MEMORY_DATA_BYTE0 0x00
+#define MCI_MEMORY_DATA_BYTE1 0x08
+#define MCI_MEMORY_DATA_BYTE2 0x10
+#define MCI_MEMORY_DATA_BYTE3 0x18
+
+#define MCI_MEMORY_ADDRESS_BYTE2 0x20
+#define MCI_MEMORY_ADDRESS_BYTE1 0x28
+#define MCI_MEMORY_ADDRESS_BYTE0 0x30
+
+#define MCI_REGISTER_DATA_BYTE0 0x40
+#define MCI_REGISTER_DATA_BYTE1 0x48
+#define MCI_REGISTER_DATA_BYTE2 0x50
+#define MCI_REGISTER_DATA_BYTE3 0x58
+
+#define MCI_REGISTER_ADDRESS_BYTE0 0x60
+#define MCI_REGISTER_ADDRESS_BYTE1 0x68
+
+#define MCI_REGISTER_MODE 0x70
+
+/* Read and write modes for polaris ITVC */
+#define MCI_MODE_REGISTER_READ 0x000
+#define MCI_MODE_REGISTER_WRITE 0x100
+#define MCI_MODE_MEMORY_READ 0x000
+#define MCI_MODE_MEMORY_WRITE 0x4000
+
+static unsigned int mpegbufs = 8;
+module_param(mpegbufs, int, 0644);
+MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
+static unsigned int mpeglines = 128;
+module_param(mpeglines, int, 0644);
+MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
+static unsigned int mpeglinesize = 512;
+module_param(mpeglinesize, int, 0644);
+MODULE_PARM_DESC(mpeglinesize,
+ "number of bytes in each line of an MPEG buffer, range 512-1024");
+
+static unsigned int v4l_debug = 1;
+module_param(v4l_debug, int, 0644);
+MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
+struct cx231xx_dmaqueue *dma_qq;
+#define dprintk(level, fmt, arg...)\
+ do { if (v4l_debug >= level) \
+ printk(KERN_INFO "%s: " fmt, \
+ (dev) ? dev->name : "cx231xx[?]", ## arg); \
+ } while (0)
+
+static struct cx231xx_tvnorm cx231xx_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }, {
+ .name = "PAL-BG",
+ .id = V4L2_STD_PAL_BG,
+ }, {
+ .name = "PAL-DK",
+ .id = V4L2_STD_PAL_DK,
+ }, {
+ .name = "PAL-I",
+ .id = V4L2_STD_PAL_I,
+ }, {
+ .name = "PAL-M",
+ .id = V4L2_STD_PAL_M,
+ }, {
+ .name = "PAL-N",
+ .id = V4L2_STD_PAL_N,
+ }, {
+ .name = "PAL-Nc",
+ .id = V4L2_STD_PAL_Nc,
+ }, {
+ .name = "PAL-60",
+ .id = V4L2_STD_PAL_60,
+ }, {
+ .name = "SECAM-L",
+ .id = V4L2_STD_SECAM_L,
+ }, {
+ .name = "SECAM-DK",
+ .id = V4L2_STD_SECAM_DK,
+ }
+};
+
+/* ------------------------------------------------------------------ */
+enum cx231xx_capture_type {
+ CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_CAPTURE,
+ CX231xx_RAW_PASSTHRU_CAPTURE
+};
+enum cx231xx_capture_bits {
+ CX231xx_RAW_BITS_NONE = 0x00,
+ CX231xx_RAW_BITS_YUV_CAPTURE = 0x01,
+ CX231xx_RAW_BITS_PCM_CAPTURE = 0x02,
+ CX231xx_RAW_BITS_VBI_CAPTURE = 0x04,
+ CX231xx_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
+ CX231xx_RAW_BITS_TO_HOST_CAPTURE = 0x10
+};
+enum cx231xx_capture_end {
+ CX231xx_END_AT_GOP, /* stop at the end of gop, generate irq */
+ CX231xx_END_NOW, /* stop immediately, no irq */
+};
+enum cx231xx_framerate {
+ CX231xx_FRAMERATE_NTSC_30, /* NTSC: 30fps */
+ CX231xx_FRAMERATE_PAL_25 /* PAL: 25fps */
+};
+enum cx231xx_stream_port {
+ CX231xx_OUTPUT_PORT_MEMORY,
+ CX231xx_OUTPUT_PORT_STREAMING,
+ CX231xx_OUTPUT_PORT_SERIAL
+};
+enum cx231xx_data_xfer_status {
+ CX231xx_MORE_BUFFERS_FOLLOW,
+ CX231xx_LAST_BUFFER,
+};
+enum cx231xx_picture_mask {
+ CX231xx_PICTURE_MASK_NONE,
+ CX231xx_PICTURE_MASK_I_FRAMES,
+ CX231xx_PICTURE_MASK_I_P_FRAMES = 0x3,
+ CX231xx_PICTURE_MASK_ALL_FRAMES = 0x7,
+};
+enum cx231xx_vbi_mode_bits {
+ CX231xx_VBI_BITS_SLICED,
+ CX231xx_VBI_BITS_RAW,
+};
+enum cx231xx_vbi_insertion_bits {
+ CX231xx_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
+ CX231xx_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
+ CX231xx_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
+};
+enum cx231xx_dma_unit {
+ CX231xx_DMA_BYTES,
+ CX231xx_DMA_FRAMES,
+};
+enum cx231xx_dma_transfer_status_bits {
+ CX231xx_DMA_TRANSFER_BITS_DONE = 0x01,
+ CX231xx_DMA_TRANSFER_BITS_ERROR = 0x04,
+ CX231xx_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
+};
+enum cx231xx_pause {
+ CX231xx_PAUSE_ENCODING,
+ CX231xx_RESUME_ENCODING,
+};
+enum cx231xx_copyright {
+ CX231xx_COPYRIGHT_OFF,
+ CX231xx_COPYRIGHT_ON,
+};
+enum cx231xx_notification_type {
+ CX231xx_NOTIFICATION_REFRESH,
+};
+enum cx231xx_notification_status {
+ CX231xx_NOTIFICATION_OFF,
+ CX231xx_NOTIFICATION_ON,
+};
+enum cx231xx_notification_mailbox {
+ CX231xx_NOTIFICATION_NO_MAILBOX = -1,
+};
+enum cx231xx_field1_lines {
+ CX231xx_FIELD1_SAA7114 = 0x00EF, /* 239 */
+ CX231xx_FIELD1_SAA7115 = 0x00F0, /* 240 */
+ CX231xx_FIELD1_MICRONAS = 0x0105, /* 261 */
+};
+enum cx231xx_field2_lines {
+ CX231xx_FIELD2_SAA7114 = 0x00EF, /* 239 */
+ CX231xx_FIELD2_SAA7115 = 0x00F0, /* 240 */
+ CX231xx_FIELD2_MICRONAS = 0x0106, /* 262 */
+};
+enum cx231xx_custom_data_type {
+ CX231xx_CUSTOM_EXTENSION_USR_DATA,
+ CX231xx_CUSTOM_PRIVATE_PACKET,
+};
+enum cx231xx_mute {
+ CX231xx_UNMUTE,
+ CX231xx_MUTE,
+};
+enum cx231xx_mute_video_mask {
+ CX231xx_MUTE_VIDEO_V_MASK = 0x0000FF00,
+ CX231xx_MUTE_VIDEO_U_MASK = 0x00FF0000,
+ CX231xx_MUTE_VIDEO_Y_MASK = 0xFF000000,
+};
+enum cx231xx_mute_video_shift {
+ CX231xx_MUTE_VIDEO_V_SHIFT = 8,
+ CX231xx_MUTE_VIDEO_U_SHIFT = 16,
+ CX231xx_MUTE_VIDEO_Y_SHIFT = 24,
+};
+
+/* defines below are from ivtv-driver.h */
+#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
+
+/* Firmware API commands */
+#define IVTV_API_STD_TIMEOUT 500
+
+/* Registers */
+/* IVTV_REG_OFFSET */
+#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8)
+#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC)
+#define IVTV_REG_SPU (0x9050)
+#define IVTV_REG_HW_BLOCKS (0x9054)
+#define IVTV_REG_VPU (0x9058)
+#define IVTV_REG_APU (0xA064)
+
+/*
+ * Bit definitions for MC417_RWD and MC417_OEN registers
+ *
+ * bits 31-16
+ *+-----------+
+ *| Reserved  |
+ *+-----------+
+ *  bit 15  bit 14  bit 13  bit 12  bit 11  bit 10  bit 9   bit 8
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *| MIWR# | MIRD# | MICS# |MIRDY# |MIADDR3|MIADDR2|MIADDR1|MIADDR0|
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *  bit 7   bit 6   bit 5   bit 4   bit 3   bit 2   bit 1   bit 0
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ *|MIDATA7|MIDATA6|MIDATA5|MIDATA4|MIDATA3|MIDATA2|MIDATA1|MIDATA0|
+ *+-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define MC417_MIWR 0x8000
+#define MC417_MIRD 0x4000
+#define MC417_MICS 0x2000
+#define MC417_MIRDY 0x1000
+#define MC417_MIADDR 0x0F00
+#define MC417_MIDATA 0x00FF
+
+
+/* Bit definitions for MC417_CTL register ****
+ *bits 31-6 bits 5-4 bit 3 bits 2-1 Bit 0
+ *+--------+-------------+--------+--------------+------------+
+ *|Reserved|MC417_SPD_CTL|Reserved|MC417_GPIO_SEL|UART_GPIO_EN|
+ *+--------+-------------+--------+--------------+------------+
+ */
+#define MC417_SPD_CTL(x) (((x) << 4) & 0x00000030)
+#define MC417_GPIO_SEL(x) (((x) << 1) & 0x00000006)
+#define MC417_UART_GPIO_EN 0x00000001
+
+/* Values for speed control */
+#define MC417_SPD_CTL_SLOW 0x1
+#define MC417_SPD_CTL_MEDIUM 0x0
+#define MC417_SPD_CTL_FAST 0x3 /* b'1x, but we use b'11 */
+
+/* Values for GPIO select */
+#define MC417_GPIO_SEL_GPIO3 0x3
+#define MC417_GPIO_SEL_GPIO2 0x2
+#define MC417_GPIO_SEL_GPIO1 0x1
+#define MC417_GPIO_SEL_GPIO0 0x0
+
+
+#define CX23417_GPIO_MASK 0xFC0003FF
+static int setITVCReg(struct cx231xx *dev, u32 gpio_direction, u32 value)
+{
+ int status = 0;
+ u32 _gpio_direction = 0;
+
+ _gpio_direction = _gpio_direction & CX23417_GPIO_MASK;
+ _gpio_direction = _gpio_direction|gpio_direction;
+ status = cx231xx_send_gpio_cmd(dev, _gpio_direction,
+ (u8 *)&value, 4, 0, 0);
+ return status;
+}
+static int getITVCReg(struct cx231xx *dev, u32 gpio_direction, u32 *pValue)
+{
+ int status = 0;
+ u32 _gpio_direction = 0;
+
+ _gpio_direction = _gpio_direction & CX23417_GPIO_MASK;
+ _gpio_direction = _gpio_direction|gpio_direction;
+
+ status = cx231xx_send_gpio_cmd(dev, _gpio_direction,
+ (u8 *)pValue, 4, 0, 1);
+ return status;
+}
+
+static int waitForMciComplete(struct cx231xx *dev)
+{
+ u32 gpio;
+ u32 gpio_direction = 0;
+ u8 count = 0;
+ getITVCReg(dev, gpio_direction, &gpio);
+
+ while (!(gpio&0x020000)) {
+ msleep(10);
+
+ getITVCReg(dev, gpio_direction, &gpio);
+
+ if (count++ > 100) {
+ dprintk(3, "ERROR: Timeout - gpio=%x\n", gpio);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int mc417_register_write(struct cx231xx *dev, u16 address, u32 value)
+{
+ u32 temp;
+ int status = 0;
+
+ temp = 0x82|MCI_REGISTER_DATA_BYTE0|((value&0x000000FF)<<8);
+ temp = temp<<10;
+ status = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (status < 0)
+ return status;
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 1;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE1|(value&0x0000FF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_REGISTER_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 0;*/
+ temp = 0x82|MCI_REGISTER_ADDRESS_BYTE0|((address&0x000000FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1;*/
+ temp = 0x82|MCI_REGISTER_ADDRESS_BYTE1|(address&0x0000FF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*Write that the mode is write.*/
+ temp = 0x82 | MCI_REGISTER_MODE | MCI_MODE_REGISTER_WRITE;
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ return waitForMciComplete(dev);
+}
+
+static int mc417_register_read(struct cx231xx *dev, u16 address, u32 *value)
+{
+ /*write address byte 0;*/
+ u32 temp;
+ u32 return_value = 0;
+ int ret = 0;
+
+ temp = 0x82 | MCI_REGISTER_ADDRESS_BYTE0 | ((address & 0x00FF) << 8);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1;*/
+ temp = 0x82 | MCI_REGISTER_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write that the mode is read;*/
+ temp = 0x82 | MCI_REGISTER_MODE | MCI_MODE_REGISTER_READ;
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*wait for the MIRDY line to be asserted,
+ signalling that the read is done;*/
+ ret = waitForMciComplete(dev);
+
+ /*switch the DATA- GPIO to input mode;*/
+
+ /*Read data byte 0;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE0) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE0) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) >> 18);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /* Read data byte 1;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE1) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE1) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+
+ return_value |= ((temp & 0x03FC0000) >> 10);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /*Read data byte 2;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE2) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE2) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) >> 2);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ /*Read data byte 3;*/
+ temp = (0x82 | MCI_REGISTER_DATA_BYTE3) << 10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81 | MCI_REGISTER_DATA_BYTE3) << 10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp & 0x03FC0000) << 6);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87 << 10));
+
+ *value = return_value;
+
+
+ return ret;
+}
+
+static int mc417_memory_write(struct cx231xx *dev, u32 address, u32 value)
+{
+ /*write data byte 0;*/
+
+ u32 temp;
+ int ret = 0;
+
+ temp = 0x82 | MCI_MEMORY_DATA_BYTE0|((value & 0x000000FF) << 8);
+ temp = temp << 10;
+ ret = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (ret < 0)
+ return ret;
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 1;*/
+ temp = 0x82 | MCI_MEMORY_DATA_BYTE1 | (value & 0x0000FF00);
+ temp = temp << 10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp | ((0x05) << 10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_WRITE |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 1;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /* write address byte 0;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0|((address & 0x00FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*wait for MIRDY line;*/
+ waitForMciComplete(dev);
+
+ return 0;
+}
+
+static int mc417_memory_read(struct cx231xx *dev, u32 address, u32 *value)
+{
+ u32 temp = 0;
+ u32 return_value = 0;
+ int ret = 0;
+
+ /*write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_READ |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ ret = setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ if (ret < 0)
+ return ret;
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 1*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*write address byte 0*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0 | ((address & 0x00FF)<<8);
+ temp = temp<<10;
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+ temp = temp|((0x05)<<10);
+ setITVCReg(dev, ITVC_WRITE_DIR, temp);
+
+ /*Wait for MIRDY line*/
+ ret = waitForMciComplete(dev);
+
+
+ /*Read data byte 3;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE3)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE3)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)<<6);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /*Read data byte 2;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE2)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE2)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>2);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /* Read data byte 1;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE1)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE1)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>10);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ /*Read data byte 0;*/
+ temp = (0x82|MCI_MEMORY_DATA_BYTE0)<<10;
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ temp = ((0x81|MCI_MEMORY_DATA_BYTE0)<<10);
+ setITVCReg(dev, ITVC_READ_DIR, temp);
+ getITVCReg(dev, ITVC_READ_DIR, &temp);
+ return_value |= ((temp&0x03FC0000)>>18);
+ setITVCReg(dev, ITVC_READ_DIR, (0x87<<10));
+
+ *value = return_value;
+ return ret;
+}
+
+/* ------------------------------------------------------------------ */
+
+/* MPEG encoder API */
+static char *cmd_to_str(int cmd)
+{
+ switch (cmd) {
+ case CX2341X_ENC_PING_FW:
+ return "PING_FW";
+ case CX2341X_ENC_START_CAPTURE:
+ return "START_CAPTURE";
+ case CX2341X_ENC_STOP_CAPTURE:
+ return "STOP_CAPTURE";
+ case CX2341X_ENC_SET_AUDIO_ID:
+ return "SET_AUDIO_ID";
+ case CX2341X_ENC_SET_VIDEO_ID:
+ return "SET_VIDEO_ID";
+ case CX2341X_ENC_SET_PCR_ID:
+ return "SET_PCR_PID";
+ case CX2341X_ENC_SET_FRAME_RATE:
+ return "SET_FRAME_RATE";
+ case CX2341X_ENC_SET_FRAME_SIZE:
+ return "SET_FRAME_SIZE";
+ case CX2341X_ENC_SET_BIT_RATE:
+ return "SET_BIT_RATE";
+ case CX2341X_ENC_SET_GOP_PROPERTIES:
+ return "SET_GOP_PROPERTIES";
+ case CX2341X_ENC_SET_ASPECT_RATIO:
+ return "SET_ASPECT_RATIO";
+ case CX2341X_ENC_SET_DNR_FILTER_MODE:
+ return "SET_DNR_FILTER_PROPS";
+ case CX2341X_ENC_SET_DNR_FILTER_PROPS:
+ return "SET_DNR_FILTER_PROPS";
+ case CX2341X_ENC_SET_CORING_LEVELS:
+ return "SET_CORING_LEVELS";
+ case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
+ return "SET_SPATIAL_FILTER_TYPE";
+ case CX2341X_ENC_SET_VBI_LINE:
+ return "SET_VBI_LINE";
+ case CX2341X_ENC_SET_STREAM_TYPE:
+ return "SET_STREAM_TYPE";
+ case CX2341X_ENC_SET_OUTPUT_PORT:
+ return "SET_OUTPUT_PORT";
+ case CX2341X_ENC_SET_AUDIO_PROPERTIES:
+ return "SET_AUDIO_PROPERTIES";
+ case CX2341X_ENC_HALT_FW:
+ return "HALT_FW";
+ case CX2341X_ENC_GET_VERSION:
+ return "GET_VERSION";
+ case CX2341X_ENC_SET_GOP_CLOSURE:
+ return "SET_GOP_CLOSURE";
+ case CX2341X_ENC_GET_SEQ_END:
+ return "GET_SEQ_END";
+ case CX2341X_ENC_SET_PGM_INDEX_INFO:
+ return "SET_PGM_INDEX_INFO";
+ case CX2341X_ENC_SET_VBI_CONFIG:
+ return "SET_VBI_CONFIG";
+ case CX2341X_ENC_SET_DMA_BLOCK_SIZE:
+ return "SET_DMA_BLOCK_SIZE";
+ case CX2341X_ENC_GET_PREV_DMA_INFO_MB_10:
+ return "GET_PREV_DMA_INFO_MB_10";
+ case CX2341X_ENC_GET_PREV_DMA_INFO_MB_9:
+ return "GET_PREV_DMA_INFO_MB_9";
+ case CX2341X_ENC_SCHED_DMA_TO_HOST:
+ return "SCHED_DMA_TO_HOST";
+ case CX2341X_ENC_INITIALIZE_INPUT:
+ return "INITIALIZE_INPUT";
+ case CX2341X_ENC_SET_FRAME_DROP_RATE:
+ return "SET_FRAME_DROP_RATE";
+ case CX2341X_ENC_PAUSE_ENCODER:
+ return "PAUSE_ENCODER";
+ case CX2341X_ENC_REFRESH_INPUT:
+ return "REFRESH_INPUT";
+ case CX2341X_ENC_SET_COPYRIGHT:
+ return "SET_COPYRIGHT";
+ case CX2341X_ENC_SET_EVENT_NOTIFICATION:
+ return "SET_EVENT_NOTIFICATION";
+ case CX2341X_ENC_SET_NUM_VSYNC_LINES:
+ return "SET_NUM_VSYNC_LINES";
+ case CX2341X_ENC_SET_PLACEHOLDER:
+ return "SET_PLACEHOLDER";
+ case CX2341X_ENC_MUTE_VIDEO:
+ return "MUTE_VIDEO";
+ case CX2341X_ENC_MUTE_AUDIO:
+ return "MUTE_AUDIO";
+ case CX2341X_ENC_MISC:
+ return "MISC";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int cx231xx_mbox_func(void *priv,
+ u32 command,
+ int in,
+ int out,
+ u32 data[CX2341X_MBOX_MAX_DATA])
+{
+ struct cx231xx *dev = priv;
+ unsigned long timeout;
+ u32 value, flag, retval = 0;
+ int i;
+
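+ /*
+  * cx23417 mailbox layout (32-bit words): [0] status flags, [1] command,
+  * [2] result code, [3] timeout, [4..] input/output arguments.
+  */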
+ dprintk(3, "%s: command(0x%X) = %s\n", __func__, command,
+ cmd_to_str(command));
+
+ /* this may not be 100% safe if we can't read any memory location
+ without side effects */
+ mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
+ if (value != 0x12345678) {
+ dprintk(3,
+ "Firmware and/or mailbox pointer not initialized "
+ "or corrupted, signature = 0x%x, cmd = %s\n", value,
+ cmd_to_str(command));
+ return -1;
+ }
+
+ /* This read looks at 32 bits, but flag is only 8 bits.
+ * Seems we also bail if CMD or TIMEOUT bytes are set???
+ */
+ mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
+ if (flag) {
+ dprintk(3, "ERROR: Mailbox appears to be in use "
+ "(%x), cmd = %s\n", flag, cmd_to_str(command));
+ return -1;
+ }
+
+ flag |= 1; /* tell 'em we're working on it */
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ /* write command + args + fill remaining with zeros */
+ /* command code */
+ mc417_memory_write(dev, dev->cx23417_mailbox + 1, command);
+ mc417_memory_write(dev, dev->cx23417_mailbox + 3,
+ IVTV_API_STD_TIMEOUT); /* timeout */
+ for (i = 0; i < in; i++) {
+ mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, data[i]);
+ dprintk(3, "API Input %d = %d\n", i, data[i]);
+ }
+ for (; i < CX2341X_MBOX_MAX_DATA; i++)
+ mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, 0);
+
+ flag |= 3; /* tell 'em we're done writing */
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ /* wait for firmware to handle the API command */
+ timeout = jiffies + msecs_to_jiffies(10);
+ for (;;) {
+ mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
+ if (0 != (flag & 4))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dprintk(3, "ERROR: API Mailbox timeout\n");
+ return -1;
+ }
+ udelay(10);
+ }
+
+ /* read output values */
+ for (i = 0; i < out; i++) {
+ mc417_memory_read(dev, dev->cx23417_mailbox + 4 + i, data + i);
+ dprintk(3, "API Output %d = %d\n", i, data[i]);
+ }
+
+ mc417_memory_read(dev, dev->cx23417_mailbox + 2, &retval);
+ dprintk(3, "API result = %d\n", retval);
+
+ flag = 0;
+ mc417_memory_write(dev, dev->cx23417_mailbox, flag);
+
+ return retval;
+}
+
+/* We don't need to call the API often, so using just one
+ * mailbox will probably suffice
+ */
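+/*
+ * Varargs convention: <inputcnt> integer arguments first, followed by
+ * <outputcnt> int pointers that receive the command results.
+ */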
+static int cx231xx_api_cmd(struct cx231xx *dev,
+ u32 command,
+ u32 inputcnt,
+ u32 outputcnt,
+ ...)
+{
+ u32 data[CX2341X_MBOX_MAX_DATA];
+ va_list vargs;
+ int i, err;
+
+ dprintk(3, "%s() cmds = 0x%08x\n", __func__, command);
+
+ va_start(vargs, outputcnt);
+ for (i = 0; i < inputcnt; i++)
+ data[i] = va_arg(vargs, int);
+
+ err = cx231xx_mbox_func(dev, command, inputcnt, outputcnt, data);
+ for (i = 0; i < outputcnt; i++) {
+ int *vptr = va_arg(vargs, int *);
+ *vptr = data[i];
+ }
+ va_end(vargs);
+
+ return err;
+}
+
+static int cx231xx_find_mailbox(struct cx231xx *dev)
+{
+ u32 signature[4] = {
+ 0x12345678, 0x34567812, 0x56781234, 0x78123456
+ };
+ int signaturecnt = 0;
+ u32 value;
+ int i;
+ int ret = 0;
+
+ dprintk(2, "%s()\n", __func__);
+
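+ /*
+  * Scan cx23417 memory for the four-word signature; the mailbox
+  * starts in the word that follows it.
+  */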
+ for (i = 0; i < 0x100; i++) {/*CX231xx_FIRM_IMAGE_SIZE*/
+ ret = mc417_memory_read(dev, i, &value);
+ if (ret < 0)
+ return ret;
+ if (value == signature[signaturecnt])
+ signaturecnt++;
+ else
+ signaturecnt = 0;
+ if (4 == signaturecnt) {
+ dprintk(1, "Mailbox signature found at 0x%x\n", i+1);
+ return i+1;
+ }
+ }
+ dprintk(3, "Mailbox signature values not found!\n");
+ return -1;
+}
+
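+/*
+ * Expand one 32-bit memory write into the 20-word GPIO sequence (seven
+ * register/strobe pairs followed by six idle words) that will later be
+ * streamed to the cx23417 over the EP5 bulk endpoint.
+ */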
+static void mciWriteMemoryToGPIO(struct cx231xx *dev, u32 address, u32 value,
+ u32 *p_fw_image)
+{
+ u32 temp = 0;
+ int i = 0;
+
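+ /*write data byte 0;*/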
+ temp = 0x82|MCI_MEMORY_DATA_BYTE0|((value&0x000000FF)<<8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 1;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE1|(value&0x0000FF00);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 2;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE2|((value&0x00FF0000)>>8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /*write data byte 3;*/
+ temp = 0x82|MCI_MEMORY_DATA_BYTE3|((value&0xFF000000)>>16);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 2;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE2 | MCI_MODE_MEMORY_WRITE |
+ ((address & 0x003F0000)>>8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 1;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE1 | (address & 0xFF00);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ /* write address byte 0;*/
+ temp = 0x82|MCI_MEMORY_ADDRESS_BYTE0|((address & 0x00FF)<<8);
+ temp = temp<<10;
+ *p_fw_image = temp;
+ p_fw_image++;
+ temp = temp|((0x05)<<10);
+ *p_fw_image = temp;
+ p_fw_image++;
+
+ for (i = 0; i < 6; i++) {
+ *p_fw_image = 0xFFFFFFFF;
+ p_fw_image++;
+ }
+}
+
+static int cx231xx_load_firmware(struct cx231xx *dev)
+{
+ static const unsigned char magic[8] = {
+ 0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
+ };
+ const struct firmware *firmware;
+ int i, retval = 0;
+ u32 value = 0;
+ u32 gpio_output = 0;
+ /*u32 checksum = 0;*/
+ /*u32 *dataptr;*/
+ u32 transfer_size = 0;
+ u32 fw_data = 0;
+ u32 address = 0;
+ /*u32 current_fw[800];*/
+ u32 *p_current_fw, *p_fw;
+ u32 *p_fw_data;
+ int frame = 0;
+ u16 _buffer_size = 4096;
+ u8 *p_buffer;
+
+ /* One 20-word GPIO sequence is generated per 32-bit firmware word. */
+ p_current_fw = vmalloc(1884180 * 4);
+ p_fw = p_current_fw;
+ if (p_current_fw == NULL) {
+ dprintk(2, "FAIL!!! Unable to allocate firmware scratch buffer\n");
+ return -ENOMEM;
+ }
+
+ p_buffer = vmalloc(4096);
+ if (p_buffer == NULL) {
+ dprintk(2, "FAIL!!! Unable to allocate transfer buffer\n");
+ vfree(p_current_fw);
+ return -ENOMEM;
+ }
+
+ dprintk(2, "%s()\n", __func__);
+
+ /* Save GPIO settings before reset of APU */
+ retval |= mc417_memory_read(dev, 0x9020, &gpio_output);
+ retval |= mc417_memory_read(dev, 0x900C, &value);
+
+ retval = mc417_register_write(dev,
+ IVTV_REG_VPU, 0xFFFFFFED);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_ENC_SDRAM_REFRESH, 0x80000800);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
+ retval |= mc417_register_write(dev,
+ IVTV_REG_APU, 0);
+
+ if (retval != 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return -1;
+ }
+
+ retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
+ &dev->udev->dev);
+
+ if (retval != 0) {
+ printk(KERN_ERR
+ "ERROR: Hotplug firmware request failed (%s).\n",
+ CX231xx_FIRM_IMAGE_NAME);
+ printk(KERN_ERR "Please fix your hotplug setup, the board will "
+ "not work without firmware loaded!\n");
+ return -1;
+ }
+
+ if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
+ printk(KERN_ERR "ERROR: Firmware size mismatch "
+ "(have %zd, expected %d)\n",
+ firmware->size, CX231xx_FIRM_IMAGE_SIZE);
+ release_firmware(firmware);
+ return -1;
+ }
+
+ if (0 != memcmp(firmware->data, magic, 8)) {
+ printk(KERN_ERR
+ "ERROR: Firmware magic mismatch, wrong file?\n");
+ release_firmware(firmware);
+ return -1;
+ }
+
+ initGPIO(dev);
+
+ /* transfer to the chip */
+ dprintk(2, "Loading firmware to GPIO...\n");
+ p_fw_data = (u32 *)firmware->data;
+ dprintk(2, "firmware->size=%zd\n", firmware->size);
+ for (transfer_size = 0; transfer_size < firmware->size;
+ transfer_size += 4) {
+ fw_data = *p_fw_data;
+
+ mciWriteMemoryToGPIO(dev, address, fw_data, p_current_fw);
+ address = address + 1;
+ p_current_fw += 20;
+ p_fw_data += 1;
+ }
+
+ /*download the firmware by ep5-out*/
+
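+ /* Each 4096-byte bulk transfer carries 1024 little-endian GPIO words. */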
+ for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/_buffer_size);
+ frame++) {
+ for (i = 0; i < _buffer_size; i++) {
+ *(p_buffer + i) = (u8)(*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x000000FF);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x0000FF00) >> 8);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x00FF0000) >> 16);
+ i++;
+ *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0xFF000000) >> 24);
+ }
+ cx231xx_ep5_bulkout(dev, p_buffer, _buffer_size);
+ }
+
+ p_current_fw = p_fw;
+ vfree(p_current_fw);
+ p_current_fw = NULL;
+ uninitGPIO(dev);
+ release_firmware(firmware);
+ dprintk(1, "Firmware upload successful.\n");
+
+ retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS,
+ IVTV_CMD_HW_BLOCKS_RST);
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return retval;
+ }
+ /* F/W power up disturbs the GPIOs, restore state */
+ retval |= mc417_register_write(dev, 0x9020, gpio_output);
+ retval |= mc417_register_write(dev, 0x900C, value);
+
+ retval |= mc417_register_read(dev, IVTV_REG_VPU, &value);
+ retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s: Error with mc417_register_write\n",
+ __func__);
+ return retval;
+ }
+ return 0;
+}
+
+static void cx231xx_417_check_encoder(struct cx231xx *dev)
+{
+ u32 status, seq;
+
+ status = 0;
+ seq = 0;
+ cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
+ dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
+}
+
+static void cx231xx_codec_settings(struct cx231xx *dev)
+{
+ dprintk(1, "%s()\n", __func__);
+
+ /* assign frame size */
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
+ dev->ts1.height, dev->ts1.width);
+
+ dev->mpeg_params.width = dev->ts1.width;
+ dev->mpeg_params.height = dev->ts1.height;
+
+ cx2341x_update(dev, cx231xx_mbox_func, NULL, &dev->mpeg_params);
+
+ cx231xx_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 3, 1);
+ cx231xx_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1);
+}
+
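+/*
+ * Bring up the cx23417: ping the firmware (loading it and locating the
+ * mailbox if the ping fails), apply the codec settings, then start MPEG
+ * capture towards the host interface.
+ */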
+static int cx231xx_initialize_codec(struct cx231xx *dev)
+{
+ int version;
+ int retval;
+ u32 i, data[7];
+ u32 val = 0;
+
+ dprintk(1, "%s()\n", __func__);
+ cx231xx_disable656(dev);
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
+ if (retval < 0) {
+ dprintk(2, "%s() PING OK\n", __func__);
+ retval = cx231xx_load_firmware(dev);
+ if (retval < 0) {
+ printk(KERN_ERR "%s() f/w load failed\n", __func__);
+ return retval;
+ }
+ retval = cx231xx_find_mailbox(dev);
+ if (retval < 0) {
+ printk(KERN_ERR "%s() mailbox < 0, error\n",
+ __func__);
+ return -1;
+ }
+ dev->cx23417_mailbox = retval;
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
+ if (retval < 0) {
+ printk(KERN_ERR
+ "ERROR: cx23417 firmware ping failed!\n");
+ return -1;
+ }
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
+ &version);
+ if (retval < 0) {
+ printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
+ "version failed!\n");
+ return -1;
+ }
+ dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
+ msleep(200);
+ }
+
+ for (i = 0; i < 1; i++) {
+ retval = mc417_register_read(dev, 0x20f8, &val);
+ dprintk(3, "***before enable656() VIM Capture Lines =%d ***\n",
+ val);
+ if (retval < 0)
+ return retval;
+ }
+
+ cx231xx_enable656(dev);
+ /* stop mpeg capture */
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE,
+ 3, 0, 1, 3, 4);
+
+ cx231xx_codec_settings(dev);
+ msleep(60);
+
+/* cx231xx_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
+ CX231xx_FIELD1_SAA7115, CX231xx_FIELD2_SAA7115);
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
+ CX231xx_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0);
+*/
+ /* Setup to capture VBI */
+ data[0] = 0x0001BD00;
+ data[1] = 1; /* frames per interrupt */
+ data[2] = 4; /* total bufs */
+ data[3] = 0x91559155; /* start codes */
+ data[4] = 0x206080C0; /* stop codes */
+ data[5] = 6; /* lines */
+ data[6] = 64; /* BPL */
+/*
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1],
+ data[2], data[3], data[4], data[5], data[6]);
+
+ for (i = 2; i <= 24; i++) {
+ int valid;
+
+ valid = ((i >= 19) && (i <= 21));
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0, i,
+ valid, 0 , 0, 0);
+ cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0,
+ i | 0x80000000, valid, 0, 0, 0);
+ }
+*/
+/* cx231xx_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX231xx_UNMUTE);
+ msleep(60);
+*/
+ /* initialize the video input */
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
+ if (retval < 0)
+ return retval;
+ msleep(60);
+
+ /* Enable VIP style pixel invalidation so we work with scaled mode */
+ mc417_memory_write(dev, 2120, 0x00000080);
+
+ /* start capturing to the host interface */
+ retval = cx231xx_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
+ CX231xx_MPEG_CAPTURE, CX231xx_RAW_BITS_NONE);
+ if (retval < 0)
+ return retval;
+ msleep(10);
+
+ for (i = 0; i < 1; i++) {
+ mc417_register_read(dev, 0x20f8, &val);
+ dprintk(3, "***VIM Capture Lines =%d ***\n", val);
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+
+static int bb_buf_setup(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+
+ fh->dev->ts1.ts_packet_size = mpeglinesize;
+ fh->dev->ts1.ts_packet_count = mpeglines;
+
+ *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ *count = mpegbufs;
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+{
+ struct cx231xx_fh *fh = vq->priv_data;
+ struct cx231xx *dev = fh->dev;
+ unsigned long flags = 0;
+
+ BUG_ON(in_interrupt());
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO) {
+ if (dev->video_mode.isoc_ctl.buf == buf)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ } else {
+ if (dev->video_mode.bulk_ctl.buf == buf)
+ dev->video_mode.bulk_ctl.buf = NULL;
+ }
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
+ videobuf_vmalloc_free(&buf->vb);
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
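+/*
+ * Append len bytes of encoder data to the videobuf currently being
+ * filled; once mpeglines * mpeglinesize bytes have accumulated the
+ * buffer is marked DONE and any remainder is stashed in
+ * dma_q->p_left_data for the next call.
+ */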
+static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
+ struct cx231xx_dmaqueue *dma_q)
+{
+ void *vbuf;
+ struct cx231xx_buffer *buf;
+ u32 tail_data = 0;
+ char *p_data;
+
+ if (dma_q->mpeg_buffer_done == 0) {
+ if (list_empty(&dma_q->active))
+ return;
+
+ buf = list_entry(dma_q->active.next,
+ struct cx231xx_buffer, vb.queue);
+ dev->video_mode.isoc_ctl.buf = buf;
+ dma_q->mpeg_buffer_done = 1;
+ }
+ /* Fill buffer */
+ buf = dev->video_mode.isoc_ctl.buf;
+ vbuf = videobuf_to_vmalloc(&buf->vb);
+
+ if ((dma_q->mpeg_buffer_completed+len) <
+ mpeglines*mpeglinesize) {
+ if (dma_q->add_ps_package_head ==
+ CX231XX_NEED_ADD_PS_PACKAGE_HEAD) {
+ memcpy(vbuf+dma_q->mpeg_buffer_completed,
+ dma_q->ps_head, 3);
+ dma_q->mpeg_buffer_completed =
+ dma_q->mpeg_buffer_completed + 3;
+ dma_q->add_ps_package_head =
+ CX231XX_NONEED_PS_PACKAGE_HEAD;
+ }
+ memcpy(vbuf+dma_q->mpeg_buffer_completed, data, len);
+ dma_q->mpeg_buffer_completed =
+ dma_q->mpeg_buffer_completed + len;
+ } else {
+ dma_q->mpeg_buffer_done = 0;
+
+ tail_data =
+ mpeglines*mpeglinesize - dma_q->mpeg_buffer_completed;
+ memcpy(vbuf+dma_q->mpeg_buffer_completed,
+ data, tail_data);
+
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+ dma_q->mpeg_buffer_completed = 0;
+
+ if (len - tail_data > 0) {
+ p_data = data + tail_data;
+ dma_q->left_data_count = len - tail_data;
+ memcpy(dma_q->p_left_data,
+ p_data, len - tail_data);
+ }
+ }
+}
+
+static void buffer_filled(char *data, int len, struct urb *urb,
+ struct cx231xx_dmaqueue *dma_q)
+{
+ void *vbuf;
+ struct cx231xx_buffer *buf;
+
+ if (list_empty(&dma_q->active))
+ return;
+
+
+ buf = list_entry(dma_q->active.next,
+ struct cx231xx_buffer, vb.queue);
+
+
+ /* Fill buffer */
+ vbuf = videobuf_to_vmalloc(&buf->vb);
+ memcpy(vbuf, data, len);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+}
+
+static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
+{
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *p_buffer;
+ u32 buffer_size = 0;
+ u32 i = 0;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ if (dma_q->left_data_count > 0) {
+ buffer_copy(dev, dma_q->p_left_data,
+ dma_q->left_data_count, urb, dma_q);
+ dma_q->mpeg_buffer_completed = dma_q->left_data_count;
+ dma_q->left_data_count = 0;
+ }
+
+ p_buffer = urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset;
+ buffer_size = urb->iso_frame_desc[i].actual_length;
+
+ if (buffer_size > 0)
+ buffer_copy(dev, p_buffer, buffer_size, urb, dma_q);
+ }
+
+ return 0;
+}
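+
+/*
+ * Bulk path: carry the last three bytes of each transfer over to the
+ * head of the next one via dma_q->ps_head before handing the data to
+ * buffer_filled().
+ */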
+static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+
+ /*char *outp;*/
+ /*struct cx231xx_buffer *buf;*/
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *p_buffer, *buffer;
+ u32 buffer_size = 0;
+
+ p_buffer = urb->transfer_buffer;
+ buffer_size = urb->actual_length;
+
+ buffer = kmalloc(buffer_size, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
+
+ memcpy(buffer, dma_q->ps_head, 3);
+ memcpy(buffer+3, p_buffer, buffer_size-3);
+ memcpy(dma_q->ps_head, p_buffer+buffer_size-3, 3);
+
+ p_buffer = buffer;
+ buffer_filled(p_buffer, buffer_size, urb, dma_q);
+
+ kfree(buffer);
+ return 0;
+}
+
+static int bb_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = fh->dev;
+ int rc = 0, urb_init = 0;
+ int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+
+ dma_qq = &dev->video_mode.vidq;
+
+ if (0 != buf->vb.baddr && buf->vb.bsize < size)
+ return -EINVAL;
+ buf->vb.width = fh->dev->ts1.ts_packet_size;
+ buf->vb.height = fh->dev->ts1.ts_packet_count;
+ buf->vb.size = size;
+ buf->vb.field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+ rc = videobuf_iolock(q, &buf->vb, NULL);
+ if (rc < 0)
+ goto fail;
+ }
+
+ if (dev->USE_ISO) {
+ if (!dev->video_mode.isoc_ctl.num_bufs)
+ urb_init = 1;
+ } else {
+ if (!dev->video_mode.bulk_ctl.num_bufs)
+ urb_init = 1;
+ }
+ /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);*/
+ dev->mode_tv = 1;
+
+ if (urb_init) {
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ rc = cx231xx_unmute_audio(dev);
+ if (dev->USE_ISO) {
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+ rc = cx231xx_init_isoc(dev, mpeglines,
+ mpegbufs,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ } else {
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_init_bulk(dev, mpeglines,
+ mpegbufs,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ }
+ if (rc < 0)
+ goto fail;
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(q, buf);
+ return rc;
+}
+
+static void bb_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct cx231xx_fh *fh = q->priv_data;
+
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = fh->dev;
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vidq->active);
+
+}
+
+static void bb_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb);
+ /*struct cx231xx_fh *fh = q->priv_data;*/
+ /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+
+ free_buffer(q, buf);
+}
+
+static struct videobuf_queue_ops cx231xx_qops = {
+ .buf_setup = bb_buf_setup,
+ .buf_prepare = bb_buf_prepare,
+ .buf_queue = bb_buf_queue,
+ .buf_release = bb_buf_release,
+};
+
+/* ------------------------------------------------------------------ */
+
+static const u32 *ctrl_classes[] = {
+ cx2341x_mpeg_ctrls,
+ NULL
+};
+
+static int cx231xx_queryctrl(struct cx231xx *dev,
+ struct v4l2_queryctrl *qctrl)
+{
+ qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
+ if (qctrl->id == 0)
+ return -EINVAL;
+
+ /* MPEG V4L2 controls */
+ if (cx2341x_ctrl_query(&dev->mpeg_params, qctrl))
+ qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+
+ return 0;
+}
+
+static int cx231xx_querymenu(struct cx231xx *dev,
+ struct v4l2_querymenu *qmenu)
+{
+ struct v4l2_queryctrl qctrl;
+
+ qctrl.id = qmenu->id;
+ cx231xx_queryctrl(dev, &qctrl);
+ return v4l2_ctrl_query_menu(qmenu, &qctrl,
+ cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id));
+}
+
+static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ *norm = dev->encodernorm.id;
+ return 0;
+}
+
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
+ if (*id & cx231xx_tvnorms[i].id)
+ break;
+ if (i == ARRAY_SIZE(cx231xx_tvnorms))
+ return -EINVAL;
+ dev->encodernorm = cx231xx_tvnorms[i];
+
+ if (dev->encodernorm.id & 0xb000) {
+ dprintk(3, "encodernorm set to NTSC\n");
+ dev->norm = V4L2_STD_NTSC;
+ dev->ts1.height = 480;
+ dev->mpeg_params.is_50hz = 0;
+ } else {
+ dprintk(3, "encodernorm set to PAL\n");
+ dev->norm = V4L2_STD_PAL_B;
+ dev->ts1.height = 576;
+ dev->mpeg_params.is_50hz = 1;
+ }
+ call_all(dev, core, s_std, dev->norm);
+ /* do mode control overrides */
+ cx231xx_do_mode_ctrl_overrides(dev);
+
+ dprintk(3, "exit vidioc_s_std() i=0x%x\n", i);
+ return 0;
+}
+static int vidioc_g_audio(struct file *file, void *fh,
+ struct v4l2_audio *a)
+{
+ if (a->index > 0)
+ return -EINVAL;
+
+ strlcpy(a->name, "VideoGrabber Audio", sizeof(a->name));
+ a->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
+
+static int vidioc_enumaudio(struct file *file, void *fh,
+ struct v4l2_audio *a)
+{
+ if (a->index > 0)
+ return -EINVAL;
+
+ strlcpy(a->name, "VideoGrabber Audio", sizeof(a->name));
+ a->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
+
+static const char *iname[] = {
+ [CX231XX_VMUX_COMPOSITE1] = "Composite1",
+ [CX231XX_VMUX_SVIDEO] = "S-Video",
+ [CX231XX_VMUX_TELEVISION] = "Television",
+ [CX231XX_VMUX_CABLE] = "Cable TV",
+ [CX231XX_VMUX_DVB] = "DVB",
+ [CX231XX_VMUX_DEBUG] = "for debug only",
+};
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ struct cx231xx_input *input;
+ int n;
+ dprintk(3, "enter vidioc_enum_input()i->index=%d\n", i->index);
+
+ if (i->index >= 4)
+ return -EINVAL;
+
+ input = &cx231xx_boards[dev->model].input[i->index];
+
+ if (input->type == 0)
+ return -EINVAL;
+
+ /* FIXME
+ * strcpy(i->name, input->name); */
+
+ n = i->index;
+ strcpy(i->name, iname[INPUT(n)->type]);
+
+ if (input->type == CX231XX_VMUX_TELEVISION ||
+ input->type == CX231XX_VMUX_CABLE)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(3, "enter vidioc_s_input() i=%d\n", i);
+
+ mutex_lock(&dev->lock);
+
+ video_mux(dev, i);
+
+ mutex_unlock(&dev->lock);
+
+ if (i >= 4)
+ return -EINVAL;
+ dev->input = i;
+ dprintk(3, "exit vidioc_s_input()\n");
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_s_ctrl()\n");
+ /* Update the A/V core */
+ call_all(dev, core, s_ctrl, ctl);
+ dprintk(3, "exit vidioc_s_ctrl()\n");
+ return 0;
+}
+
+static struct v4l2_capability pvr_capability = {
+ .driver = "cx231xx",
+ .card = "VideoGrabber",
+ .bus_info = "usb",
+ .version = 1,
+ .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE),
+ .reserved = {0, 0, 0, 0}
+};
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "MPEG", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = dev->ts1.width;
+ f->fmt.pix.height = dev->ts1.height;
+ f->fmt.pix.field = fh->vidq.field;
+ dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
+ dev->ts1.width, dev->ts1.height, fh->vidq.field);
+ dprintk(3, "exit vidioc_g_fmt_vid_cap()\n");
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
+ dev->ts1.width, dev->ts1.height, fh->vidq.field);
+ dprintk(3, "exit vidioc_try_fmt_vid_cap()\n");
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+
+ return 0;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_reqbufs(&fh->vidq, p);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_querybuf(&fh->vidq, p);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_qbuf(&fh->vidq, p);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct cx231xx_fh *fh = priv;
+
+ return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ struct cx231xx *dev = fh->dev;
+ int rc = 0;
+ dprintk(3, "enter vidioc_streamon()\n");
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else {
+ rc = cx231xx_init_bulk(dev, 320,
+ 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ }
+ dprintk(3, "exit vidioc_streamon()\n");
+ return videobuf_streamon(&fh->vidq);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct cx231xx_fh *fh = file->private_data;
+
+ return videobuf_streamoff(&fh->vidq);
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ dprintk(3, "enter vidioc_g_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+ dprintk(3, "exit vidioc_g_ext_ctrls()\n");
+ return cx2341x_ext_ctrls(&dev->mpeg_params, 0, f, VIDIOC_G_EXT_CTRLS);
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ struct cx2341x_mpeg_params p;
+ int err;
+ dprintk(3, "enter vidioc_s_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+
+ p = dev->mpeg_params;
+ err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
+ if (err == 0) {
+ err = cx2341x_update(dev, cx231xx_mbox_func,
+ &dev->mpeg_params, &p);
+ dev->mpeg_params = p;
+ }
+
+ return err;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *f)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ struct cx2341x_mpeg_params p;
+ int err;
+ dprintk(3, "enter vidioc_try_ext_ctrls()\n");
+ if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
+ return -EINVAL;
+
+ p = dev->mpeg_params;
+ err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
+ dprintk(3, "exit vidioc_try_ext_ctrls() err=%d\n", err);
+ return err;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ char name[32 + 2];
+
+ snprintf(name, sizeof(name), "%s/2", dev->name);
+ dprintk(3,
+ "%s/2: ============ START LOG STATUS ============\n",
+ dev->name);
+ call_all(dev, core, log_status);
+ cx2341x_log_status(&dev->mpeg_params, name);
+ dprintk(3,
+ "%s/2: ============= END LOG STATUS =============\n",
+ dev->name);
+ return 0;
+}
+
+static int vidioc_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *a)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ int rc;
+
+ dprintk(3, "enter vidioc_querymenu()\n");
+ rc = cx231xx_querymenu(dev, a);
+ dprintk(3, "exit vidioc_querymenu()\n");
+ return rc;
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+ int rc;
+
+ dprintk(3, "enter vidioc_queryctrl()\n");
+ rc = cx231xx_queryctrl(dev, c);
+ dprintk(3, "exit vidioc_queryctrl()\n");
+ return rc;
+}
+
+static int mpeg_open(struct file *file)
+{
+ int minor = video_devdata(file)->minor;
+ struct cx231xx *h, *dev = NULL;
+ /*struct list_head *list;*/
+ struct cx231xx_fh *fh;
+ /*u32 value = 0;*/
+
+ dprintk(2, "%s()\n", __func__);
+
+ list_for_each_entry(h, &cx231xx_devlist, devlist) {
+ if (h->v4l_device->minor == minor)
+ dev = h;
+ }
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ mutex_lock(&dev->lock);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh) {
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+
+ videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
+ NULL, &dev->video_mode.slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
+ sizeof(struct cx231xx_buffer), fh, NULL);
+/*
+ videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
+ &dev->udev->dev, &dev->ts1.slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct cx231xx_buffer),
+ fh, NULL);
+*/
+
+
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
+
+ cx231xx_initialize_codec(dev);
+
+ mutex_unlock(&dev->lock);
+ cx231xx_start_TS1(dev);
+
+ return 0;
+}
+
+static int mpeg_release(struct file *file)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
+
+ if (!dev) {
+ dprintk(3, "abort!!!\n");
+ return 0;
+ }
+
+ mutex_lock(&dev->lock);
+
+ cx231xx_stop_TS1(dev);
+
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
+
+ cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
+
+ /* FIXME: Review this crap */
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
+ /* stop mpeg capture */
+
+ msleep(500);
+ cx231xx_417_check_encoder(dev);
+
+ }
+ }
+
+ if (fh->vidq.streaming)
+ videobuf_streamoff(&fh->vidq);
+ if (fh->vidq.reading)
+ videobuf_read_stop(&fh->vidq);
+
+ videobuf_mmap_free(&fh->vidq);
+ file->private_data = NULL;
+ kfree(fh);
+ mutex_unlock(&dev->lock);
+ return 0;
+}
+
+static ssize_t mpeg_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+
+ /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
+ /* Start mpeg encoder on first read. */
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
+ if (cx231xx_initialize_codec(dev) < 0)
+ return -EINVAL;
+ }
+ }
+
+ return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+static unsigned int mpeg_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ /*struct cx231xx *dev = fh->dev;*/
+
+ /*dprintk(2, "%s\n", __func__);*/
+
+ return videobuf_poll_stream(file, &fh->vidq, wait);
+}
+
+static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct cx231xx_fh *fh = file->private_data;
+ struct cx231xx *dev = fh->dev;
+
+ dprintk(2, "%s()\n", __func__);
+
+ return videobuf_mmap_mapper(&fh->vidq, vma);
+}
+
+static struct v4l2_file_operations mpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = mpeg_open,
+ .release = mpeg_release,
+ .read = mpeg_read,
+ .poll = mpeg_poll,
+ .mmap = mpeg_mmap,
+ .ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_enumaudio = vidioc_enumaudio,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_querymenu = vidioc_querymenu,
+ .vidioc_queryctrl = vidioc_queryctrl,
+/* .vidioc_g_chip_ident = cx231xx_g_chip_ident,*/
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/* .vidioc_g_register = cx231xx_g_register,*/
+/* .vidioc_s_register = cx231xx_s_register,*/
+#endif
+};
+
+static struct video_device cx231xx_mpeg_template = {
+ .name = "cx231xx",
+ .fops = &mpeg_fops,
+ .ioctl_ops = &mpeg_ioctl_ops,
+ .minor = -1,
+ .tvnorms = CX231xx_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+void cx231xx_417_unregister(struct cx231xx *dev)
+{
+ dprintk(1, "%s()\n", __func__);
+ dprintk(3, "%s()\n", __func__);
+
+ if (dev->v4l_device) {
+ if (-1 != dev->v4l_device->minor)
+ video_unregister_device(dev->v4l_device);
+ else
+ video_device_release(dev->v4l_device);
+ dev->v4l_device = NULL;
+ }
+}
+
+static struct video_device *cx231xx_video_dev_alloc(
+ struct cx231xx *dev,
+ struct usb_device *usbdev,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+
+ dprintk(1, "%s()\n", __func__);
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+ *vfd = *template;
+ vfd->minor = -1;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, cx231xx_boards[dev->model].name);
+
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->release = video_device_release;
+
+ return vfd;
+
+}
+
+int cx231xx_417_register(struct cx231xx *dev)
+{
+ /* FIXME: Port1 hardcoded here */
+ int err = -ENODEV;
+ struct cx231xx_tsport *tsport = &dev->ts1;
+
+ dprintk(1, "%s()\n", __func__);
+
+ /* Set default TV standard */
+ dev->encodernorm = cx231xx_tvnorms[0];
+
+ if (dev->encodernorm.id & V4L2_STD_525_60)
+ tsport->height = 480;
+ else
+ tsport->height = 576;
+
+ tsport->width = 720;
+ cx2341x_fill_defaults(&dev->mpeg_params);
+ dev->norm = V4L2_STD_NTSC;
+
+ dev->mpeg_params.port = CX2341X_PORT_SERIAL;
+
+ /* Allocate and initialize V4L video device */
+ dev->v4l_device = cx231xx_video_dev_alloc(dev,
+ dev->udev, &cx231xx_mpeg_template, "mpeg");
+ if (dev->v4l_device == NULL)
+ return -ENOMEM;
+
+ err = video_register_device(dev->v4l_device,
+ VFL_TYPE_GRABBER, -1);
+ if (err < 0) {
+ dprintk(3, "%s: can't register mpeg device\n", dev->name);
+ return err;
+ }
+
+ dprintk(3, "%s: registered device video%d [mpeg]\n",
+ dev->name, dev->v4l_device->num);
+
+ return 0;
+}
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 7cae95a2245e..30d13c15739a 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -75,6 +75,30 @@ static int cx231xx_isoc_audio_deinit(struct cx231xx *dev)
return 0;
}
+static int cx231xx_bulk_audio_deinit(struct cx231xx *dev)
+{
+ int i;
+
+ dprintk("Stopping bulk\n");
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ if (dev->adev.urb[i]) {
+ if (!irqs_disabled())
+ usb_kill_urb(dev->adev.urb[i]);
+ else
+ usb_unlink_urb(dev->adev.urb[i]);
+
+ usb_free_urb(dev->adev.urb[i]);
+ dev->adev.urb[i] = NULL;
+
+ kfree(dev->adev.transfer_buffer[i]);
+ dev->adev.transfer_buffer[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
static void cx231xx_audio_isocirq(struct urb *urb)
{
struct cx231xx *dev = urb->context;
@@ -100,6 +124,9 @@ static void cx231xx_audio_isocirq(struct urb *urb)
break;
}
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
if (dev->adev.capture_pcm_substream) {
substream = dev->adev.capture_pcm_substream;
runtime = substream->runtime;
@@ -158,14 +185,95 @@ static void cx231xx_audio_isocirq(struct urb *urb)
return;
}
+static void cx231xx_audio_bulkirq(struct urb *urb)
+{
+ struct cx231xx *dev = urb->context;
+ unsigned int oldptr;
+ int period_elapsed = 0;
+ int status;
+ unsigned char *cp;
+ unsigned int stride;
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ dprintk("urb completition error %d.\n", urb->status);
+ break;
+ }
+
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
+ if (dev->adev.capture_pcm_substream) {
+ substream = dev->adev.capture_pcm_substream;
+ runtime = substream->runtime;
+ stride = runtime->frame_bits >> 3;
+
+ if (1) {
+ int length = urb->actual_length /
+ stride;
+ cp = (unsigned char *)urb->transfer_buffer;
+
+ oldptr = dev->adev.hwptr_done_capture;
+ if (oldptr + length >= runtime->buffer_size) {
+ unsigned int cnt;
+
+ cnt = runtime->buffer_size - oldptr;
+ memcpy(runtime->dma_area + oldptr * stride, cp,
+ cnt * stride);
+ memcpy(runtime->dma_area, cp + cnt * stride,
+ length * stride - cnt * stride);
+ } else {
+ memcpy(runtime->dma_area + oldptr * stride, cp,
+ length * stride);
+ }
+
+ snd_pcm_stream_lock(substream);
+
+ dev->adev.hwptr_done_capture += length;
+ if (dev->adev.hwptr_done_capture >=
+ runtime->buffer_size)
+ dev->adev.hwptr_done_capture -=
+ runtime->buffer_size;
+
+ dev->adev.capture_transfer_done += length;
+ if (dev->adev.capture_transfer_done >=
+ runtime->period_size) {
+ dev->adev.capture_transfer_done -=
+ runtime->period_size;
+ period_elapsed = 1;
+ }
+ snd_pcm_stream_unlock(substream);
+ }
+ if (period_elapsed)
+ snd_pcm_period_elapsed(substream);
+ }
+ urb->status = 0;
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
+ cx231xx_errdev("resubmit of audio urb failed (error=%i)\n",
+ status);
+ }
+ return;
+}
+
static int cx231xx_init_audio_isoc(struct cx231xx *dev)
{
int i, errCode;
int sb_size;
- cx231xx_info("%s: Starting AUDIO transfers\n", __func__);
+ cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__);
- sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
+ sb_size = CX231XX_ISO_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
struct urb *urb;
@@ -176,7 +284,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
return -ENOMEM;
memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
- urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
+ urb = usb_alloc_urb(CX231XX_ISO_NUM_AUDIO_PACKETS, GFP_ATOMIC);
if (!urb) {
cx231xx_errdev("usb_alloc_urb failed!\n");
for (j = 0; j < i; j++) {
@@ -194,10 +302,10 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
- urb->number_of_packets = CX231XX_NUM_AUDIO_PACKETS;
+ urb->number_of_packets = CX231XX_ISO_NUM_AUDIO_PACKETS;
urb->transfer_buffer_length = sb_size;
- for (j = k = 0; j < CX231XX_NUM_AUDIO_PACKETS;
+ for (j = k = 0; j < CX231XX_ISO_NUM_AUDIO_PACKETS;
j++, k += dev->adev.max_pkt_size) {
urb->iso_frame_desc[j].offset = k;
urb->iso_frame_desc[j].length = dev->adev.max_pkt_size;
@@ -216,27 +324,56 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
return errCode;
}
-static int cx231xx_cmd(struct cx231xx *dev, int cmd, int arg)
+static int cx231xx_init_audio_bulk(struct cx231xx *dev)
{
- dprintk("%s transfer\n", (dev->adev.capture_stream == STREAM_ON) ?
- "stop" : "start");
+ int i, errCode;
+ int sb_size;
- switch (cmd) {
- case CX231XX_CAPTURE_STREAM_EN:
- if (dev->adev.capture_stream == STREAM_OFF && arg == 1) {
- dev->adev.capture_stream = STREAM_ON;
- cx231xx_init_audio_isoc(dev);
- } else if (dev->adev.capture_stream == STREAM_ON && arg == 0) {
- dev->adev.capture_stream = STREAM_OFF;
- cx231xx_isoc_audio_deinit(dev);
- } else {
- cx231xx_errdev("An underrun very likely occurred. "
- "Ignoring it.\n");
+ cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__);
+
+ sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ struct urb *urb;
+ int j;
+
+ dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC);
+ if (!dev->adev.transfer_buffer[i])
+ return -ENOMEM;
+
+ memset(dev->adev.transfer_buffer[i], 0x80, sb_size);
+ urb = usb_alloc_urb(CX231XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
+ if (!urb) {
+ cx231xx_errdev("usb_alloc_urb failed!\n");
+ for (j = 0; j < i; j++) {
+ usb_free_urb(dev->adev.urb[j]);
+ kfree(dev->adev.transfer_buffer[j]);
+ }
+ return -ENOMEM;
}
- return 0;
- default:
- return -EINVAL;
+
+ urb->dev = dev->udev;
+ urb->context = dev;
+ urb->pipe = usb_rcvbulkpipe(dev->udev,
+ dev->adev.end_point_addr);
+ urb->transfer_flags = 0;
+ urb->transfer_buffer = dev->adev.transfer_buffer[i];
+ urb->complete = cx231xx_audio_bulkirq;
+ urb->transfer_buffer_length = sb_size;
+
+ dev->adev.urb[i] = urb;
+
}
+
+ for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
+ errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
+ if (errCode < 0) {
+ cx231xx_bulk_audio_deinit(dev);
+ return errCode;
+ }
+ }
+
+ return errCode;
}
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
@@ -300,19 +437,24 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
/* set alternate setting for audio interface */
/* 1 - 48000 samples per sec */
- ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 1);
+ mutex_lock(&dev->lock);
+ if (dev->USE_ISO)
+ ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 1);
+ else
+ ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
+ mutex_unlock(&dev->lock);
if (ret < 0) {
cx231xx_errdev("failed to set alternate setting !\n");
return ret;
}
- /* inform hardware to start streaming */
- ret = cx231xx_capture_start(dev, 1, Audio);
-
runtime->hw = snd_cx231xx_hw_capture;
mutex_lock(&dev->lock);
+ /* inform hardware to start streaming */
+ ret = cx231xx_capture_start(dev, 1, Audio);
+
dev->adev.users++;
mutex_unlock(&dev->lock);
@@ -330,20 +472,21 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
dprintk("closing device\n");
+ /* inform hardware to stop streaming */
+ mutex_lock(&dev->lock);
+ ret = cx231xx_capture_start(dev, 0, Audio);
+
/* set alternate setting for audio interface */
/* 1 - 48000 samples per sec */
ret = cx231xx_set_alt_setting(dev, INDEX_AUDIO, 0);
if (ret < 0) {
cx231xx_errdev("failed to set alternate setting !\n");
+ mutex_unlock(&dev->lock);
return ret;
}
- /* inform hardware to start streaming */
- ret = cx231xx_capture_start(dev, 0, Audio);
-
dev->mute = 1;
- mutex_lock(&dev->lock);
dev->adev.users--;
mutex_unlock(&dev->lock);
@@ -352,7 +495,10 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
dprintk("disabling audio stream!\n");
dev->adev.shutdown = 0;
dprintk("released lock\n");
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, 0);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
}
return 0;
}
@@ -383,43 +529,64 @@ static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
dprintk("Stop capture, if needed\n");
- if (dev->adev.capture_stream == STREAM_ON)
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, CX231XX_STOP_AUDIO);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
return 0;
}
static int snd_cx231xx_prepare(struct snd_pcm_substream *substream)
{
+ struct cx231xx *dev = snd_pcm_substream_chip(substream);
+
+ dev->adev.hwptr_done_capture = 0;
+ dev->adev.capture_transfer_done = 0;
+
return 0;
}
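+/*
+ * Deferred start/stop of the audio stream, run from the wq_trigger
+ * work item scheduled by the PCM trigger, hw_free and close callbacks.
+ */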
+static void audio_trigger(struct work_struct *work)
+{
+ struct cx231xx *dev = container_of(work, struct cx231xx, wq_trigger);
+
+ if (atomic_read(&dev->stream_started)) {
+ dprintk("starting capture");
+ if (is_fw_load(dev) == 0)
+ cx25840_call(dev, core, load_fw);
+ if (dev->USE_ISO)
+ cx231xx_init_audio_isoc(dev);
+ else
+ cx231xx_init_audio_bulk(dev);
+ } else {
+ dprintk("stopping capture");
+ if (dev->USE_ISO)
+ cx231xx_isoc_audio_deinit(dev);
+ else
+ cx231xx_bulk_audio_deinit(dev);
+ }
+}
+
static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
int retval;
- dprintk("Should %s capture\n", (cmd == SNDRV_PCM_TRIGGER_START) ?
- "start" : "stop");
-
spin_lock(&dev->adev.slock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN,
- CX231XX_START_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
- cx231xx_cmd(dev, CX231XX_CAPTURE_STREAM_EN, CX231XX_STOP_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
-
spin_unlock(&dev->adev.slock);
- return retval;
+
+ schedule_work(&dev->wq_trigger);
+
+ return 0;
}
static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
@@ -495,10 +662,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
+ snd_card_set_dev(card, &dev->udev->dev);
strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
+ INIT_WORK(&dev->wq_trigger, audio_trigger);
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index c2174413ab29..cf50fafa8abb 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -31,13 +31,16 @@
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
#include "cx231xx.h"
+#include "cx231xx-dif.h"
+#define TUNER_MODE_FM_RADIO 0
/******************************************************************************
-: BLOCK ARRANGEMENT :-
I2S block ----------------------|
@@ -50,6 +53,57 @@
[Video]
*******************************************************************************/
+/******************************************************************************
+ * VERVE REGISTER *
+ * *
+ ******************************************************************************/
+static int verve_write_byte(struct cx231xx *dev, u8 saddr, u8 data)
+{
+ return cx231xx_write_i2c_data(dev, VERVE_I2C_ADDRESS,
+ saddr, 1, data, 1);
+}
+
+static int verve_read_byte(struct cx231xx *dev, u8 saddr, u8 *data)
+{
+ int status;
+ u32 temp = 0;
+
+ status = cx231xx_read_i2c_data(dev, VERVE_I2C_ADDRESS,
+ saddr, 1, &temp, 1);
+ *data = (u8) temp;
+ return status;
+}
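+
+/*
+ * Set up the GPIO direction and VERVE register 0x07 and start capture
+ * before the cx23417 firmware is downloaded; uninitGPIO() below undoes
+ * this once the download is complete.
+ */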
+void initGPIO(struct cx231xx *dev)
+{
+ u32 _gpio_direction = 0;
+ u32 value = 0;
+ u8 val = 0;
+
+ _gpio_direction = _gpio_direction & 0xFC0003FF;
+ _gpio_direction = _gpio_direction | 0x03FDFC00;
+ cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0);
+
+ verve_read_byte(dev, 0x07, &val);
+ cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+ verve_write_byte(dev, 0x07, 0xF4);
+ verve_read_byte(dev, 0x07, &val);
+ cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
+
+ cx231xx_capture_start(dev, 1, 2);
+
+ cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00);
+ cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF);
+
+}
+void uninitGPIO(struct cx231xx *dev)
+{
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ cx231xx_capture_start(dev, 0, 2);
+ verve_write_byte(dev, 0x07, 0x14);
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ 0x68, value, 4);
+}
/******************************************************************************
* A F E - B L O C K C O N T R O L functions *
@@ -258,7 +312,7 @@ int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode)
switch (mode) {
case AFE_MODE_LOW_IF:
- /* SetupAFEforLowIF(); */
+ cx231xx_Setup_AFE_for_LowIF(dev);
break;
case AFE_MODE_BASEBAND:
status = cx231xx_afe_setup_AFE_for_baseband(dev);
@@ -291,8 +345,15 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
int status = 0;
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_CNXT_VIDEO_GRABBER:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ case CX231XX_BOARD_HAUPPAUGE_USBLIVE2:
if (avmode == POLARIS_AVMODE_ANALOGT_TV) {
while (afe_power_status != (FLD_PWRDN_TUNING_BIAS |
FLD_PWRDN_ENABLE_PLL)) {
@@ -483,6 +544,17 @@ static int vid_blk_read_word(struct cx231xx *dev, u16 saddr, u32 *data)
return cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
saddr, 2, data, 4);
}
+int cx231xx_check_fw(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status = 0;
+
+ status = vid_blk_read_byte(dev, DL_CTL_ADDRESS_LOW, &temp);
+ if (status < 0)
+ return status;
+ return temp;
+}
int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
{
@@ -521,9 +593,15 @@ int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input)
return status;
}
}
- status = cx231xx_set_decoder_video_input(dev,
+ if (dev->tuner_type == TUNER_NXP_TDA18271)
+ status = cx231xx_set_decoder_video_input(dev,
+ CX231XX_VMUX_TELEVISION,
+ INPUT(input)->vmux);
+ else
+ status = cx231xx_set_decoder_video_input(dev,
CX231XX_VMUX_COMPOSITE1,
INPUT(input)->vmux);
+
break;
default:
cx231xx_errdev("%s: set_power_mode : Unknown Input %d !\n",
@@ -578,12 +656,12 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
value |= (1 << 7);
status = vid_blk_write_word(dev, OUT_CTRL1, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1,
FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
@@ -681,7 +759,9 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
case CX231XX_VMUX_CABLE:
default:
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
/* Disable the use of DIF */
@@ -699,11 +779,11 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
value |= (1 << 7);
status = vid_blk_write_word(dev, OUT_CTRL1, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1, FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Tell DIF object to go to baseband mode */
status = cx231xx_dif_set_standard(dev,
@@ -790,11 +870,11 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
(FLD_OEF_AGC_IF);
status = vid_blk_write_word(dev, PIN_CTRL, value);
- /* Set vip 1.1 output mode */
+ /* Set output mode */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
OUT_CTRL1, FLD_OUT_MODE,
- OUT_MODE_VIP11);
+ dev->board.output_mode);
/* Disable auto config of registers */
status = cx231xx_read_modify_write_i2c_dword(dev,
@@ -816,9 +896,21 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
/* Set VGA_SEL (for audio control) (bit 7-8) */
status = vid_blk_read_word(dev, AFE_CTRL, &value);
+ /*Set Func mode:01-DIF 10-baseband 11-YUV*/
+ value &= (~(FLD_FUNC_MODE));
+ value |= 0x800000;
+
value |= FLD_VGA_SEL_CH3 | FLD_VGA_SEL_CH2;
status = vid_blk_write_word(dev, AFE_CTRL, value);
+
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ status = vid_blk_read_word(dev, PIN_CTRL,
+ &value);
+ status = vid_blk_write_word(dev, PIN_CTRL,
+ (value & 0xFFFFFFEF));
+ }
+
break;
}
@@ -840,6 +932,39 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
return status;
}
+void cx231xx_enable656(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status;
+ /*enable TS1 data[0:7] as output to export 656*/
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
+
+ /*enable TS1 clock as output to export 656*/
+
+ status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ temp = temp|0x04;
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+
+}
+EXPORT_SYMBOL_GPL(cx231xx_enable656);
+
+void cx231xx_disable656(struct cx231xx *dev)
+{
+ u8 temp = 0;
+ int status;
+
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
+
+ status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ temp = temp&0xFB;
+
+ status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+}
+EXPORT_SYMBOL_GPL(cx231xx_disable656);
+
/*
* Handle any video-mode specific overrides that are different
* on a per video standards basis after touching the MODE_CTRL
@@ -868,12 +993,12 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
FLD_VACTIVE_CNT,
- 0x1E6000);
+ 0x1E7000);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
FLD_V656BLANK_CNT,
- 0x1E000000);
+ 0x1C000000);
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -881,12 +1006,27 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
FLD_HBLANK_CNT,
cx231xx_set_field
(FLD_HBLANK_CNT, 0x79));
+
} else if (dev->norm & V4L2_STD_SECAM) {
cx231xx_info("do_mode_ctrl_overrides SECAM\n");
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
- FLD_VBLANK_CNT, 0x24);
+ FLD_VBLANK_CNT, 0x20);
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_VACTIVE_CNT,
+ cx231xx_set_field
+ (FLD_VACTIVE_CNT,
+ 0x244));
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_V656BLANK_CNT,
+ cx231xx_set_field
+ (FLD_V656BLANK_CNT,
+ 0x24));
/* Adjust the active video horizontal start point */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -899,7 +1039,21 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
VERT_TIM_CTRL,
- FLD_VBLANK_CNT, 0x24);
+ FLD_VBLANK_CNT, 0x20);
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_VACTIVE_CNT,
+ cx231xx_set_field
+ (FLD_VACTIVE_CNT,
+ 0x244));
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ VERT_TIM_CTRL,
+ FLD_V656BLANK_CNT,
+ cx231xx_set_field
+ (FLD_V656BLANK_CNT,
+ 0x24));
/* Adjust the active video horizontal start point */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
@@ -907,11 +1061,28 @@ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev)
FLD_HBLANK_CNT,
cx231xx_set_field
(FLD_HBLANK_CNT, 0x85));
+
}
return status;
}
+int cx231xx_unmute_audio(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, PATH1_VOL_CTL, 0x24);
+}
+EXPORT_SYMBOL_GPL(cx231xx_unmute_audio);
+
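+/*
+ * Stop/restart the audio DSP firmware via DL_CTL_CONTROL. The two values
+ * differ only in bit 4, which appears to be the run bit (assumption).
+ */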
+int stopAudioFirmware(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03);
+}
+
+int restartAudioFirmware(struct cx231xx *dev)
+{
+ return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13);
+}
+
int cx231xx_set_audio_input(struct cx231xx *dev, u8 input)
{
int status = 0;
@@ -970,6 +1141,7 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
/* unmute all, AC97 in, independence mode
adr 08d0, data 0x00063073 */
+ status = vid_blk_write_word(dev, DL_CTL, 0x3000001);
status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063073);
/* set AVC maximum threshold, adr 08d4, dat ffff0024 */
@@ -985,7 +1157,7 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
case AUDIO_INPUT_TUNER_TV:
default:
-
+ status = stopAudioFirmware(dev);
/* Setup SRC sources and clocks */
status = vid_blk_write_word(dev, BAND_OUT_SEL,
cx231xx_set_field(FLD_SRC6_IN_SEL, 0x00) |
@@ -1013,18 +1185,32 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F063870);
/* setAudioStandard(_audio_standard); */
-
status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063870);
- switch (dev->model) {
- case CX231XX_BOARD_CNXT_RDE_250:
- case CX231XX_BOARD_CNXT_RDU_250:
+
+ status = restartAudioFirmware(dev);
+
+ switch (dev->board.tuner_type) {
+ case TUNER_XC5000:
+ /* SIF passthrough at 28.6363 MHz sample rate */
status = cx231xx_read_modify_write_i2c_dword(dev,
VID_BLK_I2C_ADDRESS,
CHIP_CTRL,
FLD_SIF_EN,
cx231xx_set_field(FLD_SIF_EN, 1));
break;
+ case TUNER_NXP_TDA18271:
+ /* Normal mode: SIF passthrough at 14.32 MHz */
+ status = cx231xx_read_modify_write_i2c_dword(dev,
+ VID_BLK_I2C_ADDRESS,
+ CHIP_CTRL,
+ FLD_SIF_EN,
+ cx231xx_set_field(FLD_SIF_EN, 0));
+ break;
default:
+ /* Note for anyone adding a new board: if the tuner type is not
+ handled above, SIF passthrough is left at its current setting */
+ printk(KERN_INFO "Unknown tuner type configuring SIF\n");
break;
}
break;
@@ -1049,18 +1235,6 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
return status;
}
-/* Set resolution of the video */
-int cx231xx_resolution_set(struct cx231xx *dev)
-{
- /* set horzontal scale */
- int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
- if (status)
- return status;
-
- /* set vertical scale */
- return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
-}
-
/******************************************************************************
* C H I P Specific C O N T R O L functions *
******************************************************************************/
@@ -1094,34 +1268,350 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
return status;
}
-int cx231xx_enable_i2c_for_tuner(struct cx231xx *dev, u8 I2CIndex)
+int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
{
u8 value[4] = { 0, 0, 0, 0 };
int status = 0;
-
- cx231xx_info("Changing the i2c port for tuner to %d\n", I2CIndex);
+ bool current_is_port_3;
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
PWR_CTL_EN, value, 4);
if (status < 0)
return status;
- if (I2CIndex == I2C_1) {
- if (value[0] & I2C_DEMOD_EN) {
- value[0] &= ~I2C_DEMOD_EN;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- PWR_CTL_EN, value, 4);
- }
+ current_is_port_3 = value[0] & I2C_DEMOD_EN ? true : false;
+
+ /* Just return, if already using the right port */
+ if (current_is_port_3 == is_port_3)
+ return 0;
+
+ if (is_port_3)
+ value[0] |= I2C_DEMOD_EN;
+ else
+ value[0] &= ~I2C_DEMOD_EN;
+
+ cx231xx_info("Changing the i2c master port to %d\n",
+ is_port_3 ? 3 : 1);
+
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+
+ return status;
+
+}
+EXPORT_SYMBOL_GPL(cx231xx_enable_i2c_port_3);
+
+void update_HH_register_after_set_DIF(struct cx231xx *dev)
+{
+/*
+ u8 status = 0;
+ u32 value = 0;
+
+ vid_blk_write_word(dev, PIN_CTRL, 0xA0FFF82F);
+ vid_blk_write_word(dev, DIF_MISC_CTRL, 0x0A203F11);
+ vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1BEFBF06);
+
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+*/
+}
+
+void cx231xx_dump_HH_reg(struct cx231xx *dev)
+{
+ u8 status = 0;
+ u32 value = 0;
+ u16 i = 0;
+
+ value = 0x45005390;
+ status = vid_blk_write_word(dev, 0x104, value);
+
+ for (i = 0x100; i < 0x140; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ for (i = 0x300; i < 0x400; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ for (i = 0x400; i < 0x440; i++) {
+ status = vid_blk_read_word(dev, i, &value);
+ cx231xx_info("reg0x%x=0x%x\n", i, value);
+ i = i+3;
+ }
+
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+ vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
+ status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
+}
+
+void cx231xx_dump_SC_reg(struct cx231xx *dev)
+{
+ u8 value[4] = { 0, 0, 0, 0 };
+ int status = 0;
+ cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
+ value[1], value[2], value[3]);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
+ value[1], value[2], value[3]);
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+ cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
+ value[1], value[2], value[3]);
+
+
+}
+
+void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
+{
+ u8 status = 0;
+ u8 value = 0;
+
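+ /* Toggle bit 0 of ADC_STATUS2_CH3 high and then low, presumably to
+ reset/latch the channel 3 ADC before reprogramming it */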
+ status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ value = (value & 0xFE)|0x01;
+ status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+
+ status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ value = (value & 0xFE)|0x00;
+ status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+
+
+/*
+ * Configure the colibri (AFE) for low-IF mode.
+ *
+ * FIXME: ntf_mode is 2'b00 by default. Setting it to 0x1 would halve the
+ * differential IF input, which works around a low-IF AGC defect.
+ */
+
+ status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
+ value = (value & 0xFC)|0x00;
+ status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
+
+ status = afe_read_byte(dev, ADC_INPUT_CH3, &value);
+ value = (value & 0xF9)|0x02;
+ status = afe_write_byte(dev, ADC_INPUT_CH3, value);
+
+ status = afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
+ value = (value & 0xFB)|0x04;
+ status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
+
+ status = afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
+ value = (value & 0xFC)|0x03;
+ status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
+ value = (value & 0xFB)|0x04;
+ status = afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ value = (value & 0xF8)|0x06;
+ status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+
+ status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ value = (value & 0x8F)|0x40;
+ status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+
+ status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
+ value = (value & 0xDF)|0x20;
+ status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
+}
+
+void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode)
+{
+ u32 colibri_carrier_offset = 0;
+ u8 status = 0;
+ u32 func_mode = 0x01; /* Device has a DIF if this function is called */
+ u32 standard = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ cx231xx_info("Enter cx231xx_set_Colibri_For_LowIF()\n");
+ value[0] = (u8) 0x6F;
+ value[1] = (u8) 0x6F;
+ value[2] = (u8) 0x6F;
+ value[3] = (u8) 0x6F;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+
+ /*Set colibri for low IF*/
+ status = cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
+
+ /* Set C2HH for low IF operation.*/
+ standard = dev->norm;
+ status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
+ func_mode, standard);
+
+ /* Get colibri offsets.*/
+ colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode,
+ standard);
+
+ cx231xx_info("colibri_carrier_offset=%d, standard=0x%x\n",
+ colibri_carrier_offset, standard);
+
+ /* Set the band Pass filter for DIF*/
+ cx231xx_set_DIF_bandpass(dev, (if_freq+colibri_carrier_offset),
+ spectral_invert, mode);
+}
+
+u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd)
+{
+ u32 colibri_carrier_offset = 0;
+
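+ /* Offsets are in Hz; the caller adds them to the tuner IF before
+ programming the DIF band-pass filter */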
+ if (mode == TUNER_MODE_FM_RADIO) {
+ colibri_carrier_offset = 1100000;
+ } else if (standerd & (V4L2_STD_MN | V4L2_STD_NTSC_M_JP)) {
+ colibri_carrier_offset = 4832000; /*4.83MHz */
+ } else if (standerd & (V4L2_STD_PAL_B | V4L2_STD_PAL_G)) {
+ colibri_carrier_offset = 2700000; /*2.70MHz */
+ } else if (standerd & (V4L2_STD_PAL_D | V4L2_STD_PAL_I
+ | V4L2_STD_SECAM)) {
+ colibri_carrier_offset = 2100000; /*2.10MHz */
+ }
+
+ return colibri_carrier_offset;
+}
+
+void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode)
+{
+ unsigned long pll_freq_word;
+ int status = 0;
+ u32 dif_misc_ctrl_value = 0;
+ u64 pll_freq_u64 = 0;
+ u32 i = 0;
+
+ cx231xx_info("if_freq=%d;spectral_invert=0x%x;mode=0x%x\n",
+ if_freq, spectral_invert, mode);
+
+
+ if (mode == TUNER_MODE_FM_RADIO) {
+ pll_freq_word = 0x905A1CAC;
+ status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+
+ } else { /* KSPROPERTY_TUNER_MODE_TV */
+ /* Calculate the PLL frequency word based on the adjusted if_freq*/
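+ /* pll_freq_word = if_freq * 2^28 / 50,000,000; the 50 MHz divisor is
+ assumed to be the DIF reference clock */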
+ pll_freq_word = if_freq;
+ pll_freq_u64 = (u64)pll_freq_word << 28L;
+ do_div(pll_freq_u64, 50000000);
+ pll_freq_word = (u32)pll_freq_u64;
+ /*pll_freq_word = 0x3463497;*/
+ status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+
+ if (spectral_invert) {
+ if_freq -= 400000;
+ /* Enable Spectral Invert*/
+ status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ &dif_misc_ctrl_value);
+ dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000;
+ status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ dif_misc_ctrl_value);
} else {
- if (!(value[0] & I2C_DEMOD_EN)) {
- value[0] |= I2C_DEMOD_EN;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- PWR_CTL_EN, value, 4);
- }
+ if_freq += 400000;
+ /* Disable Spectral Invert*/
+ status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ &dif_misc_ctrl_value);
+ dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF;
+ status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ dif_misc_ctrl_value);
}
- return status;
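+ /* Round the IF down to a 100 kHz step and clamp it to 3-16 MHz, the
+ range covered by the Dif_set_array table below */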
+ if_freq = (if_freq/100000)*100000;
+ if (if_freq < 3000000)
+ if_freq = 3000000;
+
+ if (if_freq > 16000000)
+ if_freq = 16000000;
+ }
+
+ cx231xx_info("Enter IF=%zd\n",
+ sizeof(Dif_set_array)/sizeof(struct dif_settings));
+ for (i = 0; i < sizeof(Dif_set_array)/sizeof(struct dif_settings); i++) {
+ if (Dif_set_array[i].if_freq == if_freq) {
+ status = vid_blk_write_word(dev,
+ Dif_set_array[i].register_address, Dif_set_array[i].value);
+ }
+ }
}
/******************************************************************************
@@ -1132,6 +1622,7 @@ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
{
int status = 0;
+
if (mode == V4L2_TUNER_RADIO) {
/* C2HH */
/* lo if big signal */
@@ -1174,6 +1665,7 @@ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
VID_BLK_I2C_ADDRESS, 32,
AUD_IO_CTRL, 0, 31, 0x00000003);
} else if ((standard == V4L2_STD_PAL_I) |
+ (standard & V4L2_STD_PAL_D) |
(standard & V4L2_STD_SECAM)) {
/* C2HH setup */
/* lo if big signal */
@@ -1232,10 +1724,18 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
dev->norm = standard;
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
+ case CX231XX_BOARD_CNXT_VIDEO_GRABBER:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
func_mode = 0x03;
break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ func_mode = 0x01;
+ break;
default:
func_mode = 0x01;
}
@@ -1617,17 +2117,27 @@ int cx231xx_tuner_post_channel_change(struct cx231xx *dev)
{
int status = 0;
u32 dwval;
-
+ cx231xx_info("cx231xx_tuner_post_channel_change dev->tuner_type =0%d\n",
+ dev->tuner_type);
/* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for
* SECAM L/B/D standards */
status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval);
dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF);
if (dev->norm & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_B |
- V4L2_STD_SECAM_D))
- dwval |= 0x88000000;
- else
- dwval |= 0x44000000;
+ V4L2_STD_SECAM_D)) {
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ dwval &= ~FLD_DIF_IF_REF;
+ dwval |= 0x88000300;
+ } else
+ dwval |= 0x88000000;
+ } else {
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ dwval &= ~FLD_DIF_IF_REF;
+ dwval |= 0xCC000300;
+ } else
+ dwval |= 0x44000000;
+ }
status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval);
@@ -1714,8 +2224,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
return 0;
}
- cx231xx_info(" setPowerMode::mode = %d\n", mode);
-
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value,
4);
if (status < 0)
@@ -1761,7 +2269,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
case POLARIS_AVMODE_ANALOGT_TV:
- tmp &= (~PWR_DEMOD_EN);
+ tmp |= PWR_DEMOD_EN;
tmp |= (I2C_DEMOD_EN);
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -1814,14 +2322,18 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
- if ((dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
- (dev->model == CX231XX_BOARD_CNXT_RDU_250)) {
- /* tuner path to channel 1 from port 3 */
- cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ /* Enable tuner */
+ cx231xx_enable_i2c_port_3(dev, true);
+
+ /* reset the Tuner */
+ if (dev->board.tuner_gpio)
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
if (dev->cx231xx_reset_analog_tuner)
dev->cx231xx_reset_analog_tuner(dev);
}
+
break;
case POLARIS_AVMODE_DIGITAL:
@@ -1856,6 +2368,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
+ tmp &= (~PWR_AV_MODE);
tmp |= POLARIS_AVMODE_DIGITAL | I2C_DEMOD_EN;
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -1876,10 +2389,19 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
msleep(PWR_SLEEP_INTERVAL);
}
- if ((dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
- (dev->model == CX231XX_BOARD_CNXT_RDU_250)) {
- /* tuner path to channel 1 from port 3 */
- cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ if (dev->board.tuner_type != TUNER_ABSENT) {
+ /*
+ * Enable tuner.
+ * The Hauppauge Exeter seems to need the I2C master left on
+ * port 1 here rather than switched to port 3.
+ */
+ if (dev->model == CX231XX_BOARD_HAUPPAUGE_EXETER)
+ cx231xx_enable_i2c_port_3(dev, false);
+ else
+ cx231xx_enable_i2c_port_3(dev, true);
+
+ /* reset the Tuner */
+ if (dev->board.tuner_gpio)
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
if (dev->cx231xx_reset_analog_tuner)
dev->cx231xx_reset_analog_tuner(dev);
@@ -1913,9 +2435,6 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value,
4);
- cx231xx_info(" The data of PWR_CTL_EN register 0x74"
- "=0x%0x,0x%0x,0x%0x,0x%0x\n",
- value[0], value[1], value[2], value[3]);
return status;
}
@@ -2000,6 +2519,8 @@ int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask)
int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
{
int status = 0;
+ u32 value = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
if (dev->udev->speed == USB_SPEED_HIGH) {
switch (media_type) {
@@ -2026,10 +2547,36 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
break;
case 4: /* ts1 */
- cx231xx_info("%s: set ts1 registers\n", __func__);
+ cx231xx_info("%s: set ts1 registers", __func__);
+
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ cx231xx_info(" MPEG\n");
+ value &= 0xFFFFFFFC;
+ value |= 0x3;
+
+ status = cx231xx_mode_register(dev, TS_MODE_REG, value);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x08;
+ val[2] = 0x00;
+ val[3] = 0x08;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_LENGTH_REG, val, 4);
+
+ } else {
+ cx231xx_info(" BDA\n");
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101);
- status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400);
+ status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010);
+ }
break;
+
case 6: /* ts1 parallel mode */
cx231xx_info("%s: set ts1 parrallel mode registers\n",
__func__);
@@ -2128,7 +2675,7 @@ EXPORT_SYMBOL_GPL(cx231xx_capture_start);
/*****************************************************************************
* G P I O B I T control functions *
******************************************************************************/
-int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
+int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val)
{
int status = 0;
@@ -2137,7 +2684,7 @@ int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
return status;
}
-int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val)
+int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val)
{
int status = 0;
@@ -2344,7 +2891,7 @@ int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data)
return status;
}
-int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 * buf)
+int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf)
{
u8 value = 0;
int status = 0;
@@ -2494,7 +3041,7 @@ int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev)
/* cx231xx_gpio_i2c_read
* Function to read data from gpio based I2C interface
*/
-int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
+int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
int status = 0;
int i = 0;
@@ -2538,7 +3085,7 @@ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
/* cx231xx_gpio_i2c_write
* Function to write data to gpio based I2C interface
*/
-int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 * buf, u8 len)
+int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
int status = 0;
int i = 0;
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index f2a4900014bc..56c2d8195ac6 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -41,6 +41,10 @@ static int tuner = -1;
module_param(tuner, int, 0444);
MODULE_PARM_DESC(tuner, "tuner type");
+static int transfer_mode = 1;
+module_param(transfer_mode, int, 0444);
+MODULE_PARM_DESC(transfer_mode, "transfer mode (1-ISO or 0-BULK)");
+
static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
@@ -86,8 +90,8 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
- [CX231XX_BOARD_CNXT_RDE_250] = {
- .name = "Conexant Hybrid TV - RDE250",
+ [CX231XX_BOARD_CNXT_CARRAERA] = {
+ .name = "Conexant Hybrid TV - CARRAERA",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -95,6 +99,7 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = 0x1a,
.tuner_sda_gpio = 0x1b,
.decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
@@ -125,9 +130,8 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
-
- [CX231XX_BOARD_CNXT_RDU_250] = {
- .name = "Conexant Hybrid TV - RDU250",
+ [CX231XX_BOARD_CNXT_SHELBY] = {
+ .name = "Conexant Hybrid TV - SHELBY",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -135,6 +139,7 @@ struct cx231xx_board cx231xx_boards[] = {
.tuner_scl_gpio = 0x1a,
.tuner_sda_gpio = 0x1b,
.decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
.agc_analog_digital_select_gpio = 0x0c,
@@ -165,6 +170,231 @@ struct cx231xx_board cx231xx_boards[] = {
}
},
},
+ [CX231XX_BOARD_CNXT_RDE_253S] = {
+ .name = "Conexant Hybrid TV - RDE253S",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+
+ [CX231XX_BOARD_CNXT_RDU_253S] = {
+ .name = "Conexant Hybrid TV - RDU253S",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_VIDEO_GRABBER] = {
+ .name = "Conexant VIDEO GRABBER",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x1c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_RDE_250] = {
+ .name = "Conexant Hybrid TV - rde 250",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x02,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_CNXT_RDU_250] = {
+ .name = "Conexant Hybrid TV - RDU 250",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x32,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }
+ },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_EXETER] = {
+ .name = "Hauppauge EXETER",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .demod_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x0e,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ } },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_USBLIVE2] = {
+ .name = "Hauppauge USB Live 2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .norm = V4L2_STD_NTSC,
+ .input = {{
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = 0,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -172,12 +402,28 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
struct usb_device_id cx231xx_id_table[] = {
{USB_DEVICE(0x0572, 0x5A3C),
.driver_info = CX231XX_BOARD_UNKNOWN},
+ {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000, 0x4fff),
+ .driver_info = CX231XX_BOARD_UNKNOWN},
{USB_DEVICE(0x0572, 0x58A2),
- .driver_info = CX231XX_BOARD_CNXT_RDE_250},
+ .driver_info = CX231XX_BOARD_CNXT_CARRAERA},
{USB_DEVICE(0x0572, 0x58A1),
+ .driver_info = CX231XX_BOARD_CNXT_SHELBY},
+ {USB_DEVICE(0x0572, 0x58A4),
+ .driver_info = CX231XX_BOARD_CNXT_RDE_253S},
+ {USB_DEVICE(0x0572, 0x58A5),
+ .driver_info = CX231XX_BOARD_CNXT_RDU_253S},
+ {USB_DEVICE(0x0572, 0x58A6),
+ .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
+ {USB_DEVICE(0x0572, 0x589E),
+ .driver_info = CX231XX_BOARD_CNXT_RDE_250},
+ {USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
- {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
- .driver_info = CX231XX_BOARD_UNKNOWN},
+ {USB_DEVICE(0x2040, 0xb120),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+ {USB_DEVICE(0x2040, 0xb140),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
+ {USB_DEVICE(0x2040, 0xc200),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USBLIVE2},
{},
};
@@ -212,6 +458,23 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
}
EXPORT_SYMBOL_GPL(cx231xx_tuner_callback);
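+/*
+ * Board-specific GPIO helpers: pulse the CX23417 reset line (high-low-high
+ * with 200 ms delays), enable the CX23417 oscillator, and put the s5h1432
+ * demod to sleep. The roles are inferred from the GPIO names.
+ */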
+void cx231xx_reset_out(struct cx231xx *dev)
+{
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
+ msleep(200);
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 0);
+ msleep(200);
+ cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
+}
+void cx231xx_enable_OSC(struct cx231xx *dev)
+{
+ cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1);
+}
+void cx231xx_sleep_s5h1432(struct cx231xx *dev)
+{
+ cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0);
+}
+
static inline void cx231xx_set_model(struct cx231xx *dev)
{
memcpy(&dev->board, &cx231xx_boards[dev->model], sizeof(dev->board));
@@ -232,13 +495,11 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
if (dev->board.tuner_gpio) {
cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+ }
+ if (dev->board.tuner_sif_gpio >= 0)
cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
- /* request some modules if any required */
-
- /* reset the Tuner */
- cx231xx_gpio_set(dev, dev->board.tuner_gpio);
- }
+ /* request some modules if any required */
/* set the mode to Analog mode initially */
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
@@ -286,26 +547,6 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
}
-/* ----------------------------------------------------------------------- */
-void cx231xx_register_i2c_ir(struct cx231xx *dev)
-{
- if (disable_ir)
- return;
-
- /* REVISIT: instantiate IR device */
-
- /* detect & configure */
- switch (dev->model) {
-
- case CX231XX_BOARD_CNXT_RDE_250:
- break;
- case CX231XX_BOARD_CNXT_RDU_250:
- break;
- default:
- break;
- }
-}
-
void cx231xx_card_setup(struct cx231xx *dev)
{
@@ -319,29 +560,24 @@ void cx231xx_card_setup(struct cx231xx *dev)
if (dev->board.decoder == CX231XX_AVDECODER) {
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[0].i2c_adap,
- "cx25840", "cx25840", 0x88 >> 1, NULL);
+ NULL, "cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840 == NULL)
cx231xx_info("cx25840 subdev registration failure\n");
cx25840_call(dev, core, load_fw);
}
+ /* Initialize the tuner */
if (dev->board.tuner_type != TUNER_ABSENT) {
- dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", 0xc2 >> 1, NULL);
+ dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ NULL, "tuner",
+ dev->tuner_addr, NULL);
if (dev->sd_tuner == NULL)
cx231xx_info("tuner subdev registration failure\n");
-
- cx231xx_config_tuner(dev);
+ else
+ cx231xx_config_tuner(dev);
}
-
- cx231xx_config_tuner(dev);
-
-#if 0
- /* TBD IR will be added later */
- cx231xx_ir_init(dev);
-#endif
}
/*
@@ -375,12 +611,6 @@ void cx231xx_config_i2c(struct cx231xx *dev)
*/
void cx231xx_release_resources(struct cx231xx *dev)
{
-
-#if 0 /* TBD IR related */
- if (dev->ir)
- cx231xx_ir_fini(dev);
-#endif
-
cx231xx_release_analog_resources(dev);
cx231xx_remove_from_devlist(dev);
@@ -409,6 +639,7 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
mutex_init(&dev->lock);
mutex_init(&dev->ctrl_urb_lock);
mutex_init(&dev->gpio_i2c_lock);
+ mutex_init(&dev->i2c_lock);
spin_lock_init(&dev->video_mode.slock);
spin_lock_init(&dev->vbi_mode.slock);
@@ -427,6 +658,13 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
/* Query cx231xx to find what pcb config it is related to */
initialize_cx231xx(dev);
+ /* To work around error -71 on EP0 with the Video Grabber, the
+ alternate settings have to be set here. */
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) {
+ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3);
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ }
/* Cx231xx pre card setup */
cx231xx_pre_card_setup(dev);
@@ -442,6 +680,7 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
/* register i2c bus */
errCode = cx231xx_dev_init(dev);
if (errCode < 0) {
+ cx231xx_dev_uninit(dev);
cx231xx_errdev("%s: cx231xx_i2c_register - errCode [%d]!\n",
__func__, errCode);
return errCode;
@@ -460,8 +699,6 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
dev->width = maxw;
dev->height = maxh;
dev->interlaced = 0;
- dev->hscale = 0;
- dev->vscale = 0;
dev->video_input = 0;
errCode = cx231xx_config(dev);
@@ -480,9 +717,17 @@ static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
-
cx231xx_add_into_devlist(dev);
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ printk(KERN_INFO "attach 417 %d\n", dev->model);
+ if (cx231xx_417_register(dev) < 0) {
+ printk(KERN_ERR
+ "%s() Failed to register 417 on VID_B\n",
+ __func__);
+ }
+ }
+
retval = cx231xx_register_analog_devices(dev);
if (retval < 0) {
cx231xx_release_resources(dev);
@@ -537,13 +782,12 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
char *speed;
char descr[255] = "";
struct usb_interface *lif = NULL;
- int skip_interface = 0;
struct usb_interface_assoc_descriptor *assoc_desc;
udev = usb_get_dev(interface_to_usbdev(interface));
ifnum = interface->altsetting[0].desc.bInterfaceNumber;
- if (!ifnum) {
+ if (ifnum == 1) {
/*
* Interface number 0 - IR interface
*/
@@ -552,8 +796,8 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused |= 1 << nr;
if (nr >= CX231XX_MAXBOARDS) {
- cx231xx_err(DRIVER_NAME ": Supports only %i cx231xx boards.\n",
- CX231XX_MAXBOARDS);
+ cx231xx_err(DRIVER_NAME
+ ": Supports only %i cx231xx boards.\n", CX231XX_MAXBOARDS);
cx231xx_devused &= ~(1 << nr);
return -ENOMEM;
}
@@ -578,6 +822,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev->xc_fw_load_done = 0;
dev->has_alsa_audio = 1;
dev->power_mode = -1;
+ atomic_set(&dev->devlist_count, 0);
/* 0 - vbi ; 1 -sliced cc mode */
dev->vbi_or_sliced_cc_mode = 0;
@@ -591,6 +836,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* store the current interface */
lif = interface;
+ /* mode_tv: 1 = digital, 0 = analog */
+ dev->mode_tv = 0;
+
+ dev->USE_ISO = transfer_mode;
+
switch (udev->speed) {
case USB_SPEED_LOW:
speed = "1.5";
@@ -624,13 +874,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
dev->max_iad_interface_count);
- } else {
- /* Get dev structure first */
- dev = usb_get_intfdata(udev->actconfig->interface[0]);
- if (dev == NULL) {
- cx231xx_err(DRIVER_NAME ": out of first interface!\n");
- return -ENODEV;
- }
/* store the interface 0 back */
lif = udev->actconfig->interface[0];
@@ -641,35 +884,21 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* get device number */
nr = dev->devno;
- /*
- * set skip interface, for all interfaces but
- * interface 1 and the last one
- */
- if ((ifnum != 1) && ((dev->interface_count - 1)
- != dev->max_iad_interface_count))
- skip_interface = 1;
-
- if (ifnum == 1) {
- assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
- cx231xx_err(DRIVER_NAME ": Not found "
- "matching IAD interface\n");
- return -ENODEV;
- }
+ assoc_desc = udev->actconfig->intf_assoc[0];
+ if (assoc_desc->bFirstInterface != ifnum) {
+ cx231xx_err(DRIVER_NAME ": Not found "
+ "matching IAD interface\n");
+ return -ENODEV;
}
- }
-
- if (skip_interface)
+ } else {
return -ENODEV;
+ }
cx231xx_info("registering interface %d\n", ifnum);
/* save our data pointer in this interface device */
usb_set_intfdata(lif, dev);
- if ((dev->interface_count - 1) != dev->max_iad_interface_count)
- return 0;
-
/*
* AV device initialization - only done at the last interface
*/
@@ -680,15 +909,18 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_errdev("v4l2_device_register failed\n");
cx231xx_devused &= ~(1 << nr);
kfree(dev);
+ dev = NULL;
return -EIO;
}
-
/* allocate device struct */
retval = cx231xx_init_dev(&dev, udev, nr);
if (retval) {
cx231xx_devused &= ~(1 << dev->devno);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
+ usb_set_intfdata(lif, NULL);
+
return retval;
}
@@ -711,6 +943,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -744,6 +977,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -778,6 +1012,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -813,6 +1048,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_devused &= ~(1 << nr);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
+ dev = NULL;
return -ENOMEM;
}
@@ -827,6 +1063,15 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
}
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) {
+ cx231xx_enable_OSC(dev);
+ cx231xx_reset_out(dev);
+ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3);
+ }
+
+ if (dev->model == CX231XX_BOARD_CNXT_RDE_253S)
+ cx231xx_sleep_s5h1432(dev);
+
/* load other modules required */
request_modules(dev);
@@ -867,7 +1112,10 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
video_device_node_name(dev->vdev));
dev->state |= DEV_MISCONFIGURED;
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
dev->state |= DEV_DISCONNECTED;
wake_up_interruptible(&dev->wait_frame);
wake_up_interruptible(&dev->wait_stream);
@@ -886,6 +1134,7 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
kfree(dev->sliced_cc_mode.alt_max_pkt_size);
kfree(dev->ts1_mode.alt_max_pkt_size);
kfree(dev);
+ dev = NULL;
}
}
diff --git a/drivers/media/video/cx231xx/cx231xx-conf-reg.h b/drivers/media/video/cx231xx/cx231xx-conf-reg.h
index 31a8759f6e54..25593f212abf 100644
--- a/drivers/media/video/cx231xx/cx231xx-conf-reg.h
+++ b/drivers/media/video/cx231xx/cx231xx-conf-reg.h
@@ -39,6 +39,7 @@
#define CIR_CAR_REG 0x38
#define CIR_OT_CFG1 0x40
#define CIR_OT_CFG2 0x44
+#define GBULK_BIT_EN 0x68
#define PWR_CTL_EN 0x74
/* Polaris Endpoints capture mask for register EP_MODE_SET */
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 912a4d740206..4af46fca9b0a 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -27,6 +27,7 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <media/v4l2-common.h>
+#include <media/tuner.h>
#include "cx231xx.h"
#include "cx231xx-reg.h"
@@ -46,11 +47,6 @@ static unsigned int reg_debug;
module_param(reg_debug, int, 0644);
MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
-#define cx231xx_regdbg(fmt, arg...) do {\
- if (reg_debug) \
- printk(KERN_INFO "%s %s :"fmt, \
- dev->name, __func__ , ##arg); } while (0)
-
static int alt = CX231XX_PINOUT;
module_param(alt, int, 0644);
MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
@@ -64,7 +60,7 @@ MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
* Device control list functions *
******************************************************************/
-static LIST_HEAD(cx231xx_devlist);
+LIST_HEAD(cx231xx_devlist);
static DEFINE_MUTEX(cx231xx_devlist_mutex);
/*
@@ -74,33 +70,39 @@ static DEFINE_MUTEX(cx231xx_devlist_mutex);
*/
void cx231xx_remove_from_devlist(struct cx231xx *dev)
{
- mutex_lock(&cx231xx_devlist_mutex);
- list_del(&dev->devlist);
- mutex_unlock(&cx231xx_devlist_mutex);
+ if (dev == NULL)
+ return;
+ if (dev->udev == NULL)
+ return;
+
+ if (atomic_read(&dev->devlist_count) > 0) {
+ mutex_lock(&cx231xx_devlist_mutex);
+ list_del(&dev->devlist);
+ atomic_dec(&dev->devlist_count);
+ mutex_unlock(&cx231xx_devlist_mutex);
+ }
};
void cx231xx_add_into_devlist(struct cx231xx *dev)
{
mutex_lock(&cx231xx_devlist_mutex);
list_add_tail(&dev->devlist, &cx231xx_devlist);
+ atomic_inc(&dev->devlist_count);
mutex_unlock(&cx231xx_devlist_mutex);
};
static LIST_HEAD(cx231xx_extension_devlist);
-static DEFINE_MUTEX(cx231xx_extension_devlist_lock);
int cx231xx_register_extension(struct cx231xx_ops *ops)
{
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- mutex_lock(&cx231xx_extension_devlist_lock);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
list_for_each_entry(dev, &cx231xx_devlist, devlist)
ops->init(dev);
printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
- mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
return 0;
}
@@ -114,10 +116,9 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
list_for_each_entry(dev, &cx231xx_devlist, devlist)
ops->fini(dev);
- mutex_lock(&cx231xx_extension_devlist_lock);
+
printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
}
EXPORT_SYMBOL(cx231xx_unregister_extension);
@@ -126,28 +127,28 @@ void cx231xx_init_extension(struct cx231xx *dev)
{
struct cx231xx_ops *ops = NULL;
- mutex_lock(&cx231xx_extension_devlist_lock);
+ mutex_lock(&cx231xx_devlist_mutex);
if (!list_empty(&cx231xx_extension_devlist)) {
list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&cx231xx_extension_devlist_lock);
+ mutex_unlock(&cx231xx_devlist_mutex);
}
void cx231xx_close_extension(struct cx231xx *dev)
{
struct cx231xx_ops *ops = NULL;
- mutex_lock(&cx231xx_extension_devlist_lock);
+ mutex_lock(&cx231xx_devlist_mutex);
if (!list_empty(&cx231xx_extension_devlist)) {
list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&cx231xx_extension_devlist_lock);
+ mutex_unlock(&cx231xx_devlist_mutex);
}
/****************************************************************
@@ -234,6 +235,66 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
EXPORT_SYMBOL_GPL(cx231xx_send_usb_command);
/*
+ * Sends/receives URB control messages, always using a kmalloc'ed buffer
+ * (dev->urb_buf) for the transfer instead of a stack buffer, since stack
+ * buffers are not safe for USB due to DMA restrictions.
+ * Also implements the debug printout for control URBs.
+ */
+static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout)
+{
+ int rc, i;
+
+ if (reg_debug) {
+ printk(KERN_DEBUG "%s: (pipe 0x%08x): "
+ "%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
+ dev->name,
+ pipe,
+ (requesttype & USB_DIR_IN) ? "IN" : "OUT",
+ requesttype,
+ request,
+ value & 0xff, value >> 8,
+ index & 0xff, index >> 8,
+ size & 0xff, size >> 8);
+ if (!(requesttype & USB_DIR_IN)) {
+ printk(KERN_CONT ">>>");
+ for (i = 0; i < size; i++)
+ printk(KERN_CONT " %02x",
+ ((unsigned char *)data)[i]);
+ }
+ }
+
+ /* Do the real call to usb_control_msg */
+ mutex_lock(&dev->ctrl_urb_lock);
+ if (!(requesttype & USB_DIR_IN) && size)
+ memcpy(dev->urb_buf, data, size);
+ rc = usb_control_msg(dev->udev, pipe, request, requesttype, value,
+ index, dev->urb_buf, size, timeout);
+ if ((requesttype & USB_DIR_IN) && size)
+ memcpy(data, dev->urb_buf, size);
+ mutex_unlock(&dev->ctrl_urb_lock);
+
+ if (reg_debug) {
+ if (unlikely(rc < 0)) {
+ printk(KERN_CONT "FAILED!\n");
+ return rc;
+ }
+
+ if ((requesttype & USB_DIR_IN)) {
+ printk(KERN_CONT "<<<");
+ for (i = 0; i < size; i++)
+ printk(KERN_CONT " %02x",
+ ((unsigned char *)data)[i]);
+ }
+ printk(KERN_CONT "\n");
+ }
+
+ return rc;
+}
+
+
+/*
* cx231xx_read_ctrl_reg()
* reads data from the usb device specifying bRequest and wValue
*/
@@ -270,39 +331,9 @@ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg,
if (val == 0xFF)
return -EINVAL;
- if (reg_debug) {
- cx231xx_isocdbg("(pipe 0x%08x): "
- "IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
- pipe,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, val,
- reg & 0xff, reg >> 8, len & 0xff, len >> 8);
- }
-
- mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = __usb_control_msg(dev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, reg, dev->urb_buf, len, HZ);
- if (ret < 0) {
- cx231xx_isocdbg(" failed!\n");
- /* mutex_unlock(&dev->ctrl_urb_lock); */
- return ret;
- }
-
- if (len)
- memcpy(buf, dev->urb_buf, len);
-
- mutex_unlock(&dev->ctrl_urb_lock);
-
- if (reg_debug) {
- int byte;
-
- cx231xx_isocdbg("<<<");
- for (byte = 0; byte < len; byte++)
- cx231xx_isocdbg(" %02x", (unsigned char)buf[byte]);
- cx231xx_isocdbg("\n");
- }
-
+ val, reg, buf, len, HZ);
return ret;
}
@@ -311,6 +342,8 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
{
int ret;
int pipe = 0;
+ int unsend_size = 0;
+ u8 *pdata;
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -323,31 +356,54 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
else
pipe = usb_sndctrlpipe(dev->udev, 0);
- if (reg_debug) {
- int byte;
+ /*
+ * If the cx23102 reads more than 4 bytes over the I2C bus, the
+ * transfer has to be chopped into 4-byte chunks, one per request.
+ */
+ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
+ (ven_req->bRequest == 0x5) ||
+ (ven_req->bRequest == 0x6))) {
+ pdata = ven_req->pBuff;
+ unsend_size = ven_req->wLength;
+
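+ /* wValue bits 0x02 and 0x40 appear to mark a chunk's position in the
+ chained transfer (first/middle/last); this is inferred from the
+ sequencing below, not from documentation */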
+ /* the first package */
+ ven_req->wValue = ven_req->wValue & 0xFFFB;
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2;
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ 0x0004, HZ);
+ unsend_size = unsend_size - 4;
+
+ /* the middle package */
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42;
+ while (unsend_size - 4 > 0) {
+ pdata = pdata + 4;
+ ret = __usb_control_msg(dev, pipe,
+ ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ 0x0004, HZ);
+ unsend_size = unsend_size - 4;
+ }
- cx231xx_isocdbg("(pipe 0x%08x): "
- "OUT: %02x %02x %02x %04x %04x %04x >>>",
- pipe,
- ven_req->
- direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- ven_req->bRequest, 0, ven_req->wValue,
- ven_req->wIndex, ven_req->wLength);
-
- for (byte = 0; byte < ven_req->wLength; byte++)
- cx231xx_isocdbg(" %02x",
- (unsigned char)ven_req->pBuff[byte]);
- cx231xx_isocdbg("\n");
+ /* the last package */
+ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40;
+ pdata = pdata + 4;
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex, pdata,
+ unsend_size, HZ);
+ } else {
+ ret = __usb_control_msg(dev, pipe, ven_req->bRequest,
+ ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ ven_req->wValue, ven_req->wIndex,
+ ven_req->pBuff, ven_req->wLength, HZ);
}
- mutex_lock(&dev->ctrl_urb_lock);
- ret = usb_control_msg(dev->udev, pipe, ven_req->bRequest,
- ven_req->
- direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- ven_req->wValue, ven_req->wIndex, ven_req->pBuff,
- ven_req->wLength, HZ);
- mutex_unlock(&dev->ctrl_urb_lock);
-
return ret;
}
@@ -403,12 +459,9 @@ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
cx231xx_isocdbg("\n");
}
- mutex_lock(&dev->ctrl_urb_lock);
- memcpy(dev->urb_buf, buf, len);
- ret = usb_control_msg(dev->udev, pipe, req,
+ ret = __usb_control_msg(dev, pipe, req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, reg, dev->urb_buf, len, HZ);
- mutex_unlock(&dev->ctrl_urb_lock);
+ val, reg, buf, len, HZ);
return ret;
}
@@ -444,6 +497,11 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
dev->video_mode.alt = 0;
}
+ if (dev->USE_ISO == 0)
+ dev->video_mode.alt = 0;
+
+ cx231xx_coredbg("dev->video_mode.alt= %d\n", dev->video_mode.alt);
+
/* Get the correct video interface Index */
usb_interface_index =
dev->current_pcb_config.hs_config_info[0].interface_info.
@@ -452,15 +510,13 @@ int cx231xx_set_video_alternate(struct cx231xx *dev)
if (dev->video_mode.alt != prev_alt) {
cx231xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
min_pkt_size, dev->video_mode.alt);
- dev->video_mode.max_pkt_size =
- dev->video_mode.alt_max_pkt_size[dev->video_mode.alt];
+
+ if (dev->video_mode.alt_max_pkt_size != NULL)
+ dev->video_mode.max_pkt_size =
+ dev->video_mode.alt_max_pkt_size[dev->video_mode.alt];
cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
dev->video_mode.alt,
dev->video_mode.max_pkt_size);
- cx231xx_info
- (" setting alt %d with wMaxPktSize=%u , Interface = %d\n",
- dev->video_mode.alt, dev->video_mode.max_pkt_size,
- usb_interface_index);
errCode =
usb_set_interface(dev->udev, usb_interface_index,
dev->video_mode.alt);
@@ -485,7 +541,7 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
usb_interface_index =
dev->current_pcb_config.hs_config_info[0].interface_info.
ts1_index + 1;
- dev->video_mode.alt = alt;
+ dev->ts1_mode.alt = alt;
if (dev->ts1_mode.alt_max_pkt_size != NULL)
max_pkt_size = dev->ts1_mode.max_pkt_size =
dev->ts1_mode.alt_max_pkt_size[dev->ts1_mode.alt];
@@ -542,12 +598,16 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
cx231xx_errdev
("can't change interface %d alt no. to %d: Max. Pkt size = 0\n",
usb_interface_index, alt);
- return -1;
+ /* To work around error -71 on EP0 with the Video Grabber, a zero
+ max packet size is not treated as fatal for these boards. */
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ return -1;
}
- cx231xx_info
- (" setting alternate %d with wMaxPacketSize=%u , Interface = %d\n",
- alt, max_pkt_size, usb_interface_index);
+ cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,"
+ "Interface = %d\n", alt, max_pkt_size,
+ usb_interface_index);
if (usb_interface_index > 0) {
status = usb_set_interface(dev->udev, usb_interface_index, alt);
@@ -584,8 +644,56 @@ int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio)
return rc;
}
+int cx231xx_demod_reset(struct cx231xx *dev)
+{
+ u8 status = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+
+ cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN,
+ value[0], value[1], value[2], value[3]);
+
+ cx231xx_coredbg("Enter cx231xx_demod_reset()\n");
+
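+ /* toggle the demod reset bits in PWR_CTL_EN: set, clear, then set again */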
+ value[1] = (u8) 0x3;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+ value[1] = (u8) 0x0;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+ value[1] = (u8) 0x3;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ PWR_CTL_EN, value, 4);
+ msleep(10);
+
+ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ value, 4);
+
+ cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN,
+ value[0], value[1], value[2], value[3]);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(cx231xx_demod_reset);
+
+int is_fw_load(struct cx231xx *dev)
+{
+ return cx231xx_check_fw(dev);
+}
+EXPORT_SYMBOL_GPL(is_fw_load);
+
int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
{
+ int errCode = 0;
+
if (dev->mode == set_mode)
return 0;
@@ -600,15 +708,75 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
dev->mode = set_mode;
- if (dev->mode == CX231XX_DIGITAL_MODE)
- ;/* Set Digital power mode */
- else
- ;/* Set Analog Power mode */
+ if (dev->mode == CX231XX_DIGITAL_MODE)/* Set Digital power mode */ {
+ /* set AGC mode to Digital */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_DIGITAL);
+ break;
+ default:
+ break;
+ }
+ } else/* Set Analog Power mode */ {
+ /* set AGC mode to Analog */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_set_mode);
+int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
+{
+ int errCode = 0;
+ int actlen, ret = -ENOMEM;
+ u32 *buffer;
+
+ buffer = kzalloc(4096, GFP_KERNEL);
+ if (buffer == NULL) {
+ cx231xx_info("out of mem\n");
+ return -ENOMEM;
+ }
+ memcpy(&buffer[0], firmware, 4096);
+
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
+ buffer, 4096, &actlen, 2000);
+
+ if (ret) {
+ cx231xx_info("bulk message failed: %d (%d/%d)", ret,
+ size, actlen);
+ errCode = ret;
+ } else {
+ errCode = actlen != size ? -1 : 0;
+ }
+
+ kfree(buffer);
+ return errCode;
+}
+
/*****************************************************************
* URB Streaming functions *
******************************************************************/
@@ -616,7 +784,7 @@ EXPORT_SYMBOL_GPL(cx231xx_set_mode);
/*
* IRQ callback, called by URB callback
*/
-static void cx231xx_irq_callback(struct urb *urb)
+static void cx231xx_isoc_irq_callback(struct urb *urb)
{
struct cx231xx_dmaqueue *dma_q = urb->context;
struct cx231xx_video_mode *vmode =
@@ -655,12 +823,54 @@ static void cx231xx_irq_callback(struct urb *urb)
urb->status);
}
}
+/*****************************************************************
+* URB Streaming functions *
+******************************************************************/
/*
+ * IRQ callback, called by URB callback
+ */
+static void cx231xx_bulk_irq_callback(struct urb *urb)
+{
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ struct cx231xx_video_mode *vmode =
+ container_of(dma_q, struct cx231xx_video_mode, vidq);
+ struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
+ int rc;
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+ break;
+ }
+
+ /* Copy data from URB */
+ spin_lock(&dev->video_mode.slock);
+ rc = dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
+ spin_unlock(&dev->video_mode.slock);
+
+ /* Reset urb buffers */
+ urb->status = 0;
+
+ urb->status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (urb->status) {
+ cx231xx_isocdbg("urb resubmit failed (error=%i)\n",
+ urb->status);
+ }
+}
+/*
* Stop and Deallocate URBs
*/
void cx231xx_uninit_isoc(struct cx231xx *dev)
{
+ struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
struct urb *urb;
int i;
@@ -690,16 +900,71 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
kfree(dev->video_mode.isoc_ctl.urb);
kfree(dev->video_mode.isoc_ctl.transfer_buffer);
+ kfree(dma_q->p_left_data);
dev->video_mode.isoc_ctl.urb = NULL;
dev->video_mode.isoc_ctl.transfer_buffer = NULL;
dev->video_mode.isoc_ctl.num_bufs = 0;
+ dma_q->p_left_data = NULL;
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 0, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 0, TS1_serial_mode);
+
- cx231xx_capture_start(dev, 0, Raw_Video);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc);
/*
+ * Stop and Deallocate URBs
+ */
+void cx231xx_uninit_bulk(struct cx231xx *dev)
+{
+ struct urb *urb;
+ int i;
+
+ cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n");
+
+ dev->video_mode.bulk_ctl.nfields = -1;
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ urb = dev->video_mode.bulk_ctl.urb[i];
+ if (urb) {
+ if (!irqs_disabled())
+ usb_kill_urb(urb);
+ else
+ usb_unlink_urb(urb);
+
+ if (dev->video_mode.bulk_ctl.transfer_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->video_mode.bulk_ctl.
+ transfer_buffer[i],
+ urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+ dev->video_mode.bulk_ctl.urb[i] = NULL;
+ }
+ dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL;
+ }
+
+ kfree(dev->video_mode.bulk_ctl.urb);
+ kfree(dev->video_mode.bulk_ctl.transfer_buffer);
+
+ dev->video_mode.bulk_ctl.urb = NULL;
+ dev->video_mode.bulk_ctl.transfer_buffer = NULL;
+ dev->video_mode.bulk_ctl.num_bufs = 0;
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 0, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 0, TS1_serial_mode);
+}
+EXPORT_SYMBOL_GPL(cx231xx_uninit_bulk);
+
+/*
* Allocate URBs and start IRQ
*/
int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
@@ -713,15 +978,16 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
int j, k;
int rc;
- cx231xx_isocdbg("cx231xx: called cx231xx_prepare_isoc\n");
+ /* De-allocates all pending stuff */
+ cx231xx_uninit_isoc(dev);
- dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
+ dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
+ if (dma_q->p_left_data == NULL) {
+ cx231xx_info("out of mem\n");
+ return -ENOMEM;
+ }
- cx231xx_info("Setting Video mux to %d\n", dev->video_input);
- video_mux(dev, dev->video_input);
- /* De-allocates all pending stuff */
- cx231xx_uninit_isoc(dev);
dev->video_mode.isoc_ctl.isoc_copy = isoc_copy;
dev->video_mode.isoc_ctl.num_bufs = num_bufs;
@@ -733,6 +999,14 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dma_q->lines_per_field = dev->height / 2;
dma_q->bytes_left_in_line = dev->width << 1;
dma_q->lines_completed = 0;
+ dma_q->mpeg_buffer_done = 0;
+ dma_q->left_data_count = 0;
+ dma_q->mpeg_buffer_completed = 0;
+ dma_q->add_ps_package_head = CX231XX_NEED_ADD_PS_PACKAGE_HEAD;
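+ /* 0x00 0x00 0x01 0xBA is the MPEG-2 program stream pack header start code */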
+ dma_q->ps_head[0] = 0x00;
+ dma_q->ps_head[1] = 0x00;
+ dma_q->ps_head[2] = 0x01;
+ dma_q->ps_head[3] = 0xBA;
for (i = 0; i < 8; i++)
dma_q->partial_buf[i] = 0;
@@ -756,6 +1030,12 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
sb_size = max_packets * dev->video_mode.isoc_ctl.max_pkt_size;
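+ /* select the capture endpoint: 0x81 in TS (TV) mode, 0x84 for raw video */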
+ if (dev->mode_tv == 1)
+ dev->video_mode.end_point_addr = 0x81;
+ else
+ dev->video_mode.end_point_addr = 0x84;
+
/* allocate urbs and transfer buffers */
for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) {
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
@@ -784,7 +1064,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
usb_fill_int_urb(urb, dev->udev, pipe,
dev->video_mode.isoc_ctl.transfer_buffer[i],
- sb_size, cx231xx_irq_callback, dma_q, 1);
+ sb_size, cx231xx_isoc_irq_callback, dma_q, 1);
urb->number_of_packets = max_packets;
urb->transfer_flags = URB_ISO_ASAP;
@@ -812,12 +1092,176 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
}
}
- cx231xx_capture_start(dev, 1, Raw_Video);
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 1, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 1, TS1_serial_mode);
return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_isoc);
+/*
+ * Allocate URBs and start IRQ
+ */
+int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb))
+{
+ struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
+ int i;
+ int sb_size, pipe;
+ struct urb *urb;
+ int rc;
+
+ dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
+
+ cx231xx_coredbg("Setting Video mux to %d\n", dev->video_input);
+
+ video_mux(dev, dev->video_input);
+
+ /* De-allocates all pending stuff */
+ cx231xx_uninit_bulk(dev);
+
+ dev->video_mode.bulk_ctl.bulk_copy = bulk_copy;
+ dev->video_mode.bulk_ctl.num_bufs = num_bufs;
+ dma_q->pos = 0;
+ dma_q->is_partial_line = 0;
+ dma_q->last_sav = 0;
+ dma_q->current_field = -1;
+ dma_q->field1_done = 0;
+ dma_q->lines_per_field = dev->height / 2;
+ dma_q->bytes_left_in_line = dev->width << 1;
+ dma_q->lines_completed = 0;
+ dma_q->mpeg_buffer_done = 0;
+ dma_q->left_data_count = 0;
+ dma_q->mpeg_buffer_completed = 0;
+ dma_q->ps_head[0] = 0x00;
+ dma_q->ps_head[1] = 0x00;
+ dma_q->ps_head[2] = 0x01;
+ dma_q->ps_head[3] = 0xBA;
+ for (i = 0; i < 8; i++)
+ dma_q->partial_buf[i] = 0;
+
+ dev->video_mode.bulk_ctl.urb =
+ kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+ if (!dev->video_mode.bulk_ctl.urb) {
+ cx231xx_errdev("cannot alloc memory for usb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->video_mode.bulk_ctl.transfer_buffer =
+ kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
+ if (!dev->video_mode.bulk_ctl.transfer_buffer) {
+ cx231xx_errdev("cannot allocate memory for usbtransfer\n");
+ kfree(dev->video_mode.bulk_ctl.urb);
+ return -ENOMEM;
+ }
+
+ dev->video_mode.bulk_ctl.max_pkt_size = max_pkt_size;
+ dev->video_mode.bulk_ctl.buf = NULL;
+
+ sb_size = max_packets * dev->video_mode.bulk_ctl.max_pkt_size;
+
+ if (dev->mode_tv == 1)
+ dev->video_mode.end_point_addr = 0x81;
+ else
+ dev->video_mode.end_point_addr = 0x84;
+
+ /* allocate urbs and transfer buffers */
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i);
+ cx231xx_uninit_bulk(dev);
+ return -ENOMEM;
+ }
+ dev->video_mode.bulk_ctl.urb[i] = urb;
+ urb->transfer_flags = 0;
+
+ dev->video_mode.bulk_ctl.transfer_buffer[i] =
+ usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) {
+ cx231xx_err("unable to allocate %i bytes for transfer"
+ " buffer %i%s\n",
+ sb_size, i,
+ in_interrupt() ? " while in int" : "");
+ cx231xx_uninit_bulk(dev);
+ return -ENOMEM;
+ }
+ memset(dev->video_mode.bulk_ctl.transfer_buffer[i], 0, sb_size);
+
+ pipe = usb_rcvbulkpipe(dev->udev,
+ dev->video_mode.end_point_addr);
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ dev->video_mode.bulk_ctl.transfer_buffer[i],
+ sb_size, cx231xx_bulk_irq_callback, dma_q);
+ }
+
+ init_waitqueue_head(&dma_q->wq);
+
+ /* submit urbs and enables IRQ */
+ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
+ rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i],
+ GFP_ATOMIC);
+ if (rc) {
+ cx231xx_err("submit of urb %i failed (error=%i)\n", i,
+ rc);
+ cx231xx_uninit_bulk(dev);
+ return rc;
+ }
+ }
+
+ if (dev->mode_tv == 0)
+ cx231xx_capture_start(dev, 1, Raw_Video);
+ else
+ cx231xx_capture_start(dev, 1, TS1_serial_mode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cx231xx_init_bulk);
+
+void cx231xx_stop_TS1(struct cx231xx *dev)
+{
+ int status = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
+
+ val[0] = 0x00;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x70;
+ val[2] = 0x04;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+}
+/* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */
+void cx231xx_start_TS1(struct cx231xx *dev)
+{
+ int status = 0;
+ u8 val[4] = { 0, 0, 0, 0 };
+
+ val[0] = 0x03;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
+}
+/* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */
/*****************************************************************
* Device Init/UnInit functions *
******************************************************************/
@@ -830,14 +1274,14 @@ int cx231xx_dev_init(struct cx231xx *dev)
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
dev->i2c_bus[0].dev = dev;
- dev->i2c_bus[0].i2c_period = I2C_SPEED_1M; /* 1MHz */
+ dev->i2c_bus[0].i2c_period = I2C_SPEED_100K; /* 100 KHz */
dev->i2c_bus[0].i2c_nostop = 0;
dev->i2c_bus[0].i2c_reserve = 0;
/* External Master 2 Bus */
dev->i2c_bus[1].nr = 1;
dev->i2c_bus[1].dev = dev;
- dev->i2c_bus[1].i2c_period = I2C_SPEED_1M; /* 1MHz */
+ dev->i2c_bus[1].i2c_period = I2C_SPEED_100K; /* 100 KHz */
dev->i2c_bus[1].i2c_nostop = 0;
dev->i2c_bus[1].i2c_reserve = 0;
@@ -856,14 +1300,34 @@ int cx231xx_dev_init(struct cx231xx *dev)
/* init hardware */
/* Note: without calling the set power mode function,
the afe cannot be set up correctly */
- errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
- if (errCode < 0) {
- cx231xx_errdev
- ("%s: Failed to set Power - errCode [%d]!\n",
- __func__, errCode);
- return errCode;
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) {
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ENXTERNAL_AV);
+ if (errCode < 0) {
+ cx231xx_errdev
+ ("%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
+ return errCode;
+ }
+ } else {
+ errCode = cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ANALOGT_TV);
+ if (errCode < 0) {
+ cx231xx_errdev
+ ("%s: Failed to set Power - errCode [%d]!\n",
+ __func__, errCode);
+ return errCode;
+ }
}
+ /* reset the Tuner */
+ if ((dev->model == CX231XX_BOARD_CNXT_CARRAERA) ||
+ (dev->model == CX231XX_BOARD_CNXT_RDE_250) ||
+ (dev->model == CX231XX_BOARD_CNXT_SHELBY) ||
+ (dev->model == CX231XX_BOARD_CNXT_RDU_250))
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+
/* initialize Colibri block */
errCode = cx231xx_afe_init_super_block(dev, 0x23c);
if (errCode < 0) {
@@ -907,7 +1371,21 @@ int cx231xx_dev_init(struct cx231xx *dev)
}
/* set AGC mode to Analog */
+ switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
+ case CX231XX_BOARD_CNXT_RDE_250:
+ case CX231XX_BOARD_CNXT_SHELBY:
+ case CX231XX_BOARD_CNXT_RDU_250:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ default:
+ break;
+ }
if (errCode < 0) {
cx231xx_errdev
("%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
@@ -923,7 +1401,7 @@ int cx231xx_dev_init(struct cx231xx *dev)
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
/* set the I2C master port to 3 on channel 1 */
- errCode = cx231xx_enable_i2c_for_tuner(dev, I2C_3);
+ errCode = cx231xx_enable_i2c_port_3(dev, true);
return errCode;
}
@@ -941,7 +1419,7 @@ EXPORT_SYMBOL_GPL(cx231xx_dev_uninit);
/*****************************************************************
* G P I O related functions *
******************************************************************/
-int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 * gpio_val,
+int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
u8 len, u8 request, u8 direction)
{
int status = 0;
@@ -1026,6 +1504,91 @@ int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode)
/*****************************************************************
* I 2 C Internal C O N T R O L functions *
*****************************************************************/
+int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 *data, u8 data_len, int master)
+{
+ int status = 0;
+ struct cx231xx_i2c_xfer_data req_data;
+ u8 value[64] = "0";
+
+ if (saddr_len == 0)
+ saddr = 0;
+ else if (saddr_len == 1)
+ saddr &= 0xff;
+
+ /* prepare xfer_data struct */
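+ /* dev_addr is given in 8-bit form; >> 1 yields the 7-bit I2C address */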
+ req_data.dev_addr = dev_addr >> 1;
+ req_data.direction = I2C_M_RD;
+ req_data.saddr_len = saddr_len;
+ req_data.saddr_dat = saddr;
+ req_data.buf_size = data_len;
+ req_data.p_buffer = (u8 *) value;
+
+ /* usb send command */
+ if (master == 0)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
+ &req_data);
+ else if (master == 1)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
+ &req_data);
+ else if (master == 2)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
+ &req_data);
+
+ if (status >= 0) {
+ /* Copy the data read back to main buffer */
+ if (data_len == 1)
+ *data = value[0];
+ else if (data_len == 4)
+ *data =
+ value[0] | value[1] << 8 | value[2] << 16 | value[3]
+ << 24;
+ else if (data_len > 4)
+ *data = value[saddr];
+ }
+
+ return status;
+}
+
+int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 data, u8 data_len, int master)
+{
+ int status = 0;
+ u8 value[4] = { 0, 0, 0, 0 };
+ struct cx231xx_i2c_xfer_data req_data;
+
+ value[0] = (u8) data;
+ value[1] = (u8) (data >> 8);
+ value[2] = (u8) (data >> 16);
+ value[3] = (u8) (data >> 24);
+
+ if (saddr_len == 0)
+ saddr = 0;
+ else if (saddr_len == 1)
+ saddr &= 0xff;
+
+ /* prepare xfer_data struct */
+ req_data.dev_addr = dev_addr >> 1;
+ req_data.direction = 0;
+ req_data.saddr_len = saddr_len;
+ req_data.saddr_dat = saddr;
+ req_data.buf_size = data_len;
+ req_data.p_buffer = value;
+
+ /* usb send command */
+ if (master == 0)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
+ &req_data);
+ else if (master == 1)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
+ &req_data);
+ else if (master == 2)
+ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
+ &req_data);
+
+ return status;
+}
+
int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr,
u8 saddr_len, u32 *data, u8 data_len)
{
diff --git a/drivers/media/video/cx231xx/cx231xx-dif.h b/drivers/media/video/cx231xx/cx231xx-dif.h
new file mode 100644
index 000000000000..2b63c2f6d3b0
--- /dev/null
+++ b/drivers/media/video/cx231xx/cx231xx-dif.h
@@ -0,0 +1,3178 @@
+/*
+ * cx231xx-dif.h - driver for Conexant Cx23100/101/102 USB video capture devices
+ *
+ * Copyright (C) 2009 <Bill.Liu@conexant.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _CX231XX_DIF_H
+#define _CX231XX_DIF_H
+
+#include "cx231xx-reg.h"
+
+struct dif_settings {
+ u32 if_freq;
+ u32 register_address;
+ u32 value;
+};
+
+static struct dif_settings Dif_set_array[] = {
+
+/*case 3000000:*/
+/* BEGIN - DIF BPF register values from 30_quant.dat*/
+{3000000, DIF_BPF_COEFF01, 0x00000002},
+{3000000, DIF_BPF_COEFF23, 0x00080012},
+{3000000, DIF_BPF_COEFF45, 0x001e0024},
+{3000000, DIF_BPF_COEFF67, 0x001bfff8},
+{3000000, DIF_BPF_COEFF89, 0xffb4ff50},
+{3000000, DIF_BPF_COEFF1011, 0xfed8fe68},
+{3000000, DIF_BPF_COEFF1213, 0xfe24fe34},
+{3000000, DIF_BPF_COEFF1415, 0xfebaffc7},
+{3000000, DIF_BPF_COEFF1617, 0x014d031f},
+{3000000, DIF_BPF_COEFF1819, 0x04f0065d},
+{3000000, DIF_BPF_COEFF2021, 0x07010688},
+{3000000, DIF_BPF_COEFF2223, 0x04c901d6},
+{3000000, DIF_BPF_COEFF2425, 0xfe00f9d3},
+{3000000, DIF_BPF_COEFF2627, 0xf600f342},
+{3000000, DIF_BPF_COEFF2829, 0xf235f337},
+{3000000, DIF_BPF_COEFF3031, 0xf64efb22},
+{3000000, DIF_BPF_COEFF3233, 0x0105070f},
+{3000000, DIF_BPF_COEFF3435, 0x0c460fce},
+{3000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 30_quant.dat*/
+
+
+/*case 3100000:*/
+/* BEGIN - DIF BPF register values from 31_quant.dat*/
+{3100000, DIF_BPF_COEFF01, 0x00000001},
+{3100000, DIF_BPF_COEFF23, 0x00070012},
+{3100000, DIF_BPF_COEFF45, 0x00220032},
+{3100000, DIF_BPF_COEFF67, 0x00370026},
+{3100000, DIF_BPF_COEFF89, 0xfff0ff91},
+{3100000, DIF_BPF_COEFF1011, 0xff0efe7c},
+{3100000, DIF_BPF_COEFF1213, 0xfe01fdcc},
+{3100000, DIF_BPF_COEFF1415, 0xfe0afedb},
+{3100000, DIF_BPF_COEFF1617, 0x00440224},
+{3100000, DIF_BPF_COEFF1819, 0x0434060c},
+{3100000, DIF_BPF_COEFF2021, 0x0738074e},
+{3100000, DIF_BPF_COEFF2223, 0x06090361},
+{3100000, DIF_BPF_COEFF2425, 0xff99fb39},
+{3100000, DIF_BPF_COEFF2627, 0xf6fef3b6},
+{3100000, DIF_BPF_COEFF2829, 0xf21af2a5},
+{3100000, DIF_BPF_COEFF3031, 0xf573fa33},
+{3100000, DIF_BPF_COEFF3233, 0x0034067d},
+{3100000, DIF_BPF_COEFF3435, 0x0bfb0fb9},
+{3100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 31_quant.dat*/
+
+
+/*case 3200000:*/
+/* BEGIN - DIF BPF register values from 32_quant.dat*/
+{3200000, DIF_BPF_COEFF01, 0x00000000},
+{3200000, DIF_BPF_COEFF23, 0x0004000e},
+{3200000, DIF_BPF_COEFF45, 0x00200038},
+{3200000, DIF_BPF_COEFF67, 0x004c004f},
+{3200000, DIF_BPF_COEFF89, 0x002fffdf},
+{3200000, DIF_BPF_COEFF1011, 0xff5cfeb6},
+{3200000, DIF_BPF_COEFF1213, 0xfe0dfd92},
+{3200000, DIF_BPF_COEFF1415, 0xfd7ffe03},
+{3200000, DIF_BPF_COEFF1617, 0xff36010a},
+{3200000, DIF_BPF_COEFF1819, 0x03410575},
+{3200000, DIF_BPF_COEFF2021, 0x072607d2},
+{3200000, DIF_BPF_COEFF2223, 0x071804d5},
+{3200000, DIF_BPF_COEFF2425, 0x0134fcb7},
+{3200000, DIF_BPF_COEFF2627, 0xf81ff451},
+{3200000, DIF_BPF_COEFF2829, 0xf223f22e},
+{3200000, DIF_BPF_COEFF3031, 0xf4a7f94b},
+{3200000, DIF_BPF_COEFF3233, 0xff6405e8},
+{3200000, DIF_BPF_COEFF3435, 0x0bae0fa4},
+{3200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 32_quant.dat*/
+
+
+/*case 3300000:*/
+/* BEGIN - DIF BPF register values from 33_quant.dat*/
+{3300000, DIF_BPF_COEFF01, 0x0000ffff},
+{3300000, DIF_BPF_COEFF23, 0x00000008},
+{3300000, DIF_BPF_COEFF45, 0x001a0036},
+{3300000, DIF_BPF_COEFF67, 0x0056006d},
+{3300000, DIF_BPF_COEFF89, 0x00670030},
+{3300000, DIF_BPF_COEFF1011, 0xffbdff10},
+{3300000, DIF_BPF_COEFF1213, 0xfe46fd8d},
+{3300000, DIF_BPF_COEFF1415, 0xfd25fd4f},
+{3300000, DIF_BPF_COEFF1617, 0xfe35ffe0},
+{3300000, DIF_BPF_COEFF1819, 0x0224049f},
+{3300000, DIF_BPF_COEFF2021, 0x06c9080e},
+{3300000, DIF_BPF_COEFF2223, 0x07ef0627},
+{3300000, DIF_BPF_COEFF2425, 0x02c9fe45},
+{3300000, DIF_BPF_COEFF2627, 0xf961f513},
+{3300000, DIF_BPF_COEFF2829, 0xf250f1d2},
+{3300000, DIF_BPF_COEFF3031, 0xf3ecf869},
+{3300000, DIF_BPF_COEFF3233, 0xfe930552},
+{3300000, DIF_BPF_COEFF3435, 0x0b5f0f8f},
+{3300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 33_quant.dat*/
+
+
+/*case 3400000:*/
+/* BEGIN - DIF BPF register values from 34_quant.dat*/
+{3400000, DIF_BPF_COEFF01, 0xfffffffe},
+{3400000, DIF_BPF_COEFF23, 0xfffd0001},
+{3400000, DIF_BPF_COEFF45, 0x000f002c},
+{3400000, DIF_BPF_COEFF67, 0x0054007d},
+{3400000, DIF_BPF_COEFF89, 0x0093007c},
+{3400000, DIF_BPF_COEFF1011, 0x0024ff82},
+{3400000, DIF_BPF_COEFF1213, 0xfea6fdbb},
+{3400000, DIF_BPF_COEFF1415, 0xfd03fcca},
+{3400000, DIF_BPF_COEFF1617, 0xfd51feb9},
+{3400000, DIF_BPF_COEFF1819, 0x00eb0392},
+{3400000, DIF_BPF_COEFF2021, 0x06270802},
+{3400000, DIF_BPF_COEFF2223, 0x08880750},
+{3400000, DIF_BPF_COEFF2425, 0x044dffdb},
+{3400000, DIF_BPF_COEFF2627, 0xfabdf5f8},
+{3400000, DIF_BPF_COEFF2829, 0xf2a0f193},
+{3400000, DIF_BPF_COEFF3031, 0xf342f78f},
+{3400000, DIF_BPF_COEFF3233, 0xfdc404b9},
+{3400000, DIF_BPF_COEFF3435, 0x0b0e0f78},
+{3400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 34_quant.dat*/
+
+
+/*case 3500000:*/
+/* BEGIN - DIF BPF register values from 35_quant.dat*/
+{3500000, DIF_BPF_COEFF01, 0xfffffffd},
+{3500000, DIF_BPF_COEFF23, 0xfffafff9},
+{3500000, DIF_BPF_COEFF45, 0x0002001b},
+{3500000, DIF_BPF_COEFF67, 0x0046007d},
+{3500000, DIF_BPF_COEFF89, 0x00ad00ba},
+{3500000, DIF_BPF_COEFF1011, 0x00870000},
+{3500000, DIF_BPF_COEFF1213, 0xff26fe1a},
+{3500000, DIF_BPF_COEFF1415, 0xfd1bfc7e},
+{3500000, DIF_BPF_COEFF1617, 0xfc99fda4},
+{3500000, DIF_BPF_COEFF1819, 0xffa5025c},
+{3500000, DIF_BPF_COEFF2021, 0x054507ad},
+{3500000, DIF_BPF_COEFF2223, 0x08dd0847},
+{3500000, DIF_BPF_COEFF2425, 0x05b80172},
+{3500000, DIF_BPF_COEFF2627, 0xfc2ef6ff},
+{3500000, DIF_BPF_COEFF2829, 0xf313f170},
+{3500000, DIF_BPF_COEFF3031, 0xf2abf6bd},
+{3500000, DIF_BPF_COEFF3233, 0xfcf6041f},
+{3500000, DIF_BPF_COEFF3435, 0x0abc0f61},
+{3500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 35_quant.dat*/
+
+
+/*case 3600000:*/
+/* BEGIN - DIF BPF register values from 36_quant.dat*/
+{3600000, DIF_BPF_COEFF01, 0xfffffffd},
+{3600000, DIF_BPF_COEFF23, 0xfff8fff3},
+{3600000, DIF_BPF_COEFF45, 0xfff50006},
+{3600000, DIF_BPF_COEFF67, 0x002f006c},
+{3600000, DIF_BPF_COEFF89, 0x00b200e3},
+{3600000, DIF_BPF_COEFF1011, 0x00dc007e},
+{3600000, DIF_BPF_COEFF1213, 0xffb9fea0},
+{3600000, DIF_BPF_COEFF1415, 0xfd6bfc71},
+{3600000, DIF_BPF_COEFF1617, 0xfc17fcb1},
+{3600000, DIF_BPF_COEFF1819, 0xfe65010b},
+{3600000, DIF_BPF_COEFF2021, 0x042d0713},
+{3600000, DIF_BPF_COEFF2223, 0x08ec0906},
+{3600000, DIF_BPF_COEFF2425, 0x07020302},
+{3600000, DIF_BPF_COEFF2627, 0xfdaff823},
+{3600000, DIF_BPF_COEFF2829, 0xf3a7f16a},
+{3600000, DIF_BPF_COEFF3031, 0xf228f5f5},
+{3600000, DIF_BPF_COEFF3233, 0xfc2a0384},
+{3600000, DIF_BPF_COEFF3435, 0x0a670f4a},
+{3600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 36_quant.dat*/
+
+
+/*case 3700000:*/
+/* BEGIN - DIF BPF register values from 37_quant.dat*/
+{3700000, DIF_BPF_COEFF01, 0x0000fffd},
+{3700000, DIF_BPF_COEFF23, 0xfff7ffef},
+{3700000, DIF_BPF_COEFF45, 0xffe9fff1},
+{3700000, DIF_BPF_COEFF67, 0x0010004d},
+{3700000, DIF_BPF_COEFF89, 0x00a100f2},
+{3700000, DIF_BPF_COEFF1011, 0x011a00f0},
+{3700000, DIF_BPF_COEFF1213, 0x0053ff44},
+{3700000, DIF_BPF_COEFF1415, 0xfdedfca2},
+{3700000, DIF_BPF_COEFF1617, 0xfbd3fbef},
+{3700000, DIF_BPF_COEFF1819, 0xfd39ffae},
+{3700000, DIF_BPF_COEFF2021, 0x02ea0638},
+{3700000, DIF_BPF_COEFF2223, 0x08b50987},
+{3700000, DIF_BPF_COEFF2425, 0x08230483},
+{3700000, DIF_BPF_COEFF2627, 0xff39f960},
+{3700000, DIF_BPF_COEFF2829, 0xf45bf180},
+{3700000, DIF_BPF_COEFF3031, 0xf1b8f537},
+{3700000, DIF_BPF_COEFF3233, 0xfb6102e7},
+{3700000, DIF_BPF_COEFF3435, 0x0a110f32},
+{3700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 37_quant.dat*/
+
+
+/*case 3800000:*/
+/* BEGIN - DIF BPF register values from 38_quant.dat*/
+{3800000, DIF_BPF_COEFF01, 0x0000fffe},
+{3800000, DIF_BPF_COEFF23, 0xfff9ffee},
+{3800000, DIF_BPF_COEFF45, 0xffe1ffdd},
+{3800000, DIF_BPF_COEFF67, 0xfff00024},
+{3800000, DIF_BPF_COEFF89, 0x007c00e5},
+{3800000, DIF_BPF_COEFF1011, 0x013a014a},
+{3800000, DIF_BPF_COEFF1213, 0x00e6fff8},
+{3800000, DIF_BPF_COEFF1415, 0xfe98fd0f},
+{3800000, DIF_BPF_COEFF1617, 0xfbd3fb67},
+{3800000, DIF_BPF_COEFF1819, 0xfc32fe54},
+{3800000, DIF_BPF_COEFF2021, 0x01880525},
+{3800000, DIF_BPF_COEFF2223, 0x083909c7},
+{3800000, DIF_BPF_COEFF2425, 0x091505ee},
+{3800000, DIF_BPF_COEFF2627, 0x00c7fab3},
+{3800000, DIF_BPF_COEFF2829, 0xf52df1b4},
+{3800000, DIF_BPF_COEFF3031, 0xf15df484},
+{3800000, DIF_BPF_COEFF3233, 0xfa9b0249},
+{3800000, DIF_BPF_COEFF3435, 0x09ba0f19},
+{3800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 38_quant.dat*/
+
+
+/*case 3900000:*/
+/* BEGIN - DIF BPF register values from 39_quant.dat*/
+{3900000, DIF_BPF_COEFF01, 0x00000000},
+{3900000, DIF_BPF_COEFF23, 0xfffbfff0},
+{3900000, DIF_BPF_COEFF45, 0xffdeffcf},
+{3900000, DIF_BPF_COEFF67, 0xffd1fff6},
+{3900000, DIF_BPF_COEFF89, 0x004800be},
+{3900000, DIF_BPF_COEFF1011, 0x01390184},
+{3900000, DIF_BPF_COEFF1213, 0x016300ac},
+{3900000, DIF_BPF_COEFF1415, 0xff5efdb1},
+{3900000, DIF_BPF_COEFF1617, 0xfc17fb23},
+{3900000, DIF_BPF_COEFF1819, 0xfb5cfd0d},
+{3900000, DIF_BPF_COEFF2021, 0x001703e4},
+{3900000, DIF_BPF_COEFF2223, 0x077b09c4},
+{3900000, DIF_BPF_COEFF2425, 0x09d2073c},
+{3900000, DIF_BPF_COEFF2627, 0x0251fc18},
+{3900000, DIF_BPF_COEFF2829, 0xf61cf203},
+{3900000, DIF_BPF_COEFF3031, 0xf118f3dc},
+{3900000, DIF_BPF_COEFF3233, 0xf9d801aa},
+{3900000, DIF_BPF_COEFF3435, 0x09600eff},
+{3900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 39_quant.dat*/
+
+
+/*case 4000000:*/
+/* BEGIN - DIF BPF register values from 40_quant.dat*/
+{4000000, DIF_BPF_COEFF01, 0x00000001},
+{4000000, DIF_BPF_COEFF23, 0xfffefff4},
+{4000000, DIF_BPF_COEFF45, 0xffe1ffc8},
+{4000000, DIF_BPF_COEFF67, 0xffbaffca},
+{4000000, DIF_BPF_COEFF89, 0x000b0082},
+{4000000, DIF_BPF_COEFF1011, 0x01170198},
+{4000000, DIF_BPF_COEFF1213, 0x01c10152},
+{4000000, DIF_BPF_COEFF1415, 0x0030fe7b},
+{4000000, DIF_BPF_COEFF1617, 0xfc99fb24},
+{4000000, DIF_BPF_COEFF1819, 0xfac3fbe9},
+{4000000, DIF_BPF_COEFF2021, 0xfea5027f},
+{4000000, DIF_BPF_COEFF2223, 0x0683097f},
+{4000000, DIF_BPF_COEFF2425, 0x0a560867},
+{4000000, DIF_BPF_COEFF2627, 0x03d2fd89},
+{4000000, DIF_BPF_COEFF2829, 0xf723f26f},
+{4000000, DIF_BPF_COEFF3031, 0xf0e8f341},
+{4000000, DIF_BPF_COEFF3233, 0xf919010a},
+{4000000, DIF_BPF_COEFF3435, 0x09060ee5},
+{4000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 40_quant.dat*/
+
+
+/*case 4100000:*/
+/* BEGIN - DIF BPF register values from 41_quant.dat*/
+{4100000, DIF_BPF_COEFF01, 0x00010002},
+{4100000, DIF_BPF_COEFF23, 0x0002fffb},
+{4100000, DIF_BPF_COEFF45, 0xffe8ffca},
+{4100000, DIF_BPF_COEFF67, 0xffacffa4},
+{4100000, DIF_BPF_COEFF89, 0xffcd0036},
+{4100000, DIF_BPF_COEFF1011, 0x00d70184},
+{4100000, DIF_BPF_COEFF1213, 0x01f601dc},
+{4100000, DIF_BPF_COEFF1415, 0x00ffff60},
+{4100000, DIF_BPF_COEFF1617, 0xfd51fb6d},
+{4100000, DIF_BPF_COEFF1819, 0xfa6efaf5},
+{4100000, DIF_BPF_COEFF2021, 0xfd410103},
+{4100000, DIF_BPF_COEFF2223, 0x055708f9},
+{4100000, DIF_BPF_COEFF2425, 0x0a9e0969},
+{4100000, DIF_BPF_COEFF2627, 0x0543ff02},
+{4100000, DIF_BPF_COEFF2829, 0xf842f2f5},
+{4100000, DIF_BPF_COEFF3031, 0xf0cef2b2},
+{4100000, DIF_BPF_COEFF3233, 0xf85e006b},
+{4100000, DIF_BPF_COEFF3435, 0x08aa0ecb},
+{4100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 41_quant.dat*/
+
+
+/*case 4200000:*/
+/* BEGIN - DIF BPF register values from 42_quant.dat*/
+{4200000, DIF_BPF_COEFF01, 0x00010003},
+{4200000, DIF_BPF_COEFF23, 0x00050003},
+{4200000, DIF_BPF_COEFF45, 0xfff3ffd3},
+{4200000, DIF_BPF_COEFF67, 0xffaaff8b},
+{4200000, DIF_BPF_COEFF89, 0xff95ffe5},
+{4200000, DIF_BPF_COEFF1011, 0x0080014a},
+{4200000, DIF_BPF_COEFF1213, 0x01fe023f},
+{4200000, DIF_BPF_COEFF1415, 0x01ba0050},
+{4200000, DIF_BPF_COEFF1617, 0xfe35fbf8},
+{4200000, DIF_BPF_COEFF1819, 0xfa62fa3b},
+{4200000, DIF_BPF_COEFF2021, 0xfbf9ff7e},
+{4200000, DIF_BPF_COEFF2223, 0x04010836},
+{4200000, DIF_BPF_COEFF2425, 0x0aa90a3d},
+{4200000, DIF_BPF_COEFF2627, 0x069f007f},
+{4200000, DIF_BPF_COEFF2829, 0xf975f395},
+{4200000, DIF_BPF_COEFF3031, 0xf0cbf231},
+{4200000, DIF_BPF_COEFF3233, 0xf7a9ffcb},
+{4200000, DIF_BPF_COEFF3435, 0x084c0eaf},
+{4200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 42_quant.dat*/
+
+
+/*case 4300000:*/
+/* BEGIN - DIF BPF register values from 43_quant.dat*/
+{4300000, DIF_BPF_COEFF01, 0x00010003},
+{4300000, DIF_BPF_COEFF23, 0x0008000a},
+{4300000, DIF_BPF_COEFF45, 0x0000ffe4},
+{4300000, DIF_BPF_COEFF67, 0xffb4ff81},
+{4300000, DIF_BPF_COEFF89, 0xff6aff96},
+{4300000, DIF_BPF_COEFF1011, 0x001c00f0},
+{4300000, DIF_BPF_COEFF1213, 0x01d70271},
+{4300000, DIF_BPF_COEFF1415, 0x0254013b},
+{4300000, DIF_BPF_COEFF1617, 0xff36fcbd},
+{4300000, DIF_BPF_COEFF1819, 0xfa9ff9c5},
+{4300000, DIF_BPF_COEFF2021, 0xfadbfdfe},
+{4300000, DIF_BPF_COEFF2223, 0x028c073b},
+{4300000, DIF_BPF_COEFF2425, 0x0a750adf},
+{4300000, DIF_BPF_COEFF2627, 0x07e101fa},
+{4300000, DIF_BPF_COEFF2829, 0xfab8f44e},
+{4300000, DIF_BPF_COEFF3031, 0xf0ddf1be},
+{4300000, DIF_BPF_COEFF3233, 0xf6f9ff2b},
+{4300000, DIF_BPF_COEFF3435, 0x07ed0e94},
+{4300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 43_quant.dat*/
+
+
+/*case 4400000:*/
+/* BEGIN - DIF BPF register values from 44_quant.dat*/
+{4400000, DIF_BPF_COEFF01, 0x00000003},
+{4400000, DIF_BPF_COEFF23, 0x0009000f},
+{4400000, DIF_BPF_COEFF45, 0x000efff8},
+{4400000, DIF_BPF_COEFF67, 0xffc9ff87},
+{4400000, DIF_BPF_COEFF89, 0xff52ff54},
+{4400000, DIF_BPF_COEFF1011, 0xffb5007e},
+{4400000, DIF_BPF_COEFF1213, 0x01860270},
+{4400000, DIF_BPF_COEFF1415, 0x02c00210},
+{4400000, DIF_BPF_COEFF1617, 0x0044fdb2},
+{4400000, DIF_BPF_COEFF1819, 0xfb22f997},
+{4400000, DIF_BPF_COEFF2021, 0xf9f2fc90},
+{4400000, DIF_BPF_COEFF2223, 0x0102060f},
+{4400000, DIF_BPF_COEFF2425, 0x0a050b4c},
+{4400000, DIF_BPF_COEFF2627, 0x0902036e},
+{4400000, DIF_BPF_COEFF2829, 0xfc0af51e},
+{4400000, DIF_BPF_COEFF3031, 0xf106f15a},
+{4400000, DIF_BPF_COEFF3233, 0xf64efe8b},
+{4400000, DIF_BPF_COEFF3435, 0x078d0e77},
+{4400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 44_quant.dat*/
+
+
+/*case 4500000:*/
+/* BEGIN - DIF BPF register values from 45_quant.dat*/
+{4500000, DIF_BPF_COEFF01, 0x00000002},
+{4500000, DIF_BPF_COEFF23, 0x00080012},
+{4500000, DIF_BPF_COEFF45, 0x0019000e},
+{4500000, DIF_BPF_COEFF67, 0xffe5ff9e},
+{4500000, DIF_BPF_COEFF89, 0xff4fff25},
+{4500000, DIF_BPF_COEFF1011, 0xff560000},
+{4500000, DIF_BPF_COEFF1213, 0x0112023b},
+{4500000, DIF_BPF_COEFF1415, 0x02f702c0},
+{4500000, DIF_BPF_COEFF1617, 0x014dfec8},
+{4500000, DIF_BPF_COEFF1819, 0xfbe5f9b3},
+{4500000, DIF_BPF_COEFF2021, 0xf947fb41},
+{4500000, DIF_BPF_COEFF2223, 0xff7004b9},
+{4500000, DIF_BPF_COEFF2425, 0x095a0b81},
+{4500000, DIF_BPF_COEFF2627, 0x0a0004d8},
+{4500000, DIF_BPF_COEFF2829, 0xfd65f603},
+{4500000, DIF_BPF_COEFF3031, 0xf144f104},
+{4500000, DIF_BPF_COEFF3233, 0xf5aafdec},
+{4500000, DIF_BPF_COEFF3435, 0x072b0e5a},
+{4500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 45_quant.dat*/
+
+
+/*case 4600000:*/
+/* BEGIN - DIF BPF register values from 46_quant.dat*/
+{4600000, DIF_BPF_COEFF01, 0x00000001},
+{4600000, DIF_BPF_COEFF23, 0x00060012},
+{4600000, DIF_BPF_COEFF45, 0x00200022},
+{4600000, DIF_BPF_COEFF67, 0x0005ffc1},
+{4600000, DIF_BPF_COEFF89, 0xff61ff10},
+{4600000, DIF_BPF_COEFF1011, 0xff09ff82},
+{4600000, DIF_BPF_COEFF1213, 0x008601d7},
+{4600000, DIF_BPF_COEFF1415, 0x02f50340},
+{4600000, DIF_BPF_COEFF1617, 0x0241fff0},
+{4600000, DIF_BPF_COEFF1819, 0xfcddfa19},
+{4600000, DIF_BPF_COEFF2021, 0xf8e2fa1e},
+{4600000, DIF_BPF_COEFF2223, 0xfde30343},
+{4600000, DIF_BPF_COEFF2425, 0x08790b7f},
+{4600000, DIF_BPF_COEFF2627, 0x0ad50631},
+{4600000, DIF_BPF_COEFF2829, 0xfec7f6fc},
+{4600000, DIF_BPF_COEFF3031, 0xf198f0bd},
+{4600000, DIF_BPF_COEFF3233, 0xf50dfd4e},
+{4600000, DIF_BPF_COEFF3435, 0x06c90e3d},
+{4600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 46_quant.dat*/
+
+
+/*case 4700000:*/
+/* BEGIN - DIF BPF register values from 47_quant.dat*/
+{4700000, DIF_BPF_COEFF01, 0x0000ffff},
+{4700000, DIF_BPF_COEFF23, 0x0003000f},
+{4700000, DIF_BPF_COEFF45, 0x00220030},
+{4700000, DIF_BPF_COEFF67, 0x0025ffed},
+{4700000, DIF_BPF_COEFF89, 0xff87ff15},
+{4700000, DIF_BPF_COEFF1011, 0xfed6ff10},
+{4700000, DIF_BPF_COEFF1213, 0xffed014c},
+{4700000, DIF_BPF_COEFF1415, 0x02b90386},
+{4700000, DIF_BPF_COEFF1617, 0x03110119},
+{4700000, DIF_BPF_COEFF1819, 0xfdfefac4},
+{4700000, DIF_BPF_COEFF2021, 0xf8c6f92f},
+{4700000, DIF_BPF_COEFF2223, 0xfc6701b7},
+{4700000, DIF_BPF_COEFF2425, 0x07670b44},
+{4700000, DIF_BPF_COEFF2627, 0x0b7e0776},
+{4700000, DIF_BPF_COEFF2829, 0x002df807},
+{4700000, DIF_BPF_COEFF3031, 0xf200f086},
+{4700000, DIF_BPF_COEFF3233, 0xf477fcb1},
+{4700000, DIF_BPF_COEFF3435, 0x06650e1e},
+{4700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 47_quant.dat*/
+
+
+/*case 4800000:*/
+/* BEGIN - DIF BPF register values from 48_quant.dat*/
+{4800000, DIF_BPF_COEFF01, 0xfffffffe},
+{4800000, DIF_BPF_COEFF23, 0xffff0009},
+{4800000, DIF_BPF_COEFF45, 0x001e0038},
+{4800000, DIF_BPF_COEFF67, 0x003f001b},
+{4800000, DIF_BPF_COEFF89, 0xffbcff36},
+{4800000, DIF_BPF_COEFF1011, 0xfec2feb6},
+{4800000, DIF_BPF_COEFF1213, 0xff5600a5},
+{4800000, DIF_BPF_COEFF1415, 0x0248038d},
+{4800000, DIF_BPF_COEFF1617, 0x03b00232},
+{4800000, DIF_BPF_COEFF1819, 0xff39fbab},
+{4800000, DIF_BPF_COEFF2021, 0xf8f4f87f},
+{4800000, DIF_BPF_COEFF2223, 0xfb060020},
+{4800000, DIF_BPF_COEFF2425, 0x062a0ad2},
+{4800000, DIF_BPF_COEFF2627, 0x0bf908a3},
+{4800000, DIF_BPF_COEFF2829, 0x0192f922},
+{4800000, DIF_BPF_COEFF3031, 0xf27df05e},
+{4800000, DIF_BPF_COEFF3233, 0xf3e8fc14},
+{4800000, DIF_BPF_COEFF3435, 0x06000e00},
+{4800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 48_quant.dat*/
+
+
+/*case 4900000:*/
+/* BEGIN - DIF BPF register values from 49_quant.dat*/
+{4900000, DIF_BPF_COEFF01, 0xfffffffd},
+{4900000, DIF_BPF_COEFF23, 0xfffc0002},
+{4900000, DIF_BPF_COEFF45, 0x00160037},
+{4900000, DIF_BPF_COEFF67, 0x00510046},
+{4900000, DIF_BPF_COEFF89, 0xfff9ff6d},
+{4900000, DIF_BPF_COEFF1011, 0xfed0fe7c},
+{4900000, DIF_BPF_COEFF1213, 0xfecefff0},
+{4900000, DIF_BPF_COEFF1415, 0x01aa0356},
+{4900000, DIF_BPF_COEFF1617, 0x0413032b},
+{4900000, DIF_BPF_COEFF1819, 0x007ffcc5},
+{4900000, DIF_BPF_COEFF2021, 0xf96cf812},
+{4900000, DIF_BPF_COEFF2223, 0xf9cefe87},
+{4900000, DIF_BPF_COEFF2425, 0x04c90a2c},
+{4900000, DIF_BPF_COEFF2627, 0x0c4309b4},
+{4900000, DIF_BPF_COEFF2829, 0x02f3fa4a},
+{4900000, DIF_BPF_COEFF3031, 0xf30ef046},
+{4900000, DIF_BPF_COEFF3233, 0xf361fb7a},
+{4900000, DIF_BPF_COEFF3435, 0x059b0de0},
+{4900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 49_quant.dat*/
+
+
+/*case 5000000:*/
+/* BEGIN - DIF BPF register values from 50_quant.dat*/
+{5000000, DIF_BPF_COEFF01, 0xfffffffd},
+{5000000, DIF_BPF_COEFF23, 0xfff9fffa},
+{5000000, DIF_BPF_COEFF45, 0x000a002d},
+{5000000, DIF_BPF_COEFF67, 0x00570067},
+{5000000, DIF_BPF_COEFF89, 0x0037ffb5},
+{5000000, DIF_BPF_COEFF1011, 0xfefffe68},
+{5000000, DIF_BPF_COEFF1213, 0xfe62ff3d},
+{5000000, DIF_BPF_COEFF1415, 0x00ec02e3},
+{5000000, DIF_BPF_COEFF1617, 0x043503f6},
+{5000000, DIF_BPF_COEFF1819, 0x01befe05},
+{5000000, DIF_BPF_COEFF2021, 0xfa27f7ee},
+{5000000, DIF_BPF_COEFF2223, 0xf8c6fcf8},
+{5000000, DIF_BPF_COEFF2425, 0x034c0954},
+{5000000, DIF_BPF_COEFF2627, 0x0c5c0aa4},
+{5000000, DIF_BPF_COEFF2829, 0x044cfb7e},
+{5000000, DIF_BPF_COEFF3031, 0xf3b1f03f},
+{5000000, DIF_BPF_COEFF3233, 0xf2e2fae1},
+{5000000, DIF_BPF_COEFF3435, 0x05340dc0},
+{5000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 50_quant.dat*/
+
+
+/*case 5100000:*/
+/* BEGIN - DIF BPF register values from 51_quant.dat*/
+{5100000, DIF_BPF_COEFF01, 0x0000fffd},
+{5100000, DIF_BPF_COEFF23, 0xfff8fff4},
+{5100000, DIF_BPF_COEFF45, 0xfffd001e},
+{5100000, DIF_BPF_COEFF67, 0x0051007b},
+{5100000, DIF_BPF_COEFF89, 0x006e0006},
+{5100000, DIF_BPF_COEFF1011, 0xff48fe7c},
+{5100000, DIF_BPF_COEFF1213, 0xfe1bfe9a},
+{5100000, DIF_BPF_COEFF1415, 0x001d023e},
+{5100000, DIF_BPF_COEFF1617, 0x04130488},
+{5100000, DIF_BPF_COEFF1819, 0x02e6ff5b},
+{5100000, DIF_BPF_COEFF2021, 0xfb1ef812},
+{5100000, DIF_BPF_COEFF2223, 0xf7f7fb7f},
+{5100000, DIF_BPF_COEFF2425, 0x01bc084e},
+{5100000, DIF_BPF_COEFF2627, 0x0c430b72},
+{5100000, DIF_BPF_COEFF2829, 0x059afcba},
+{5100000, DIF_BPF_COEFF3031, 0xf467f046},
+{5100000, DIF_BPF_COEFF3233, 0xf26cfa4a},
+{5100000, DIF_BPF_COEFF3435, 0x04cd0da0},
+{5100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 51_quant.dat*/
+
+
+/*case 5200000:*/
+/* BEGIN - DIF BPF register values from 52_quant.dat*/
+{5200000, DIF_BPF_COEFF01, 0x0000fffe},
+{5200000, DIF_BPF_COEFF23, 0xfff8ffef},
+{5200000, DIF_BPF_COEFF45, 0xfff00009},
+{5200000, DIF_BPF_COEFF67, 0x003f007f},
+{5200000, DIF_BPF_COEFF89, 0x00980056},
+{5200000, DIF_BPF_COEFF1011, 0xffa5feb6},
+{5200000, DIF_BPF_COEFF1213, 0xfe00fe15},
+{5200000, DIF_BPF_COEFF1415, 0xff4b0170},
+{5200000, DIF_BPF_COEFF1617, 0x03b004d7},
+{5200000, DIF_BPF_COEFF1819, 0x03e800b9},
+{5200000, DIF_BPF_COEFF2021, 0xfc48f87f},
+{5200000, DIF_BPF_COEFF2223, 0xf768fa23},
+{5200000, DIF_BPF_COEFF2425, 0x0022071f},
+{5200000, DIF_BPF_COEFF2627, 0x0bf90c1b},
+{5200000, DIF_BPF_COEFF2829, 0x06dafdfd},
+{5200000, DIF_BPF_COEFF3031, 0xf52df05e},
+{5200000, DIF_BPF_COEFF3233, 0xf1fef9b5},
+{5200000, DIF_BPF_COEFF3435, 0x04640d7f},
+{5200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 52_quant.dat*/
+
+
+/*case 5300000:*/
+/* BEGIN - DIF BPF register values from 53_quant.dat*/
+{5300000, DIF_BPF_COEFF01, 0x0000ffff},
+{5300000, DIF_BPF_COEFF23, 0xfff9ffee},
+{5300000, DIF_BPF_COEFF45, 0xffe6fff3},
+{5300000, DIF_BPF_COEFF67, 0x00250072},
+{5300000, DIF_BPF_COEFF89, 0x00af009c},
+{5300000, DIF_BPF_COEFF1011, 0x000cff10},
+{5300000, DIF_BPF_COEFF1213, 0xfe13fdb8},
+{5300000, DIF_BPF_COEFF1415, 0xfe870089},
+{5300000, DIF_BPF_COEFF1617, 0x031104e1},
+{5300000, DIF_BPF_COEFF1819, 0x04b8020f},
+{5300000, DIF_BPF_COEFF2021, 0xfd98f92f},
+{5300000, DIF_BPF_COEFF2223, 0xf71df8f0},
+{5300000, DIF_BPF_COEFF2425, 0xfe8805ce},
+{5300000, DIF_BPF_COEFF2627, 0x0b7e0c9c},
+{5300000, DIF_BPF_COEFF2829, 0x0808ff44},
+{5300000, DIF_BPF_COEFF3031, 0xf603f086},
+{5300000, DIF_BPF_COEFF3233, 0xf19af922},
+{5300000, DIF_BPF_COEFF3435, 0x03fb0d5e},
+{5300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 53_quant.dat*/
+
+
+/*case 5400000:*/
+/* BEGIN - DIF BPF register values from 54_quant.dat*/
+{5400000, DIF_BPF_COEFF01, 0x00000001},
+{5400000, DIF_BPF_COEFF23, 0xfffcffef},
+{5400000, DIF_BPF_COEFF45, 0xffe0ffe0},
+{5400000, DIF_BPF_COEFF67, 0x00050056},
+{5400000, DIF_BPF_COEFF89, 0x00b000d1},
+{5400000, DIF_BPF_COEFF1011, 0x0071ff82},
+{5400000, DIF_BPF_COEFF1213, 0xfe53fd8c},
+{5400000, DIF_BPF_COEFF1415, 0xfddfff99},
+{5400000, DIF_BPF_COEFF1617, 0x024104a3},
+{5400000, DIF_BPF_COEFF1819, 0x054a034d},
+{5400000, DIF_BPF_COEFF2021, 0xff01fa1e},
+{5400000, DIF_BPF_COEFF2223, 0xf717f7ed},
+{5400000, DIF_BPF_COEFF2425, 0xfcf50461},
+{5400000, DIF_BPF_COEFF2627, 0x0ad50cf4},
+{5400000, DIF_BPF_COEFF2829, 0x0921008d},
+{5400000, DIF_BPF_COEFF3031, 0xf6e7f0bd},
+{5400000, DIF_BPF_COEFF3233, 0xf13ff891},
+{5400000, DIF_BPF_COEFF3435, 0x03920d3b},
+{5400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 54_quant.dat*/
+
+
+/*case 5500000:*/
+/* BEGIN - DIF BPF register values from 55_quant.dat*/
+{5500000, DIF_BPF_COEFF01, 0x00010002},
+{5500000, DIF_BPF_COEFF23, 0xfffffff3},
+{5500000, DIF_BPF_COEFF45, 0xffdeffd1},
+{5500000, DIF_BPF_COEFF67, 0xffe5002f},
+{5500000, DIF_BPF_COEFF89, 0x009c00ed},
+{5500000, DIF_BPF_COEFF1011, 0x00cb0000},
+{5500000, DIF_BPF_COEFF1213, 0xfebafd94},
+{5500000, DIF_BPF_COEFF1415, 0xfd61feb0},
+{5500000, DIF_BPF_COEFF1617, 0x014d0422},
+{5500000, DIF_BPF_COEFF1819, 0x05970464},
+{5500000, DIF_BPF_COEFF2021, 0x0074fb41},
+{5500000, DIF_BPF_COEFF2223, 0xf759f721},
+{5500000, DIF_BPF_COEFF2425, 0xfb7502de},
+{5500000, DIF_BPF_COEFF2627, 0x0a000d21},
+{5500000, DIF_BPF_COEFF2829, 0x0a2201d4},
+{5500000, DIF_BPF_COEFF3031, 0xf7d9f104},
+{5500000, DIF_BPF_COEFF3233, 0xf0edf804},
+{5500000, DIF_BPF_COEFF3435, 0x03280d19},
+{5500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 55_quant.dat*/
+
+
+/*case 5600000:*/
+/* BEGIN - DIF BPF register values from 56_quant.dat*/
+{5600000, DIF_BPF_COEFF01, 0x00010003},
+{5600000, DIF_BPF_COEFF23, 0x0003fffa},
+{5600000, DIF_BPF_COEFF45, 0xffe3ffc9},
+{5600000, DIF_BPF_COEFF67, 0xffc90002},
+{5600000, DIF_BPF_COEFF89, 0x007500ef},
+{5600000, DIF_BPF_COEFF1011, 0x010e007e},
+{5600000, DIF_BPF_COEFF1213, 0xff3dfdcf},
+{5600000, DIF_BPF_COEFF1415, 0xfd16fddd},
+{5600000, DIF_BPF_COEFF1617, 0x00440365},
+{5600000, DIF_BPF_COEFF1819, 0x059b0548},
+{5600000, DIF_BPF_COEFF2021, 0x01e3fc90},
+{5600000, DIF_BPF_COEFF2223, 0xf7dff691},
+{5600000, DIF_BPF_COEFF2425, 0xfa0f014d},
+{5600000, DIF_BPF_COEFF2627, 0x09020d23},
+{5600000, DIF_BPF_COEFF2829, 0x0b0a0318},
+{5600000, DIF_BPF_COEFF3031, 0xf8d7f15a},
+{5600000, DIF_BPF_COEFF3233, 0xf0a5f779},
+{5600000, DIF_BPF_COEFF3435, 0x02bd0cf6},
+{5600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 56_quant.dat*/
+
+
+/*case 5700000:*/
+/* BEGIN - DIF BPF register values from 57_quant.dat*/
+{5700000, DIF_BPF_COEFF01, 0x00010003},
+{5700000, DIF_BPF_COEFF23, 0x00060001},
+{5700000, DIF_BPF_COEFF45, 0xffecffc9},
+{5700000, DIF_BPF_COEFF67, 0xffb4ffd4},
+{5700000, DIF_BPF_COEFF89, 0x004000d5},
+{5700000, DIF_BPF_COEFF1011, 0x013600f0},
+{5700000, DIF_BPF_COEFF1213, 0xffd3fe39},
+{5700000, DIF_BPF_COEFF1415, 0xfd04fd31},
+{5700000, DIF_BPF_COEFF1617, 0xff360277},
+{5700000, DIF_BPF_COEFF1819, 0x055605ef},
+{5700000, DIF_BPF_COEFF2021, 0x033efdfe},
+{5700000, DIF_BPF_COEFF2223, 0xf8a5f642},
+{5700000, DIF_BPF_COEFF2425, 0xf8cbffb6},
+{5700000, DIF_BPF_COEFF2627, 0x07e10cfb},
+{5700000, DIF_BPF_COEFF2829, 0x0bd50456},
+{5700000, DIF_BPF_COEFF3031, 0xf9dff1be},
+{5700000, DIF_BPF_COEFF3233, 0xf067f6f2},
+{5700000, DIF_BPF_COEFF3435, 0x02520cd2},
+{5700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 57_quant.dat*/
+
+
+/*case 5800000:*/
+/* BEGIN - DIF BPF register values from 58_quant.dat*/
+{5800000, DIF_BPF_COEFF01, 0x00000003},
+{5800000, DIF_BPF_COEFF23, 0x00080009},
+{5800000, DIF_BPF_COEFF45, 0xfff8ffd2},
+{5800000, DIF_BPF_COEFF67, 0xffaaffac},
+{5800000, DIF_BPF_COEFF89, 0x000200a3},
+{5800000, DIF_BPF_COEFF1011, 0x013c014a},
+{5800000, DIF_BPF_COEFF1213, 0x006dfec9},
+{5800000, DIF_BPF_COEFF1415, 0xfd2bfcb7},
+{5800000, DIF_BPF_COEFF1617, 0xfe350165},
+{5800000, DIF_BPF_COEFF1819, 0x04cb0651},
+{5800000, DIF_BPF_COEFF2021, 0x0477ff7e},
+{5800000, DIF_BPF_COEFF2223, 0xf9a5f635},
+{5800000, DIF_BPF_COEFF2425, 0xf7b1fe20},
+{5800000, DIF_BPF_COEFF2627, 0x069f0ca8},
+{5800000, DIF_BPF_COEFF2829, 0x0c81058b},
+{5800000, DIF_BPF_COEFF3031, 0xfaf0f231},
+{5800000, DIF_BPF_COEFF3233, 0xf033f66d},
+{5800000, DIF_BPF_COEFF3435, 0x01e60cae},
+{5800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 58_quant.dat*/
+
+
+/*case 5900000:*/
+/* BEGIN - DIF BPF register values from 59_quant.dat*/
+{5900000, DIF_BPF_COEFF01, 0x00000002},
+{5900000, DIF_BPF_COEFF23, 0x0009000e},
+{5900000, DIF_BPF_COEFF45, 0x0005ffe1},
+{5900000, DIF_BPF_COEFF67, 0xffacff90},
+{5900000, DIF_BPF_COEFF89, 0xffc5005f},
+{5900000, DIF_BPF_COEFF1011, 0x01210184},
+{5900000, DIF_BPF_COEFF1213, 0x00fcff72},
+{5900000, DIF_BPF_COEFF1415, 0xfd8afc77},
+{5900000, DIF_BPF_COEFF1617, 0xfd51003f},
+{5900000, DIF_BPF_COEFF1819, 0x04020669},
+{5900000, DIF_BPF_COEFF2021, 0x05830103},
+{5900000, DIF_BPF_COEFF2223, 0xfad7f66b},
+{5900000, DIF_BPF_COEFF2425, 0xf6c8fc93},
+{5900000, DIF_BPF_COEFF2627, 0x05430c2b},
+{5900000, DIF_BPF_COEFF2829, 0x0d0d06b5},
+{5900000, DIF_BPF_COEFF3031, 0xfc08f2b2},
+{5900000, DIF_BPF_COEFF3233, 0xf00af5ec},
+{5900000, DIF_BPF_COEFF3435, 0x017b0c89},
+{5900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 59_quant.dat*/
+
+
+/*case 6000000:*/
+/* BEGIN - DIF BPF register values from 60_quant.dat*/
+{6000000, DIF_BPF_COEFF01, 0x00000001},
+{6000000, DIF_BPF_COEFF23, 0x00070012},
+{6000000, DIF_BPF_COEFF45, 0x0012fff5},
+{6000000, DIF_BPF_COEFF67, 0xffbaff82},
+{6000000, DIF_BPF_COEFF89, 0xff8e000f},
+{6000000, DIF_BPF_COEFF1011, 0x00e80198},
+{6000000, DIF_BPF_COEFF1213, 0x01750028},
+{6000000, DIF_BPF_COEFF1415, 0xfe18fc75},
+{6000000, DIF_BPF_COEFF1617, 0xfc99ff15},
+{6000000, DIF_BPF_COEFF1819, 0x03050636},
+{6000000, DIF_BPF_COEFF2021, 0x0656027f},
+{6000000, DIF_BPF_COEFF2223, 0xfc32f6e2},
+{6000000, DIF_BPF_COEFF2425, 0xf614fb17},
+{6000000, DIF_BPF_COEFF2627, 0x03d20b87},
+{6000000, DIF_BPF_COEFF2829, 0x0d7707d2},
+{6000000, DIF_BPF_COEFF3031, 0xfd26f341},
+{6000000, DIF_BPF_COEFF3233, 0xefeaf56f},
+{6000000, DIF_BPF_COEFF3435, 0x010f0c64},
+{6000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 60_quant.dat*/
+
+
+/*case 6100000:*/
+/* BEGIN - DIF BPF register values from 61_quant.dat*/
+{6100000, DIF_BPF_COEFF01, 0xffff0000},
+{6100000, DIF_BPF_COEFF23, 0x00050012},
+{6100000, DIF_BPF_COEFF45, 0x001c000b},
+{6100000, DIF_BPF_COEFF67, 0xffd1ff84},
+{6100000, DIF_BPF_COEFF89, 0xff66ffbe},
+{6100000, DIF_BPF_COEFF1011, 0x00960184},
+{6100000, DIF_BPF_COEFF1213, 0x01cd00da},
+{6100000, DIF_BPF_COEFF1415, 0xfeccfcb2},
+{6100000, DIF_BPF_COEFF1617, 0xfc17fdf9},
+{6100000, DIF_BPF_COEFF1819, 0x01e005bc},
+{6100000, DIF_BPF_COEFF2021, 0x06e703e4},
+{6100000, DIF_BPF_COEFF2223, 0xfdabf798},
+{6100000, DIF_BPF_COEFF2425, 0xf599f9b3},
+{6100000, DIF_BPF_COEFF2627, 0x02510abd},
+{6100000, DIF_BPF_COEFF2829, 0x0dbf08df},
+{6100000, DIF_BPF_COEFF3031, 0xfe48f3dc},
+{6100000, DIF_BPF_COEFF3233, 0xefd5f4f6},
+{6100000, DIF_BPF_COEFF3435, 0x00a20c3e},
+{6100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 61_quant.dat*/
+
+
+/*case 6200000:*/
+/* BEGIN - DIF BPF register values from 62_quant.dat*/
+{6200000, DIF_BPF_COEFF01, 0xfffffffe},
+{6200000, DIF_BPF_COEFF23, 0x0002000f},
+{6200000, DIF_BPF_COEFF45, 0x0021001f},
+{6200000, DIF_BPF_COEFF67, 0xfff0ff97},
+{6200000, DIF_BPF_COEFF89, 0xff50ff74},
+{6200000, DIF_BPF_COEFF1011, 0x0034014a},
+{6200000, DIF_BPF_COEFF1213, 0x01fa0179},
+{6200000, DIF_BPF_COEFF1415, 0xff97fd2a},
+{6200000, DIF_BPF_COEFF1617, 0xfbd3fcfa},
+{6200000, DIF_BPF_COEFF1819, 0x00a304fe},
+{6200000, DIF_BPF_COEFF2021, 0x07310525},
+{6200000, DIF_BPF_COEFF2223, 0xff37f886},
+{6200000, DIF_BPF_COEFF2425, 0xf55cf86e},
+{6200000, DIF_BPF_COEFF2627, 0x00c709d0},
+{6200000, DIF_BPF_COEFF2829, 0x0de209db},
+{6200000, DIF_BPF_COEFF3031, 0xff6df484},
+{6200000, DIF_BPF_COEFF3233, 0xefcbf481},
+{6200000, DIF_BPF_COEFF3435, 0x00360c18},
+{6200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 62_quant.dat*/
+
+
+/*case 6300000:*/
+/* BEGIN - DIF BPF register values from 63_quant.dat*/
+{6300000, DIF_BPF_COEFF01, 0xfffffffd},
+{6300000, DIF_BPF_COEFF23, 0xfffe000a},
+{6300000, DIF_BPF_COEFF45, 0x0021002f},
+{6300000, DIF_BPF_COEFF67, 0x0010ffb8},
+{6300000, DIF_BPF_COEFF89, 0xff50ff3b},
+{6300000, DIF_BPF_COEFF1011, 0xffcc00f0},
+{6300000, DIF_BPF_COEFF1213, 0x01fa01fa},
+{6300000, DIF_BPF_COEFF1415, 0x0069fdd4},
+{6300000, DIF_BPF_COEFF1617, 0xfbd3fc26},
+{6300000, DIF_BPF_COEFF1819, 0xff5d0407},
+{6300000, DIF_BPF_COEFF2021, 0x07310638},
+{6300000, DIF_BPF_COEFF2223, 0x00c9f9a8},
+{6300000, DIF_BPF_COEFF2425, 0xf55cf74e},
+{6300000, DIF_BPF_COEFF2627, 0xff3908c3},
+{6300000, DIF_BPF_COEFF2829, 0x0de20ac3},
+{6300000, DIF_BPF_COEFF3031, 0x0093f537},
+{6300000, DIF_BPF_COEFF3233, 0xefcbf410},
+{6300000, DIF_BPF_COEFF3435, 0xffca0bf2},
+{6300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 63_quant.dat*/
+
+
+/*case 6400000:*/
+/* BEGIN - DIF BPF register values from 64_quant.dat*/
+{6400000, DIF_BPF_COEFF01, 0xfffffffd},
+{6400000, DIF_BPF_COEFF23, 0xfffb0003},
+{6400000, DIF_BPF_COEFF45, 0x001c0037},
+{6400000, DIF_BPF_COEFF67, 0x002fffe2},
+{6400000, DIF_BPF_COEFF89, 0xff66ff17},
+{6400000, DIF_BPF_COEFF1011, 0xff6a007e},
+{6400000, DIF_BPF_COEFF1213, 0x01cd0251},
+{6400000, DIF_BPF_COEFF1415, 0x0134fea5},
+{6400000, DIF_BPF_COEFF1617, 0xfc17fb8b},
+{6400000, DIF_BPF_COEFF1819, 0xfe2002e0},
+{6400000, DIF_BPF_COEFF2021, 0x06e70713},
+{6400000, DIF_BPF_COEFF2223, 0x0255faf5},
+{6400000, DIF_BPF_COEFF2425, 0xf599f658},
+{6400000, DIF_BPF_COEFF2627, 0xfdaf0799},
+{6400000, DIF_BPF_COEFF2829, 0x0dbf0b96},
+{6400000, DIF_BPF_COEFF3031, 0x01b8f5f5},
+{6400000, DIF_BPF_COEFF3233, 0xefd5f3a3},
+{6400000, DIF_BPF_COEFF3435, 0xff5e0bca},
+{6400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 64_quant.dat*/
+
+
+/*case 6500000:*/
+/* BEGIN - DIF BPF register values from 65_quant.dat*/
+{6500000, DIF_BPF_COEFF01, 0x0000fffd},
+{6500000, DIF_BPF_COEFF23, 0xfff9fffb},
+{6500000, DIF_BPF_COEFF45, 0x00120037},
+{6500000, DIF_BPF_COEFF67, 0x00460010},
+{6500000, DIF_BPF_COEFF89, 0xff8eff0f},
+{6500000, DIF_BPF_COEFF1011, 0xff180000},
+{6500000, DIF_BPF_COEFF1213, 0x01750276},
+{6500000, DIF_BPF_COEFF1415, 0x01e8ff8d},
+{6500000, DIF_BPF_COEFF1617, 0xfc99fb31},
+{6500000, DIF_BPF_COEFF1819, 0xfcfb0198},
+{6500000, DIF_BPF_COEFF2021, 0x065607ad},
+{6500000, DIF_BPF_COEFF2223, 0x03cefc64},
+{6500000, DIF_BPF_COEFF2425, 0xf614f592},
+{6500000, DIF_BPF_COEFF2627, 0xfc2e0656},
+{6500000, DIF_BPF_COEFF2829, 0x0d770c52},
+{6500000, DIF_BPF_COEFF3031, 0x02daf6bd},
+{6500000, DIF_BPF_COEFF3233, 0xefeaf33b},
+{6500000, DIF_BPF_COEFF3435, 0xfef10ba3},
+{6500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 65_quant.dat*/
+
+
+/*case 6600000:*/
+/* BEGIN - DIF BPF register values from 66_quant.dat*/
+{6600000, DIF_BPF_COEFF01, 0x0000fffe},
+{6600000, DIF_BPF_COEFF23, 0xfff7fff5},
+{6600000, DIF_BPF_COEFF45, 0x0005002f},
+{6600000, DIF_BPF_COEFF67, 0x0054003c},
+{6600000, DIF_BPF_COEFF89, 0xffc5ff22},
+{6600000, DIF_BPF_COEFF1011, 0xfedfff82},
+{6600000, DIF_BPF_COEFF1213, 0x00fc0267},
+{6600000, DIF_BPF_COEFF1415, 0x0276007e},
+{6600000, DIF_BPF_COEFF1617, 0xfd51fb1c},
+{6600000, DIF_BPF_COEFF1819, 0xfbfe003e},
+{6600000, DIF_BPF_COEFF2021, 0x05830802},
+{6600000, DIF_BPF_COEFF2223, 0x0529fdec},
+{6600000, DIF_BPF_COEFF2425, 0xf6c8f4fe},
+{6600000, DIF_BPF_COEFF2627, 0xfabd04ff},
+{6600000, DIF_BPF_COEFF2829, 0x0d0d0cf6},
+{6600000, DIF_BPF_COEFF3031, 0x03f8f78f},
+{6600000, DIF_BPF_COEFF3233, 0xf00af2d7},
+{6600000, DIF_BPF_COEFF3435, 0xfe850b7b},
+{6600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 66_quant.dat*/
+
+
+/*case 6700000:*/
+/* BEGIN - DIF BPF register values from 67_quant.dat*/
+{6700000, DIF_BPF_COEFF01, 0x0000ffff},
+{6700000, DIF_BPF_COEFF23, 0xfff8fff0},
+{6700000, DIF_BPF_COEFF45, 0xfff80020},
+{6700000, DIF_BPF_COEFF67, 0x00560060},
+{6700000, DIF_BPF_COEFF89, 0x0002ff4e},
+{6700000, DIF_BPF_COEFF1011, 0xfec4ff10},
+{6700000, DIF_BPF_COEFF1213, 0x006d0225},
+{6700000, DIF_BPF_COEFF1415, 0x02d50166},
+{6700000, DIF_BPF_COEFF1617, 0xfe35fb4e},
+{6700000, DIF_BPF_COEFF1819, 0xfb35fee1},
+{6700000, DIF_BPF_COEFF2021, 0x0477080e},
+{6700000, DIF_BPF_COEFF2223, 0x065bff82},
+{6700000, DIF_BPF_COEFF2425, 0xf7b1f4a0},
+{6700000, DIF_BPF_COEFF2627, 0xf9610397},
+{6700000, DIF_BPF_COEFF2829, 0x0c810d80},
+{6700000, DIF_BPF_COEFF3031, 0x0510f869},
+{6700000, DIF_BPF_COEFF3233, 0xf033f278},
+{6700000, DIF_BPF_COEFF3435, 0xfe1a0b52},
+{6700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 67_quant.dat*/
+
+
+/*case 6800000:*/
+/* BEGIN - DIF BPF register values from 68_quant.dat*/
+{6800000, DIF_BPF_COEFF01, 0x00010000},
+{6800000, DIF_BPF_COEFF23, 0xfffaffee},
+{6800000, DIF_BPF_COEFF45, 0xffec000c},
+{6800000, DIF_BPF_COEFF67, 0x004c0078},
+{6800000, DIF_BPF_COEFF89, 0x0040ff8e},
+{6800000, DIF_BPF_COEFF1011, 0xfecafeb6},
+{6800000, DIF_BPF_COEFF1213, 0xffd301b6},
+{6800000, DIF_BPF_COEFF1415, 0x02fc0235},
+{6800000, DIF_BPF_COEFF1617, 0xff36fbc5},
+{6800000, DIF_BPF_COEFF1819, 0xfaaafd90},
+{6800000, DIF_BPF_COEFF2021, 0x033e07d2},
+{6800000, DIF_BPF_COEFF2223, 0x075b011b},
+{6800000, DIF_BPF_COEFF2425, 0xf8cbf47a},
+{6800000, DIF_BPF_COEFF2627, 0xf81f0224},
+{6800000, DIF_BPF_COEFF2829, 0x0bd50def},
+{6800000, DIF_BPF_COEFF3031, 0x0621f94b},
+{6800000, DIF_BPF_COEFF3233, 0xf067f21e},
+{6800000, DIF_BPF_COEFF3435, 0xfdae0b29},
+{6800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 68_quant.dat*/
+
+
+/*case 6900000:*/
+/* BEGIN - DIF BPF register values from 69_quant.dat*/
+{6900000, DIF_BPF_COEFF01, 0x00010001},
+{6900000, DIF_BPF_COEFF23, 0xfffdffef},
+{6900000, DIF_BPF_COEFF45, 0xffe3fff6},
+{6900000, DIF_BPF_COEFF67, 0x0037007f},
+{6900000, DIF_BPF_COEFF89, 0x0075ffdc},
+{6900000, DIF_BPF_COEFF1011, 0xfef2fe7c},
+{6900000, DIF_BPF_COEFF1213, 0xff3d0122},
+{6900000, DIF_BPF_COEFF1415, 0x02ea02dd},
+{6900000, DIF_BPF_COEFF1617, 0x0044fc79},
+{6900000, DIF_BPF_COEFF1819, 0xfa65fc5d},
+{6900000, DIF_BPF_COEFF2021, 0x01e3074e},
+{6900000, DIF_BPF_COEFF2223, 0x082102ad},
+{6900000, DIF_BPF_COEFF2425, 0xfa0ff48c},
+{6900000, DIF_BPF_COEFF2627, 0xf6fe00a9},
+{6900000, DIF_BPF_COEFF2829, 0x0b0a0e43},
+{6900000, DIF_BPF_COEFF3031, 0x0729fa33},
+{6900000, DIF_BPF_COEFF3233, 0xf0a5f1c9},
+{6900000, DIF_BPF_COEFF3435, 0xfd430b00},
+{6900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 69_quant.dat*/
+
+
+/*case 7000000:*/
+/* BEGIN - DIF BPF register values from 70_quant.dat*/
+{7000000, DIF_BPF_COEFF01, 0x00010002},
+{7000000, DIF_BPF_COEFF23, 0x0001fff3},
+{7000000, DIF_BPF_COEFF45, 0xffdeffe2},
+{7000000, DIF_BPF_COEFF67, 0x001b0076},
+{7000000, DIF_BPF_COEFF89, 0x009c002d},
+{7000000, DIF_BPF_COEFF1011, 0xff35fe68},
+{7000000, DIF_BPF_COEFF1213, 0xfeba0076},
+{7000000, DIF_BPF_COEFF1415, 0x029f0352},
+{7000000, DIF_BPF_COEFF1617, 0x014dfd60},
+{7000000, DIF_BPF_COEFF1819, 0xfa69fb53},
+{7000000, DIF_BPF_COEFF2021, 0x00740688},
+{7000000, DIF_BPF_COEFF2223, 0x08a7042d},
+{7000000, DIF_BPF_COEFF2425, 0xfb75f4d6},
+{7000000, DIF_BPF_COEFF2627, 0xf600ff2d},
+{7000000, DIF_BPF_COEFF2829, 0x0a220e7a},
+{7000000, DIF_BPF_COEFF3031, 0x0827fb22},
+{7000000, DIF_BPF_COEFF3233, 0xf0edf17a},
+{7000000, DIF_BPF_COEFF3435, 0xfcd80ad6},
+{7000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 70_quant.dat*/
+
+
+/*case 7100000:*/
+/* BEGIN - DIF BPF register values from 71_quant.dat*/
+{7100000, DIF_BPF_COEFF01, 0x00000003},
+{7100000, DIF_BPF_COEFF23, 0x0004fff9},
+{7100000, DIF_BPF_COEFF45, 0xffe0ffd2},
+{7100000, DIF_BPF_COEFF67, 0xfffb005e},
+{7100000, DIF_BPF_COEFF89, 0x00b0007a},
+{7100000, DIF_BPF_COEFF1011, 0xff8ffe7c},
+{7100000, DIF_BPF_COEFF1213, 0xfe53ffc1},
+{7100000, DIF_BPF_COEFF1415, 0x0221038c},
+{7100000, DIF_BPF_COEFF1617, 0x0241fe6e},
+{7100000, DIF_BPF_COEFF1819, 0xfab6fa80},
+{7100000, DIF_BPF_COEFF2021, 0xff010587},
+{7100000, DIF_BPF_COEFF2223, 0x08e90590},
+{7100000, DIF_BPF_COEFF2425, 0xfcf5f556},
+{7100000, DIF_BPF_COEFF2627, 0xf52bfdb3},
+{7100000, DIF_BPF_COEFF2829, 0x09210e95},
+{7100000, DIF_BPF_COEFF3031, 0x0919fc15},
+{7100000, DIF_BPF_COEFF3233, 0xf13ff12f},
+{7100000, DIF_BPF_COEFF3435, 0xfc6e0aab},
+{7100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 71_quant.dat*/
+
+
+/*case 7200000:*/
+/* BEGIN - DIF BPF register values from 72_quant.dat*/
+{7200000, DIF_BPF_COEFF01, 0x00000003},
+{7200000, DIF_BPF_COEFF23, 0x00070000},
+{7200000, DIF_BPF_COEFF45, 0xffe6ffc9},
+{7200000, DIF_BPF_COEFF67, 0xffdb0039},
+{7200000, DIF_BPF_COEFF89, 0x00af00b8},
+{7200000, DIF_BPF_COEFF1011, 0xfff4feb6},
+{7200000, DIF_BPF_COEFF1213, 0xfe13ff10},
+{7200000, DIF_BPF_COEFF1415, 0x01790388},
+{7200000, DIF_BPF_COEFF1617, 0x0311ff92},
+{7200000, DIF_BPF_COEFF1819, 0xfb48f9ed},
+{7200000, DIF_BPF_COEFF2021, 0xfd980453},
+{7200000, DIF_BPF_COEFF2223, 0x08e306cd},
+{7200000, DIF_BPF_COEFF2425, 0xfe88f60a},
+{7200000, DIF_BPF_COEFF2627, 0xf482fc40},
+{7200000, DIF_BPF_COEFF2829, 0x08080e93},
+{7200000, DIF_BPF_COEFF3031, 0x09fdfd0c},
+{7200000, DIF_BPF_COEFF3233, 0xf19af0ea},
+{7200000, DIF_BPF_COEFF3435, 0xfc050a81},
+{7200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 72_quant.dat*/
+
+
+/*case 7300000:*/
+/* BEGIN - DIF BPF register values from 73_quant.dat*/
+{7300000, DIF_BPF_COEFF01, 0x00000002},
+{7300000, DIF_BPF_COEFF23, 0x00080008},
+{7300000, DIF_BPF_COEFF45, 0xfff0ffc9},
+{7300000, DIF_BPF_COEFF67, 0xffc1000d},
+{7300000, DIF_BPF_COEFF89, 0x009800e2},
+{7300000, DIF_BPF_COEFF1011, 0x005bff10},
+{7300000, DIF_BPF_COEFF1213, 0xfe00fe74},
+{7300000, DIF_BPF_COEFF1415, 0x00b50345},
+{7300000, DIF_BPF_COEFF1617, 0x03b000bc},
+{7300000, DIF_BPF_COEFF1819, 0xfc18f9a1},
+{7300000, DIF_BPF_COEFF2021, 0xfc4802f9},
+{7300000, DIF_BPF_COEFF2223, 0x089807dc},
+{7300000, DIF_BPF_COEFF2425, 0x0022f6f0},
+{7300000, DIF_BPF_COEFF2627, 0xf407fada},
+{7300000, DIF_BPF_COEFF2829, 0x06da0e74},
+{7300000, DIF_BPF_COEFF3031, 0x0ad3fe06},
+{7300000, DIF_BPF_COEFF3233, 0xf1fef0ab},
+{7300000, DIF_BPF_COEFF3435, 0xfb9c0a55},
+{7300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 73_quant.dat*/
+
+
+/*case 7400000:*/
+/* BEGIN - DIF BPF register values from 74_quant.dat*/
+{7400000, DIF_BPF_COEFF01, 0x00000001},
+{7400000, DIF_BPF_COEFF23, 0x0008000e},
+{7400000, DIF_BPF_COEFF45, 0xfffdffd0},
+{7400000, DIF_BPF_COEFF67, 0xffafffdf},
+{7400000, DIF_BPF_COEFF89, 0x006e00f2},
+{7400000, DIF_BPF_COEFF1011, 0x00b8ff82},
+{7400000, DIF_BPF_COEFF1213, 0xfe1bfdf8},
+{7400000, DIF_BPF_COEFF1415, 0xffe302c8},
+{7400000, DIF_BPF_COEFF1617, 0x041301dc},
+{7400000, DIF_BPF_COEFF1819, 0xfd1af99e},
+{7400000, DIF_BPF_COEFF2021, 0xfb1e0183},
+{7400000, DIF_BPF_COEFF2223, 0x080908b5},
+{7400000, DIF_BPF_COEFF2425, 0x01bcf801},
+{7400000, DIF_BPF_COEFF2627, 0xf3bdf985},
+{7400000, DIF_BPF_COEFF2829, 0x059a0e38},
+{7400000, DIF_BPF_COEFF3031, 0x0b99ff03},
+{7400000, DIF_BPF_COEFF3233, 0xf26cf071},
+{7400000, DIF_BPF_COEFF3435, 0xfb330a2a},
+{7400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 74_quant.dat*/
+
+
+/*case 7500000:*/
+/* BEGIN - DIF BPF register values from 75_quant.dat*/
+{7500000, DIF_BPF_COEFF01, 0xffff0000},
+{7500000, DIF_BPF_COEFF23, 0x00070011},
+{7500000, DIF_BPF_COEFF45, 0x000affdf},
+{7500000, DIF_BPF_COEFF67, 0xffa9ffb5},
+{7500000, DIF_BPF_COEFF89, 0x003700e6},
+{7500000, DIF_BPF_COEFF1011, 0x01010000},
+{7500000, DIF_BPF_COEFF1213, 0xfe62fda8},
+{7500000, DIF_BPF_COEFF1415, 0xff140219},
+{7500000, DIF_BPF_COEFF1617, 0x043502e1},
+{7500000, DIF_BPF_COEFF1819, 0xfe42f9e6},
+{7500000, DIF_BPF_COEFF2021, 0xfa270000},
+{7500000, DIF_BPF_COEFF2223, 0x073a0953},
+{7500000, DIF_BPF_COEFF2425, 0x034cf939},
+{7500000, DIF_BPF_COEFF2627, 0xf3a4f845},
+{7500000, DIF_BPF_COEFF2829, 0x044c0de1},
+{7500000, DIF_BPF_COEFF3031, 0x0c4f0000},
+{7500000, DIF_BPF_COEFF3233, 0xf2e2f03c},
+{7500000, DIF_BPF_COEFF3435, 0xfacc09fe},
+{7500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 75_quant.dat*/
+
+
+/*case 7600000:*/
+/* BEGIN - DIF BPF register values from 76_quant.dat*/
+{7600000, DIF_BPF_COEFF01, 0xffffffff},
+{7600000, DIF_BPF_COEFF23, 0x00040012},
+{7600000, DIF_BPF_COEFF45, 0x0016fff3},
+{7600000, DIF_BPF_COEFF67, 0xffafff95},
+{7600000, DIF_BPF_COEFF89, 0xfff900c0},
+{7600000, DIF_BPF_COEFF1011, 0x0130007e},
+{7600000, DIF_BPF_COEFF1213, 0xfecefd89},
+{7600000, DIF_BPF_COEFF1415, 0xfe560146},
+{7600000, DIF_BPF_COEFF1617, 0x041303bc},
+{7600000, DIF_BPF_COEFF1819, 0xff81fa76},
+{7600000, DIF_BPF_COEFF2021, 0xf96cfe7d},
+{7600000, DIF_BPF_COEFF2223, 0x063209b1},
+{7600000, DIF_BPF_COEFF2425, 0x04c9fa93},
+{7600000, DIF_BPF_COEFF2627, 0xf3bdf71e},
+{7600000, DIF_BPF_COEFF2829, 0x02f30d6e},
+{7600000, DIF_BPF_COEFF3031, 0x0cf200fd},
+{7600000, DIF_BPF_COEFF3233, 0xf361f00e},
+{7600000, DIF_BPF_COEFF3435, 0xfa6509d1},
+{7600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 76_quant.dat*/
+
+
+/*case 7700000:*/
+/* BEGIN - DIF BPF register values from 77_quant.dat*/
+{7700000, DIF_BPF_COEFF01, 0xfffffffe},
+{7700000, DIF_BPF_COEFF23, 0x00010010},
+{7700000, DIF_BPF_COEFF45, 0x001e0008},
+{7700000, DIF_BPF_COEFF67, 0xffc1ff84},
+{7700000, DIF_BPF_COEFF89, 0xffbc0084},
+{7700000, DIF_BPF_COEFF1011, 0x013e00f0},
+{7700000, DIF_BPF_COEFF1213, 0xff56fd9f},
+{7700000, DIF_BPF_COEFF1415, 0xfdb8005c},
+{7700000, DIF_BPF_COEFF1617, 0x03b00460},
+{7700000, DIF_BPF_COEFF1819, 0x00c7fb45},
+{7700000, DIF_BPF_COEFF2021, 0xf8f4fd07},
+{7700000, DIF_BPF_COEFF2223, 0x04fa09ce},
+{7700000, DIF_BPF_COEFF2425, 0x062afc07},
+{7700000, DIF_BPF_COEFF2627, 0xf407f614},
+{7700000, DIF_BPF_COEFF2829, 0x01920ce0},
+{7700000, DIF_BPF_COEFF3031, 0x0d8301fa},
+{7700000, DIF_BPF_COEFF3233, 0xf3e8efe5},
+{7700000, DIF_BPF_COEFF3435, 0xfa0009a4},
+{7700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 77_quant.dat*/
+
+
+/*case 7800000:*/
+/* BEGIN - DIF BPF register values from 78_quant.dat*/
+{7800000, DIF_BPF_COEFF01, 0x0000fffd},
+{7800000, DIF_BPF_COEFF23, 0xfffd000b},
+{7800000, DIF_BPF_COEFF45, 0x0022001d},
+{7800000, DIF_BPF_COEFF67, 0xffdbff82},
+{7800000, DIF_BPF_COEFF89, 0xff870039},
+{7800000, DIF_BPF_COEFF1011, 0x012a014a},
+{7800000, DIF_BPF_COEFF1213, 0xffedfde7},
+{7800000, DIF_BPF_COEFF1415, 0xfd47ff6b},
+{7800000, DIF_BPF_COEFF1617, 0x031104c6},
+{7800000, DIF_BPF_COEFF1819, 0x0202fc4c},
+{7800000, DIF_BPF_COEFF2021, 0xf8c6fbad},
+{7800000, DIF_BPF_COEFF2223, 0x039909a7},
+{7800000, DIF_BPF_COEFF2425, 0x0767fd8e},
+{7800000, DIF_BPF_COEFF2627, 0xf482f52b},
+{7800000, DIF_BPF_COEFF2829, 0x002d0c39},
+{7800000, DIF_BPF_COEFF3031, 0x0e0002f4},
+{7800000, DIF_BPF_COEFF3233, 0xf477efc2},
+{7800000, DIF_BPF_COEFF3435, 0xf99b0977},
+{7800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 78_quant.dat*/
+
+
+/*case 7900000:*/
+/* BEGIN - DIF BPF register values from 79_quant.dat*/
+{7900000, DIF_BPF_COEFF01, 0x0000fffd},
+{7900000, DIF_BPF_COEFF23, 0xfffa0004},
+{7900000, DIF_BPF_COEFF45, 0x0020002d},
+{7900000, DIF_BPF_COEFF67, 0xfffbff91},
+{7900000, DIF_BPF_COEFF89, 0xff61ffe8},
+{7900000, DIF_BPF_COEFF1011, 0x00f70184},
+{7900000, DIF_BPF_COEFF1213, 0x0086fe5c},
+{7900000, DIF_BPF_COEFF1415, 0xfd0bfe85},
+{7900000, DIF_BPF_COEFF1617, 0x024104e5},
+{7900000, DIF_BPF_COEFF1819, 0x0323fd7d},
+{7900000, DIF_BPF_COEFF2021, 0xf8e2fa79},
+{7900000, DIF_BPF_COEFF2223, 0x021d093f},
+{7900000, DIF_BPF_COEFF2425, 0x0879ff22},
+{7900000, DIF_BPF_COEFF2627, 0xf52bf465},
+{7900000, DIF_BPF_COEFF2829, 0xfec70b79},
+{7900000, DIF_BPF_COEFF3031, 0x0e6803eb},
+{7900000, DIF_BPF_COEFF3233, 0xf50defa5},
+{7900000, DIF_BPF_COEFF3435, 0xf937094a},
+{7900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 79_quant.dat*/
+
+
+/*case 8000000:*/
+/* BEGIN - DIF BPF register values from 80_quant.dat*/
+{8000000, DIF_BPF_COEFF01, 0x0000fffe},
+{8000000, DIF_BPF_COEFF23, 0xfff8fffd},
+{8000000, DIF_BPF_COEFF45, 0x00190036},
+{8000000, DIF_BPF_COEFF67, 0x001bffaf},
+{8000000, DIF_BPF_COEFF89, 0xff4fff99},
+{8000000, DIF_BPF_COEFF1011, 0x00aa0198},
+{8000000, DIF_BPF_COEFF1213, 0x0112fef3},
+{8000000, DIF_BPF_COEFF1415, 0xfd09fdb9},
+{8000000, DIF_BPF_COEFF1617, 0x014d04be},
+{8000000, DIF_BPF_COEFF1819, 0x041bfecc},
+{8000000, DIF_BPF_COEFF2021, 0xf947f978},
+{8000000, DIF_BPF_COEFF2223, 0x00900897},
+{8000000, DIF_BPF_COEFF2425, 0x095a00b9},
+{8000000, DIF_BPF_COEFF2627, 0xf600f3c5},
+{8000000, DIF_BPF_COEFF2829, 0xfd650aa3},
+{8000000, DIF_BPF_COEFF3031, 0x0ebc04de},
+{8000000, DIF_BPF_COEFF3233, 0xf5aaef8e},
+{8000000, DIF_BPF_COEFF3435, 0xf8d5091c},
+{8000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 80_quant.dat*/
+
+
+/*case 8100000:*/
+/* BEGIN - DIF BPF register values from 81_quant.dat*/
+{8100000, DIF_BPF_COEFF01, 0x0000ffff},
+{8100000, DIF_BPF_COEFF23, 0xfff7fff6},
+{8100000, DIF_BPF_COEFF45, 0x000e0038},
+{8100000, DIF_BPF_COEFF67, 0x0037ffd7},
+{8100000, DIF_BPF_COEFF89, 0xff52ff56},
+{8100000, DIF_BPF_COEFF1011, 0x004b0184},
+{8100000, DIF_BPF_COEFF1213, 0x0186ffa1},
+{8100000, DIF_BPF_COEFF1415, 0xfd40fd16},
+{8100000, DIF_BPF_COEFF1617, 0x00440452},
+{8100000, DIF_BPF_COEFF1819, 0x04de0029},
+{8100000, DIF_BPF_COEFF2021, 0xf9f2f8b2},
+{8100000, DIF_BPF_COEFF2223, 0xfefe07b5},
+{8100000, DIF_BPF_COEFF2425, 0x0a05024d},
+{8100000, DIF_BPF_COEFF2627, 0xf6fef34d},
+{8100000, DIF_BPF_COEFF2829, 0xfc0a09b8},
+{8100000, DIF_BPF_COEFF3031, 0x0efa05cd},
+{8100000, DIF_BPF_COEFF3233, 0xf64eef7d},
+{8100000, DIF_BPF_COEFF3435, 0xf87308ed},
+{8100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 81_quant.dat*/
+
+
+/*case 8200000:*/
+/* BEGIN - DIF BPF register values from 82_quant.dat*/
+{8200000, DIF_BPF_COEFF01, 0x00010000},
+{8200000, DIF_BPF_COEFF23, 0xfff8fff0},
+{8200000, DIF_BPF_COEFF45, 0x00000031},
+{8200000, DIF_BPF_COEFF67, 0x004c0005},
+{8200000, DIF_BPF_COEFF89, 0xff6aff27},
+{8200000, DIF_BPF_COEFF1011, 0xffe4014a},
+{8200000, DIF_BPF_COEFF1213, 0x01d70057},
+{8200000, DIF_BPF_COEFF1415, 0xfdacfca6},
+{8200000, DIF_BPF_COEFF1617, 0xff3603a7},
+{8200000, DIF_BPF_COEFF1819, 0x05610184},
+{8200000, DIF_BPF_COEFF2021, 0xfadbf82e},
+{8200000, DIF_BPF_COEFF2223, 0xfd74069f},
+{8200000, DIF_BPF_COEFF2425, 0x0a7503d6},
+{8200000, DIF_BPF_COEFF2627, 0xf81ff2ff},
+{8200000, DIF_BPF_COEFF2829, 0xfab808b9},
+{8200000, DIF_BPF_COEFF3031, 0x0f2306b5},
+{8200000, DIF_BPF_COEFF3233, 0xf6f9ef72},
+{8200000, DIF_BPF_COEFF3435, 0xf81308bf},
+{8200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 82_quant.dat*/
+
+
+/*case 8300000:*/
+/* BEGIN - DIF BPF register values from 83_quant.dat*/
+{8300000, DIF_BPF_COEFF01, 0x00010001},
+{8300000, DIF_BPF_COEFF23, 0xfffbffee},
+{8300000, DIF_BPF_COEFF45, 0xfff30022},
+{8300000, DIF_BPF_COEFF67, 0x00560032},
+{8300000, DIF_BPF_COEFF89, 0xff95ff10},
+{8300000, DIF_BPF_COEFF1011, 0xff8000f0},
+{8300000, DIF_BPF_COEFF1213, 0x01fe0106},
+{8300000, DIF_BPF_COEFF1415, 0xfe46fc71},
+{8300000, DIF_BPF_COEFF1617, 0xfe3502c7},
+{8300000, DIF_BPF_COEFF1819, 0x059e02ce},
+{8300000, DIF_BPF_COEFF2021, 0xfbf9f7f2},
+{8300000, DIF_BPF_COEFF2223, 0xfbff055b},
+{8300000, DIF_BPF_COEFF2425, 0x0aa9054c},
+{8300000, DIF_BPF_COEFF2627, 0xf961f2db},
+{8300000, DIF_BPF_COEFF2829, 0xf97507aa},
+{8300000, DIF_BPF_COEFF3031, 0x0f350797},
+{8300000, DIF_BPF_COEFF3233, 0xf7a9ef6d},
+{8300000, DIF_BPF_COEFF3435, 0xf7b40890},
+{8300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 83_quant.dat*/
+
+
+/*case 8400000:*/
+/* BEGIN - DIF BPF register values from 84_quant.dat*/
+{8400000, DIF_BPF_COEFF01, 0x00010002},
+{8400000, DIF_BPF_COEFF23, 0xfffeffee},
+{8400000, DIF_BPF_COEFF45, 0xffe8000f},
+{8400000, DIF_BPF_COEFF67, 0x00540058},
+{8400000, DIF_BPF_COEFF89, 0xffcdff14},
+{8400000, DIF_BPF_COEFF1011, 0xff29007e},
+{8400000, DIF_BPF_COEFF1213, 0x01f6019e},
+{8400000, DIF_BPF_COEFF1415, 0xff01fc7c},
+{8400000, DIF_BPF_COEFF1617, 0xfd5101bf},
+{8400000, DIF_BPF_COEFF1819, 0x059203f6},
+{8400000, DIF_BPF_COEFF2021, 0xfd41f7fe},
+{8400000, DIF_BPF_COEFF2223, 0xfaa903f3},
+{8400000, DIF_BPF_COEFF2425, 0x0a9e06a9},
+{8400000, DIF_BPF_COEFF2627, 0xfabdf2e2},
+{8400000, DIF_BPF_COEFF2829, 0xf842068b},
+{8400000, DIF_BPF_COEFF3031, 0x0f320871},
+{8400000, DIF_BPF_COEFF3233, 0xf85eef6e},
+{8400000, DIF_BPF_COEFF3435, 0xf7560860},
+{8400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 84_quant.dat*/
+
+
+/*case 8500000:*/
+/* BEGIN - DIF BPF register values from 85_quant.dat*/
+{8500000, DIF_BPF_COEFF01, 0x00000003},
+{8500000, DIF_BPF_COEFF23, 0x0002fff2},
+{8500000, DIF_BPF_COEFF45, 0xffe1fff9},
+{8500000, DIF_BPF_COEFF67, 0x00460073},
+{8500000, DIF_BPF_COEFF89, 0x000bff34},
+{8500000, DIF_BPF_COEFF1011, 0xfee90000},
+{8500000, DIF_BPF_COEFF1213, 0x01c10215},
+{8500000, DIF_BPF_COEFF1415, 0xffd0fcc5},
+{8500000, DIF_BPF_COEFF1617, 0xfc99009d},
+{8500000, DIF_BPF_COEFF1819, 0x053d04f1},
+{8500000, DIF_BPF_COEFF2021, 0xfea5f853},
+{8500000, DIF_BPF_COEFF2223, 0xf97d0270},
+{8500000, DIF_BPF_COEFF2425, 0x0a5607e4},
+{8500000, DIF_BPF_COEFF2627, 0xfc2ef314},
+{8500000, DIF_BPF_COEFF2829, 0xf723055f},
+{8500000, DIF_BPF_COEFF3031, 0x0f180943},
+{8500000, DIF_BPF_COEFF3233, 0xf919ef75},
+{8500000, DIF_BPF_COEFF3435, 0xf6fa0830},
+{8500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 85_quant.dat*/
+
+
+/*case 8600000:*/
+/* BEGIN - DIF BPF register values from 86_quant.dat*/
+{8600000, DIF_BPF_COEFF01, 0x00000003},
+{8600000, DIF_BPF_COEFF23, 0x0005fff8},
+{8600000, DIF_BPF_COEFF45, 0xffdeffe4},
+{8600000, DIF_BPF_COEFF67, 0x002f007f},
+{8600000, DIF_BPF_COEFF89, 0x0048ff6b},
+{8600000, DIF_BPF_COEFF1011, 0xfec7ff82},
+{8600000, DIF_BPF_COEFF1213, 0x0163025f},
+{8600000, DIF_BPF_COEFF1415, 0x00a2fd47},
+{8600000, DIF_BPF_COEFF1617, 0xfc17ff73},
+{8600000, DIF_BPF_COEFF1819, 0x04a405b2},
+{8600000, DIF_BPF_COEFF2021, 0x0017f8ed},
+{8600000, DIF_BPF_COEFF2223, 0xf88500dc},
+{8600000, DIF_BPF_COEFF2425, 0x09d208f9},
+{8600000, DIF_BPF_COEFF2627, 0xfdaff370},
+{8600000, DIF_BPF_COEFF2829, 0xf61c0429},
+{8600000, DIF_BPF_COEFF3031, 0x0ee80a0b},
+{8600000, DIF_BPF_COEFF3233, 0xf9d8ef82},
+{8600000, DIF_BPF_COEFF3435, 0xf6a00800},
+{8600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 86_quant.dat*/
+
+
+/*case 8700000:*/
+/* BEGIN - DIF BPF register values from 87_quant.dat*/
+{8700000, DIF_BPF_COEFF01, 0x00000003},
+{8700000, DIF_BPF_COEFF23, 0x0007ffff},
+{8700000, DIF_BPF_COEFF45, 0xffe1ffd4},
+{8700000, DIF_BPF_COEFF67, 0x0010007a},
+{8700000, DIF_BPF_COEFF89, 0x007cffb2},
+{8700000, DIF_BPF_COEFF1011, 0xfec6ff10},
+{8700000, DIF_BPF_COEFF1213, 0x00e60277},
+{8700000, DIF_BPF_COEFF1415, 0x0168fdf9},
+{8700000, DIF_BPF_COEFF1617, 0xfbd3fe50},
+{8700000, DIF_BPF_COEFF1819, 0x03ce0631},
+{8700000, DIF_BPF_COEFF2021, 0x0188f9c8},
+{8700000, DIF_BPF_COEFF2223, 0xf7c7ff43},
+{8700000, DIF_BPF_COEFF2425, 0x091509e3},
+{8700000, DIF_BPF_COEFF2627, 0xff39f3f6},
+{8700000, DIF_BPF_COEFF2829, 0xf52d02ea},
+{8700000, DIF_BPF_COEFF3031, 0x0ea30ac9},
+{8700000, DIF_BPF_COEFF3233, 0xfa9bef95},
+{8700000, DIF_BPF_COEFF3435, 0xf64607d0},
+{8700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 87_quant.dat*/
+
+
+/*case 8800000:*/
+/* BEGIN - DIF BPF register values from 88_quant.dat*/
+{8800000, DIF_BPF_COEFF01, 0x00000002},
+{8800000, DIF_BPF_COEFF23, 0x00090007},
+{8800000, DIF_BPF_COEFF45, 0xffe9ffca},
+{8800000, DIF_BPF_COEFF67, 0xfff00065},
+{8800000, DIF_BPF_COEFF89, 0x00a10003},
+{8800000, DIF_BPF_COEFF1011, 0xfee6feb6},
+{8800000, DIF_BPF_COEFF1213, 0x0053025b},
+{8800000, DIF_BPF_COEFF1415, 0x0213fed0},
+{8800000, DIF_BPF_COEFF1617, 0xfbd3fd46},
+{8800000, DIF_BPF_COEFF1819, 0x02c70668},
+{8800000, DIF_BPF_COEFF2021, 0x02eafadb},
+{8800000, DIF_BPF_COEFF2223, 0xf74bfdae},
+{8800000, DIF_BPF_COEFF2425, 0x08230a9c},
+{8800000, DIF_BPF_COEFF2627, 0x00c7f4a3},
+{8800000, DIF_BPF_COEFF2829, 0xf45b01a6},
+{8800000, DIF_BPF_COEFF3031, 0x0e480b7c},
+{8800000, DIF_BPF_COEFF3233, 0xfb61efae},
+{8800000, DIF_BPF_COEFF3435, 0xf5ef079f},
+{8800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 88_quant.dat*/
+
+
+/*case 8900000:*/
+/* BEGIN - DIF BPF register values from 89_quant.dat*/
+{8900000, DIF_BPF_COEFF01, 0xffff0000},
+{8900000, DIF_BPF_COEFF23, 0x0008000d},
+{8900000, DIF_BPF_COEFF45, 0xfff5ffc8},
+{8900000, DIF_BPF_COEFF67, 0xffd10043},
+{8900000, DIF_BPF_COEFF89, 0x00b20053},
+{8900000, DIF_BPF_COEFF1011, 0xff24fe7c},
+{8900000, DIF_BPF_COEFF1213, 0xffb9020c},
+{8900000, DIF_BPF_COEFF1415, 0x0295ffbb},
+{8900000, DIF_BPF_COEFF1617, 0xfc17fc64},
+{8900000, DIF_BPF_COEFF1819, 0x019b0654},
+{8900000, DIF_BPF_COEFF2021, 0x042dfc1c},
+{8900000, DIF_BPF_COEFF2223, 0xf714fc2a},
+{8900000, DIF_BPF_COEFF2425, 0x07020b21},
+{8900000, DIF_BPF_COEFF2627, 0x0251f575},
+{8900000, DIF_BPF_COEFF2829, 0xf3a7005e},
+{8900000, DIF_BPF_COEFF3031, 0x0dd80c24},
+{8900000, DIF_BPF_COEFF3233, 0xfc2aefcd},
+{8900000, DIF_BPF_COEFF3435, 0xf599076e},
+{8900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 89_quant.dat*/
+
+
+/*case 9000000:*/
+/* BEGIN - DIF BPF register values from 90_quant.dat*/
+{9000000, DIF_BPF_COEFF01, 0xffffffff},
+{9000000, DIF_BPF_COEFF23, 0x00060011},
+{9000000, DIF_BPF_COEFF45, 0x0002ffcf},
+{9000000, DIF_BPF_COEFF67, 0xffba0018},
+{9000000, DIF_BPF_COEFF89, 0x00ad009a},
+{9000000, DIF_BPF_COEFF1011, 0xff79fe68},
+{9000000, DIF_BPF_COEFF1213, 0xff260192},
+{9000000, DIF_BPF_COEFF1415, 0x02e500ab},
+{9000000, DIF_BPF_COEFF1617, 0xfc99fbb6},
+{9000000, DIF_BPF_COEFF1819, 0x005b05f7},
+{9000000, DIF_BPF_COEFF2021, 0x0545fd81},
+{9000000, DIF_BPF_COEFF2223, 0xf723fabf},
+{9000000, DIF_BPF_COEFF2425, 0x05b80b70},
+{9000000, DIF_BPF_COEFF2627, 0x03d2f669},
+{9000000, DIF_BPF_COEFF2829, 0xf313ff15},
+{9000000, DIF_BPF_COEFF3031, 0x0d550cbf},
+{9000000, DIF_BPF_COEFF3233, 0xfcf6eff2},
+{9000000, DIF_BPF_COEFF3435, 0xf544073d},
+{9000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 90_quant.dat*/
+
+
+/*case 9100000:*/
+/* BEGIN - DIF BPF register values from 91_quant.dat*/
+{9100000, DIF_BPF_COEFF01, 0xfffffffe},
+{9100000, DIF_BPF_COEFF23, 0x00030012},
+{9100000, DIF_BPF_COEFF45, 0x000fffdd},
+{9100000, DIF_BPF_COEFF67, 0xffacffea},
+{9100000, DIF_BPF_COEFF89, 0x009300cf},
+{9100000, DIF_BPF_COEFF1011, 0xffdcfe7c},
+{9100000, DIF_BPF_COEFF1213, 0xfea600f7},
+{9100000, DIF_BPF_COEFF1415, 0x02fd0190},
+{9100000, DIF_BPF_COEFF1617, 0xfd51fb46},
+{9100000, DIF_BPF_COEFF1819, 0xff150554},
+{9100000, DIF_BPF_COEFF2021, 0x0627fefd},
+{9100000, DIF_BPF_COEFF2223, 0xf778f978},
+{9100000, DIF_BPF_COEFF2425, 0x044d0b87},
+{9100000, DIF_BPF_COEFF2627, 0x0543f77d},
+{9100000, DIF_BPF_COEFF2829, 0xf2a0fdcf},
+{9100000, DIF_BPF_COEFF3031, 0x0cbe0d4e},
+{9100000, DIF_BPF_COEFF3233, 0xfdc4f01d},
+{9100000, DIF_BPF_COEFF3435, 0xf4f2070b},
+{9100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 91_quant.dat*/
+
+
+/*case 9200000:*/
+/* BEGIN - DIF BPF register values from 92_quant.dat*/
+{9200000, DIF_BPF_COEFF01, 0x0000fffd},
+{9200000, DIF_BPF_COEFF23, 0x00000010},
+{9200000, DIF_BPF_COEFF45, 0x001afff0},
+{9200000, DIF_BPF_COEFF67, 0xffaaffbf},
+{9200000, DIF_BPF_COEFF89, 0x006700ed},
+{9200000, DIF_BPF_COEFF1011, 0x0043feb6},
+{9200000, DIF_BPF_COEFF1213, 0xfe460047},
+{9200000, DIF_BPF_COEFF1415, 0x02db0258},
+{9200000, DIF_BPF_COEFF1617, 0xfe35fb1b},
+{9200000, DIF_BPF_COEFF1819, 0xfddc0473},
+{9200000, DIF_BPF_COEFF2021, 0x06c90082},
+{9200000, DIF_BPF_COEFF2223, 0xf811f85e},
+{9200000, DIF_BPF_COEFF2425, 0x02c90b66},
+{9200000, DIF_BPF_COEFF2627, 0x069ff8ad},
+{9200000, DIF_BPF_COEFF2829, 0xf250fc8d},
+{9200000, DIF_BPF_COEFF3031, 0x0c140dcf},
+{9200000, DIF_BPF_COEFF3233, 0xfe93f04d},
+{9200000, DIF_BPF_COEFF3435, 0xf4a106d9},
+{9200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 92_quant.dat*/
+
+
+/*case 9300000:*/
+/* BEGIN - DIF BPF register values from 93_quant.dat*/
+{9300000, DIF_BPF_COEFF01, 0x0000fffd},
+{9300000, DIF_BPF_COEFF23, 0xfffc000c},
+{9300000, DIF_BPF_COEFF45, 0x00200006},
+{9300000, DIF_BPF_COEFF67, 0xffb4ff9c},
+{9300000, DIF_BPF_COEFF89, 0x002f00ef},
+{9300000, DIF_BPF_COEFF1011, 0x00a4ff10},
+{9300000, DIF_BPF_COEFF1213, 0xfe0dff92},
+{9300000, DIF_BPF_COEFF1415, 0x028102f7},
+{9300000, DIF_BPF_COEFF1617, 0xff36fb37},
+{9300000, DIF_BPF_COEFF1819, 0xfcbf035e},
+{9300000, DIF_BPF_COEFF2021, 0x07260202},
+{9300000, DIF_BPF_COEFF2223, 0xf8e8f778},
+{9300000, DIF_BPF_COEFF2425, 0x01340b0d},
+{9300000, DIF_BPF_COEFF2627, 0x07e1f9f4},
+{9300000, DIF_BPF_COEFF2829, 0xf223fb51},
+{9300000, DIF_BPF_COEFF3031, 0x0b590e42},
+{9300000, DIF_BPF_COEFF3233, 0xff64f083},
+{9300000, DIF_BPF_COEFF3435, 0xf45206a7},
+{9300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 93_quant.dat*/
+
+
+/*case 9400000:*/
+/* BEGIN - DIF BPF register values from 94_quant.dat*/
+{9400000, DIF_BPF_COEFF01, 0x0000fffd},
+{9400000, DIF_BPF_COEFF23, 0xfff90005},
+{9400000, DIF_BPF_COEFF45, 0x0022001a},
+{9400000, DIF_BPF_COEFF67, 0xffc9ff86},
+{9400000, DIF_BPF_COEFF89, 0xfff000d7},
+{9400000, DIF_BPF_COEFF1011, 0x00f2ff82},
+{9400000, DIF_BPF_COEFF1213, 0xfe01fee5},
+{9400000, DIF_BPF_COEFF1415, 0x01f60362},
+{9400000, DIF_BPF_COEFF1617, 0x0044fb99},
+{9400000, DIF_BPF_COEFF1819, 0xfbcc0222},
+{9400000, DIF_BPF_COEFF2021, 0x07380370},
+{9400000, DIF_BPF_COEFF2223, 0xf9f7f6cc},
+{9400000, DIF_BPF_COEFF2425, 0xff990a7e},
+{9400000, DIF_BPF_COEFF2627, 0x0902fb50},
+{9400000, DIF_BPF_COEFF2829, 0xf21afa1f},
+{9400000, DIF_BPF_COEFF3031, 0x0a8d0ea6},
+{9400000, DIF_BPF_COEFF3233, 0x0034f0bf},
+{9400000, DIF_BPF_COEFF3435, 0xf4050675},
+{9400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 94_quant.dat*/
+
+
+/*case 9500000:*/
+/* BEGIN - DIF BPF register values from 95_quant.dat*/
+{9500000, DIF_BPF_COEFF01, 0x0000fffe},
+{9500000, DIF_BPF_COEFF23, 0xfff8fffe},
+{9500000, DIF_BPF_COEFF45, 0x001e002b},
+{9500000, DIF_BPF_COEFF67, 0xffe5ff81},
+{9500000, DIF_BPF_COEFF89, 0xffb400a5},
+{9500000, DIF_BPF_COEFF1011, 0x01280000},
+{9500000, DIF_BPF_COEFF1213, 0xfe24fe50},
+{9500000, DIF_BPF_COEFF1415, 0x01460390},
+{9500000, DIF_BPF_COEFF1617, 0x014dfc3a},
+{9500000, DIF_BPF_COEFF1819, 0xfb1000ce},
+{9500000, DIF_BPF_COEFF2021, 0x070104bf},
+{9500000, DIF_BPF_COEFF2223, 0xfb37f65f},
+{9500000, DIF_BPF_COEFF2425, 0xfe0009bc},
+{9500000, DIF_BPF_COEFF2627, 0x0a00fcbb},
+{9500000, DIF_BPF_COEFF2829, 0xf235f8f8},
+{9500000, DIF_BPF_COEFF3031, 0x09b20efc},
+{9500000, DIF_BPF_COEFF3233, 0x0105f101},
+{9500000, DIF_BPF_COEFF3435, 0xf3ba0642},
+{9500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 95_quant.dat*/
+
+
+/*case 9600000:*/
+/* BEGIN - DIF BPF register values from 96_quant.dat*/
+{9600000, DIF_BPF_COEFF01, 0x0001ffff},
+{9600000, DIF_BPF_COEFF23, 0xfff8fff7},
+{9600000, DIF_BPF_COEFF45, 0x00150036},
+{9600000, DIF_BPF_COEFF67, 0x0005ff8c},
+{9600000, DIF_BPF_COEFF89, 0xff810061},
+{9600000, DIF_BPF_COEFF1011, 0x013d007e},
+{9600000, DIF_BPF_COEFF1213, 0xfe71fddf},
+{9600000, DIF_BPF_COEFF1415, 0x007c0380},
+{9600000, DIF_BPF_COEFF1617, 0x0241fd13},
+{9600000, DIF_BPF_COEFF1819, 0xfa94ff70},
+{9600000, DIF_BPF_COEFF2021, 0x068005e2},
+{9600000, DIF_BPF_COEFF2223, 0xfc9bf633},
+{9600000, DIF_BPF_COEFF2425, 0xfc7308ca},
+{9600000, DIF_BPF_COEFF2627, 0x0ad5fe30},
+{9600000, DIF_BPF_COEFF2829, 0xf274f7e0},
+{9600000, DIF_BPF_COEFF3031, 0x08c90f43},
+{9600000, DIF_BPF_COEFF3233, 0x01d4f147},
+{9600000, DIF_BPF_COEFF3435, 0xf371060f},
+{9600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 96_quant.dat*/
+
+
+/*case 9700000:*/
+/* BEGIN - DIF BPF register values from 97_quant.dat*/
+{9700000, DIF_BPF_COEFF01, 0x00010001},
+{9700000, DIF_BPF_COEFF23, 0xfff9fff1},
+{9700000, DIF_BPF_COEFF45, 0x00090038},
+{9700000, DIF_BPF_COEFF67, 0x0025ffa7},
+{9700000, DIF_BPF_COEFF89, 0xff5e0012},
+{9700000, DIF_BPF_COEFF1011, 0x013200f0},
+{9700000, DIF_BPF_COEFF1213, 0xfee3fd9b},
+{9700000, DIF_BPF_COEFF1415, 0xffaa0331},
+{9700000, DIF_BPF_COEFF1617, 0x0311fe15},
+{9700000, DIF_BPF_COEFF1819, 0xfa60fe18},
+{9700000, DIF_BPF_COEFF2021, 0x05bd06d1},
+{9700000, DIF_BPF_COEFF2223, 0xfe1bf64a},
+{9700000, DIF_BPF_COEFF2425, 0xfafa07ae},
+{9700000, DIF_BPF_COEFF2627, 0x0b7effab},
+{9700000, DIF_BPF_COEFF2829, 0xf2d5f6d7},
+{9700000, DIF_BPF_COEFF3031, 0x07d30f7a},
+{9700000, DIF_BPF_COEFF3233, 0x02a3f194},
+{9700000, DIF_BPF_COEFF3435, 0xf32905dc},
+{9700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 97_quant.dat*/
+
+
+/*case 9800000:*/
+/* BEGIN - DIF BPF register values from 98_quant.dat*/
+{9800000, DIF_BPF_COEFF01, 0x00010002},
+{9800000, DIF_BPF_COEFF23, 0xfffcffee},
+{9800000, DIF_BPF_COEFF45, 0xfffb0032},
+{9800000, DIF_BPF_COEFF67, 0x003fffcd},
+{9800000, DIF_BPF_COEFF89, 0xff4effc1},
+{9800000, DIF_BPF_COEFF1011, 0x0106014a},
+{9800000, DIF_BPF_COEFF1213, 0xff6efd8a},
+{9800000, DIF_BPF_COEFF1415, 0xfedd02aa},
+{9800000, DIF_BPF_COEFF1617, 0x03b0ff34},
+{9800000, DIF_BPF_COEFF1819, 0xfa74fcd7},
+{9800000, DIF_BPF_COEFF2021, 0x04bf0781},
+{9800000, DIF_BPF_COEFF2223, 0xffaaf6a3},
+{9800000, DIF_BPF_COEFF2425, 0xf99e066b},
+{9800000, DIF_BPF_COEFF2627, 0x0bf90128},
+{9800000, DIF_BPF_COEFF2829, 0xf359f5e1},
+{9800000, DIF_BPF_COEFF3031, 0x06d20fa2},
+{9800000, DIF_BPF_COEFF3233, 0x0370f1e5},
+{9800000, DIF_BPF_COEFF3435, 0xf2e405a8},
+{9800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 98_quant.dat*/
+
+
+/*case 9900000:*/
+/* BEGIN - DIF BPF register values from 99_quant.dat*/
+{9900000, DIF_BPF_COEFF01, 0x00000003},
+{9900000, DIF_BPF_COEFF23, 0xffffffee},
+{9900000, DIF_BPF_COEFF45, 0xffef0024},
+{9900000, DIF_BPF_COEFF67, 0x0051fffa},
+{9900000, DIF_BPF_COEFF89, 0xff54ff77},
+{9900000, DIF_BPF_COEFF1011, 0x00be0184},
+{9900000, DIF_BPF_COEFF1213, 0x0006fdad},
+{9900000, DIF_BPF_COEFF1415, 0xfe2701f3},
+{9900000, DIF_BPF_COEFF1617, 0x0413005e},
+{9900000, DIF_BPF_COEFF1819, 0xfad1fbba},
+{9900000, DIF_BPF_COEFF2021, 0x039007ee},
+{9900000, DIF_BPF_COEFF2223, 0x013bf73d},
+{9900000, DIF_BPF_COEFF2425, 0xf868050a},
+{9900000, DIF_BPF_COEFF2627, 0x0c4302a1},
+{9900000, DIF_BPF_COEFF2829, 0xf3fdf4fe},
+{9900000, DIF_BPF_COEFF3031, 0x05c70fba},
+{9900000, DIF_BPF_COEFF3233, 0x043bf23c},
+{9900000, DIF_BPF_COEFF3435, 0xf2a10575},
+{9900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 99_quant.dat*/
+
+
+/*case 10000000:*/
+/* BEGIN - DIF BPF register values from 100_quant.dat*/
+{10000000, DIF_BPF_COEFF01, 0x00000003},
+{10000000, DIF_BPF_COEFF23, 0x0003fff1},
+{10000000, DIF_BPF_COEFF45, 0xffe50011},
+{10000000, DIF_BPF_COEFF67, 0x00570027},
+{10000000, DIF_BPF_COEFF89, 0xff70ff3c},
+{10000000, DIF_BPF_COEFF1011, 0x00620198},
+{10000000, DIF_BPF_COEFF1213, 0x009efe01},
+{10000000, DIF_BPF_COEFF1415, 0xfd95011a},
+{10000000, DIF_BPF_COEFF1617, 0x04350183},
+{10000000, DIF_BPF_COEFF1819, 0xfb71fad0},
+{10000000, DIF_BPF_COEFF2021, 0x023c0812},
+{10000000, DIF_BPF_COEFF2223, 0x02c3f811},
+{10000000, DIF_BPF_COEFF2425, 0xf75e0390},
+{10000000, DIF_BPF_COEFF2627, 0x0c5c0411},
+{10000000, DIF_BPF_COEFF2829, 0xf4c1f432},
+{10000000, DIF_BPF_COEFF3031, 0x04b30fc1},
+{10000000, DIF_BPF_COEFF3233, 0x0503f297},
+{10000000, DIF_BPF_COEFF3435, 0xf2610541},
+{10000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 100_quant.dat*/
+
+
+/*case 10100000:*/
+/* BEGIN - DIF BPF register values from 101_quant.dat*/
+{10100000, DIF_BPF_COEFF01, 0x00000003},
+{10100000, DIF_BPF_COEFF23, 0x0006fff7},
+{10100000, DIF_BPF_COEFF45, 0xffdffffc},
+{10100000, DIF_BPF_COEFF67, 0x00510050},
+{10100000, DIF_BPF_COEFF89, 0xff9dff18},
+{10100000, DIF_BPF_COEFF1011, 0xfffc0184},
+{10100000, DIF_BPF_COEFF1213, 0x0128fe80},
+{10100000, DIF_BPF_COEFF1415, 0xfd32002e},
+{10100000, DIF_BPF_COEFF1617, 0x04130292},
+{10100000, DIF_BPF_COEFF1819, 0xfc4dfa21},
+{10100000, DIF_BPF_COEFF2021, 0x00d107ee},
+{10100000, DIF_BPF_COEFF2223, 0x0435f91c},
+{10100000, DIF_BPF_COEFF2425, 0xf6850205},
+{10100000, DIF_BPF_COEFF2627, 0x0c430573},
+{10100000, DIF_BPF_COEFF2829, 0xf5a1f37d},
+{10100000, DIF_BPF_COEFF3031, 0x03990fba},
+{10100000, DIF_BPF_COEFF3233, 0x05c7f2f8},
+{10100000, DIF_BPF_COEFF3435, 0xf222050d},
+{10100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 101_quant.dat*/
+
+
+/*case 10200000:*/
+/* BEGIN - DIF BPF register values from 102_quant.dat*/
+{10200000, DIF_BPF_COEFF01, 0x00000002},
+{10200000, DIF_BPF_COEFF23, 0x0008fffe},
+{10200000, DIF_BPF_COEFF45, 0xffdfffe7},
+{10200000, DIF_BPF_COEFF67, 0x003f006e},
+{10200000, DIF_BPF_COEFF89, 0xffd6ff0f},
+{10200000, DIF_BPF_COEFF1011, 0xff96014a},
+{10200000, DIF_BPF_COEFF1213, 0x0197ff1f},
+{10200000, DIF_BPF_COEFF1415, 0xfd05ff3e},
+{10200000, DIF_BPF_COEFF1617, 0x03b0037c},
+{10200000, DIF_BPF_COEFF1819, 0xfd59f9b7},
+{10200000, DIF_BPF_COEFF2021, 0xff5d0781},
+{10200000, DIF_BPF_COEFF2223, 0x0585fa56},
+{10200000, DIF_BPF_COEFF2425, 0xf5e4006f},
+{10200000, DIF_BPF_COEFF2627, 0x0bf906c4},
+{10200000, DIF_BPF_COEFF2829, 0xf69df2e0},
+{10200000, DIF_BPF_COEFF3031, 0x02790fa2},
+{10200000, DIF_BPF_COEFF3233, 0x0688f35d},
+{10200000, DIF_BPF_COEFF3435, 0xf1e604d8},
+{10200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 102_quant.dat*/
+
+
+/*case 10300000:*/
+/* BEGIN - DIF BPF register values from 103_quant.dat*/
+{10300000, DIF_BPF_COEFF01, 0xffff0001},
+{10300000, DIF_BPF_COEFF23, 0x00090005},
+{10300000, DIF_BPF_COEFF45, 0xffe4ffd6},
+{10300000, DIF_BPF_COEFF67, 0x0025007e},
+{10300000, DIF_BPF_COEFF89, 0x0014ff20},
+{10300000, DIF_BPF_COEFF1011, 0xff3c00f0},
+{10300000, DIF_BPF_COEFF1213, 0x01e1ffd0},
+{10300000, DIF_BPF_COEFF1415, 0xfd12fe5c},
+{10300000, DIF_BPF_COEFF1617, 0x03110433},
+{10300000, DIF_BPF_COEFF1819, 0xfe88f996},
+{10300000, DIF_BPF_COEFF2021, 0xfdf106d1},
+{10300000, DIF_BPF_COEFF2223, 0x06aafbb7},
+{10300000, DIF_BPF_COEFF2425, 0xf57efed8},
+{10300000, DIF_BPF_COEFF2627, 0x0b7e07ff},
+{10300000, DIF_BPF_COEFF2829, 0xf7b0f25e},
+{10300000, DIF_BPF_COEFF3031, 0x01560f7a},
+{10300000, DIF_BPF_COEFF3233, 0x0745f3c7},
+{10300000, DIF_BPF_COEFF3435, 0xf1ac04a4},
+{10300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 103_quant.dat*/
+
+
+/*case 10400000:*/
+/* BEGIN - DIF BPF register values from 104_quant.dat*/
+{10400000, DIF_BPF_COEFF01, 0xffffffff},
+{10400000, DIF_BPF_COEFF23, 0x0008000c},
+{10400000, DIF_BPF_COEFF45, 0xffedffcb},
+{10400000, DIF_BPF_COEFF67, 0x0005007d},
+{10400000, DIF_BPF_COEFF89, 0x0050ff4c},
+{10400000, DIF_BPF_COEFF1011, 0xfef6007e},
+{10400000, DIF_BPF_COEFF1213, 0x01ff0086},
+{10400000, DIF_BPF_COEFF1415, 0xfd58fd97},
+{10400000, DIF_BPF_COEFF1617, 0x024104ad},
+{10400000, DIF_BPF_COEFF1819, 0xffcaf9c0},
+{10400000, DIF_BPF_COEFF2021, 0xfc9905e2},
+{10400000, DIF_BPF_COEFF2223, 0x079afd35},
+{10400000, DIF_BPF_COEFF2425, 0xf555fd46},
+{10400000, DIF_BPF_COEFF2627, 0x0ad50920},
+{10400000, DIF_BPF_COEFF2829, 0xf8d9f1f6},
+{10400000, DIF_BPF_COEFF3031, 0x00310f43},
+{10400000, DIF_BPF_COEFF3233, 0x07fdf435},
+{10400000, DIF_BPF_COEFF3435, 0xf174046f},
+{10400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 104_quant.dat*/
+
+
+/*case 10500000:*/
+/* BEGIN - DIF BPF register values from 105_quant.dat*/
+{10500000, DIF_BPF_COEFF01, 0xfffffffe},
+{10500000, DIF_BPF_COEFF23, 0x00050011},
+{10500000, DIF_BPF_COEFF45, 0xfffaffc8},
+{10500000, DIF_BPF_COEFF67, 0xffe5006b},
+{10500000, DIF_BPF_COEFF89, 0x0082ff8c},
+{10500000, DIF_BPF_COEFF1011, 0xfecc0000},
+{10500000, DIF_BPF_COEFF1213, 0x01f00130},
+{10500000, DIF_BPF_COEFF1415, 0xfdd2fcfc},
+{10500000, DIF_BPF_COEFF1617, 0x014d04e3},
+{10500000, DIF_BPF_COEFF1819, 0x010efa32},
+{10500000, DIF_BPF_COEFF2021, 0xfb6404bf},
+{10500000, DIF_BPF_COEFF2223, 0x084efec5},
+{10500000, DIF_BPF_COEFF2425, 0xf569fbc2},
+{10500000, DIF_BPF_COEFF2627, 0x0a000a23},
+{10500000, DIF_BPF_COEFF2829, 0xfa15f1ab},
+{10500000, DIF_BPF_COEFF3031, 0xff0b0efc},
+{10500000, DIF_BPF_COEFF3233, 0x08b0f4a7},
+{10500000, DIF_BPF_COEFF3435, 0xf13f043a},
+{10500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 105_quant.dat*/
+
+
+/*case 10600000:*/
+/* BEGIN - DIF BPF register values from 106_quant.dat*/
+{10600000, DIF_BPF_COEFF01, 0x0000fffd},
+{10600000, DIF_BPF_COEFF23, 0x00020012},
+{10600000, DIF_BPF_COEFF45, 0x0007ffcd},
+{10600000, DIF_BPF_COEFF67, 0xffc9004c},
+{10600000, DIF_BPF_COEFF89, 0x00a4ffd9},
+{10600000, DIF_BPF_COEFF1011, 0xfec3ff82},
+{10600000, DIF_BPF_COEFF1213, 0x01b401c1},
+{10600000, DIF_BPF_COEFF1415, 0xfe76fc97},
+{10600000, DIF_BPF_COEFF1617, 0x004404d2},
+{10600000, DIF_BPF_COEFF1819, 0x0245fae8},
+{10600000, DIF_BPF_COEFF2021, 0xfa5f0370},
+{10600000, DIF_BPF_COEFF2223, 0x08c1005f},
+{10600000, DIF_BPF_COEFF2425, 0xf5bcfa52},
+{10600000, DIF_BPF_COEFF2627, 0x09020b04},
+{10600000, DIF_BPF_COEFF2829, 0xfb60f17b},
+{10600000, DIF_BPF_COEFF3031, 0xfde70ea6},
+{10600000, DIF_BPF_COEFF3233, 0x095df51e},
+{10600000, DIF_BPF_COEFF3435, 0xf10c0405},
+{10600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 106_quant.dat*/
+
+
+/*case 10700000:*/
+/* BEGIN - DIF BPF register values from 107_quant.dat*/
+{10700000, DIF_BPF_COEFF01, 0x0000fffd},
+{10700000, DIF_BPF_COEFF23, 0xffff0011},
+{10700000, DIF_BPF_COEFF45, 0x0014ffdb},
+{10700000, DIF_BPF_COEFF67, 0xffb40023},
+{10700000, DIF_BPF_COEFF89, 0x00b2002a},
+{10700000, DIF_BPF_COEFF1011, 0xfedbff10},
+{10700000, DIF_BPF_COEFF1213, 0x0150022d},
+{10700000, DIF_BPF_COEFF1415, 0xff38fc6f},
+{10700000, DIF_BPF_COEFF1617, 0xff36047b},
+{10700000, DIF_BPF_COEFF1819, 0x035efbda},
+{10700000, DIF_BPF_COEFF2021, 0xf9940202},
+{10700000, DIF_BPF_COEFF2223, 0x08ee01f5},
+{10700000, DIF_BPF_COEFF2425, 0xf649f8fe},
+{10700000, DIF_BPF_COEFF2627, 0x07e10bc2},
+{10700000, DIF_BPF_COEFF2829, 0xfcb6f169},
+{10700000, DIF_BPF_COEFF3031, 0xfcc60e42},
+{10700000, DIF_BPF_COEFF3233, 0x0a04f599},
+{10700000, DIF_BPF_COEFF3435, 0xf0db03d0},
+{10700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 107_quant.dat*/
+
+
+/*case 10800000:*/
+/* BEGIN - DIF BPF register values from 108_quant.dat*/
+{10800000, DIF_BPF_COEFF01, 0x0000fffd},
+{10800000, DIF_BPF_COEFF23, 0xfffb000d},
+{10800000, DIF_BPF_COEFF45, 0x001dffed},
+{10800000, DIF_BPF_COEFF67, 0xffaafff5},
+{10800000, DIF_BPF_COEFF89, 0x00aa0077},
+{10800000, DIF_BPF_COEFF1011, 0xff13feb6},
+{10800000, DIF_BPF_COEFF1213, 0x00ce026b},
+{10800000, DIF_BPF_COEFF1415, 0x000afc85},
+{10800000, DIF_BPF_COEFF1617, 0xfe3503e3},
+{10800000, DIF_BPF_COEFF1819, 0x044cfcfb},
+{10800000, DIF_BPF_COEFF2021, 0xf90c0082},
+{10800000, DIF_BPF_COEFF2223, 0x08d5037f},
+{10800000, DIF_BPF_COEFF2425, 0xf710f7cc},
+{10800000, DIF_BPF_COEFF2627, 0x069f0c59},
+{10800000, DIF_BPF_COEFF2829, 0xfe16f173},
+{10800000, DIF_BPF_COEFF3031, 0xfbaa0dcf},
+{10800000, DIF_BPF_COEFF3233, 0x0aa5f617},
+{10800000, DIF_BPF_COEFF3435, 0xf0ad039b},
+{10800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 108_quant.dat*/
+
+
+/*case 10900000:*/
+/* BEGIN - DIF BPF register values from 109_quant.dat*/
+{10900000, DIF_BPF_COEFF01, 0x0000fffe},
+{10900000, DIF_BPF_COEFF23, 0xfff90006},
+{10900000, DIF_BPF_COEFF45, 0x00210003},
+{10900000, DIF_BPF_COEFF67, 0xffacffc8},
+{10900000, DIF_BPF_COEFF89, 0x008e00b6},
+{10900000, DIF_BPF_COEFF1011, 0xff63fe7c},
+{10900000, DIF_BPF_COEFF1213, 0x003a0275},
+{10900000, DIF_BPF_COEFF1415, 0x00dafcda},
+{10900000, DIF_BPF_COEFF1617, 0xfd510313},
+{10900000, DIF_BPF_COEFF1819, 0x0501fe40},
+{10900000, DIF_BPF_COEFF2021, 0xf8cbfefd},
+{10900000, DIF_BPF_COEFF2223, 0x087604f0},
+{10900000, DIF_BPF_COEFF2425, 0xf80af6c2},
+{10900000, DIF_BPF_COEFF2627, 0x05430cc8},
+{10900000, DIF_BPF_COEFF2829, 0xff7af19a},
+{10900000, DIF_BPF_COEFF3031, 0xfa940d4e},
+{10900000, DIF_BPF_COEFF3233, 0x0b3ff699},
+{10900000, DIF_BPF_COEFF3435, 0xf0810365},
+{10900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 109_quant.dat*/
+
+
+/*case 11000000:*/
+/* BEGIN - DIF BPF register values from 110_quant.dat*/
+{11000000, DIF_BPF_COEFF01, 0x0001ffff},
+{11000000, DIF_BPF_COEFF23, 0xfff8ffff},
+{11000000, DIF_BPF_COEFF45, 0x00210018},
+{11000000, DIF_BPF_COEFF67, 0xffbaffa3},
+{11000000, DIF_BPF_COEFF89, 0x006000e1},
+{11000000, DIF_BPF_COEFF1011, 0xffc4fe68},
+{11000000, DIF_BPF_COEFF1213, 0xffa0024b},
+{11000000, DIF_BPF_COEFF1415, 0x019afd66},
+{11000000, DIF_BPF_COEFF1617, 0xfc990216},
+{11000000, DIF_BPF_COEFF1819, 0x0575ff99},
+{11000000, DIF_BPF_COEFF2021, 0xf8d4fd81},
+{11000000, DIF_BPF_COEFF2223, 0x07d40640},
+{11000000, DIF_BPF_COEFF2425, 0xf932f5e6},
+{11000000, DIF_BPF_COEFF2627, 0x03d20d0d},
+{11000000, DIF_BPF_COEFF2829, 0x00dff1de},
+{11000000, DIF_BPF_COEFF3031, 0xf9860cbf},
+{11000000, DIF_BPF_COEFF3233, 0x0bd1f71e},
+{11000000, DIF_BPF_COEFF3435, 0xf058032f},
+{11000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 110_quant.dat*/
+
+
+/*case 11100000:*/
+/* BEGIN - DIF BPF register values from 111_quant.dat*/
+{11100000, DIF_BPF_COEFF01, 0x00010000},
+{11100000, DIF_BPF_COEFF23, 0xfff8fff8},
+{11100000, DIF_BPF_COEFF45, 0x001b0029},
+{11100000, DIF_BPF_COEFF67, 0xffd1ff8a},
+{11100000, DIF_BPF_COEFF89, 0x002600f2},
+{11100000, DIF_BPF_COEFF1011, 0x002cfe7c},
+{11100000, DIF_BPF_COEFF1213, 0xff0f01f0},
+{11100000, DIF_BPF_COEFF1415, 0x023bfe20},
+{11100000, DIF_BPF_COEFF1617, 0xfc1700fa},
+{11100000, DIF_BPF_COEFF1819, 0x05a200f7},
+{11100000, DIF_BPF_COEFF2021, 0xf927fc1c},
+{11100000, DIF_BPF_COEFF2223, 0x06f40765},
+{11100000, DIF_BPF_COEFF2425, 0xfa82f53b},
+{11100000, DIF_BPF_COEFF2627, 0x02510d27},
+{11100000, DIF_BPF_COEFF2829, 0x0243f23d},
+{11100000, DIF_BPF_COEFF3031, 0xf8810c24},
+{11100000, DIF_BPF_COEFF3233, 0x0c5cf7a7},
+{11100000, DIF_BPF_COEFF3435, 0xf03102fa},
+{11100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 111_quant.dat*/
+
+
+/*case 11200000:*/
+/* BEGIN - DIF BPF register values from 112_quant.dat*/
+{11200000, DIF_BPF_COEFF01, 0x00010002},
+{11200000, DIF_BPF_COEFF23, 0xfffafff2},
+{11200000, DIF_BPF_COEFF45, 0x00110035},
+{11200000, DIF_BPF_COEFF67, 0xfff0ff81},
+{11200000, DIF_BPF_COEFF89, 0xffe700e7},
+{11200000, DIF_BPF_COEFF1011, 0x008ffeb6},
+{11200000, DIF_BPF_COEFF1213, 0xfe94016d},
+{11200000, DIF_BPF_COEFF1415, 0x02b0fefb},
+{11200000, DIF_BPF_COEFF1617, 0xfbd3ffd1},
+{11200000, DIF_BPF_COEFF1819, 0x05850249},
+{11200000, DIF_BPF_COEFF2021, 0xf9c1fadb},
+{11200000, DIF_BPF_COEFF2223, 0x05de0858},
+{11200000, DIF_BPF_COEFF2425, 0xfbf2f4c4},
+{11200000, DIF_BPF_COEFF2627, 0x00c70d17},
+{11200000, DIF_BPF_COEFF2829, 0x03a0f2b8},
+{11200000, DIF_BPF_COEFF3031, 0xf7870b7c},
+{11200000, DIF_BPF_COEFF3233, 0x0cdff833},
+{11200000, DIF_BPF_COEFF3435, 0xf00d02c4},
+{11200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 112_quant.dat*/
+
+
+/*case 11300000:*/
+/* BEGIN - DIF BPF register values from 113_quant.dat*/
+{11300000, DIF_BPF_COEFF01, 0x00000003},
+{11300000, DIF_BPF_COEFF23, 0xfffdffee},
+{11300000, DIF_BPF_COEFF45, 0x00040038},
+{11300000, DIF_BPF_COEFF67, 0x0010ff88},
+{11300000, DIF_BPF_COEFF89, 0xffac00c2},
+{11300000, DIF_BPF_COEFF1011, 0x00e2ff10},
+{11300000, DIF_BPF_COEFF1213, 0xfe3900cb},
+{11300000, DIF_BPF_COEFF1415, 0x02f1ffe9},
+{11300000, DIF_BPF_COEFF1617, 0xfbd3feaa},
+{11300000, DIF_BPF_COEFF1819, 0x05210381},
+{11300000, DIF_BPF_COEFF2021, 0xfa9cf9c8},
+{11300000, DIF_BPF_COEFF2223, 0x04990912},
+{11300000, DIF_BPF_COEFF2425, 0xfd7af484},
+{11300000, DIF_BPF_COEFF2627, 0xff390cdb},
+{11300000, DIF_BPF_COEFF2829, 0x04f4f34d},
+{11300000, DIF_BPF_COEFF3031, 0xf69a0ac9},
+{11300000, DIF_BPF_COEFF3233, 0x0d5af8c1},
+{11300000, DIF_BPF_COEFF3435, 0xefec028e},
+{11300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 113_quant.dat*/
+
+
+/*case 11400000:*/
+/* BEGIN - DIF BPF register values from 114_quant.dat*/
+{11400000, DIF_BPF_COEFF01, 0x00000003},
+{11400000, DIF_BPF_COEFF23, 0x0000ffee},
+{11400000, DIF_BPF_COEFF45, 0xfff60033},
+{11400000, DIF_BPF_COEFF67, 0x002fff9f},
+{11400000, DIF_BPF_COEFF89, 0xff7b0087},
+{11400000, DIF_BPF_COEFF1011, 0x011eff82},
+{11400000, DIF_BPF_COEFF1213, 0xfe080018},
+{11400000, DIF_BPF_COEFF1415, 0x02f900d8},
+{11400000, DIF_BPF_COEFF1617, 0xfc17fd96},
+{11400000, DIF_BPF_COEFF1819, 0x04790490},
+{11400000, DIF_BPF_COEFF2021, 0xfbadf8ed},
+{11400000, DIF_BPF_COEFF2223, 0x032f098e},
+{11400000, DIF_BPF_COEFF2425, 0xff10f47d},
+{11400000, DIF_BPF_COEFF2627, 0xfdaf0c75},
+{11400000, DIF_BPF_COEFF2829, 0x063cf3fc},
+{11400000, DIF_BPF_COEFF3031, 0xf5ba0a0b},
+{11400000, DIF_BPF_COEFF3233, 0x0dccf952},
+{11400000, DIF_BPF_COEFF3435, 0xefcd0258},
+{11400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 114_quant.dat*/
+
+
+/*case 11500000:*/
+/* BEGIN - DIF BPF register values from 115_quant.dat*/
+{11500000, DIF_BPF_COEFF01, 0x00000003},
+{11500000, DIF_BPF_COEFF23, 0x0004fff1},
+{11500000, DIF_BPF_COEFF45, 0xffea0026},
+{11500000, DIF_BPF_COEFF67, 0x0046ffc3},
+{11500000, DIF_BPF_COEFF89, 0xff5a003c},
+{11500000, DIF_BPF_COEFF1011, 0x013b0000},
+{11500000, DIF_BPF_COEFF1213, 0xfe04ff63},
+{11500000, DIF_BPF_COEFF1415, 0x02c801b8},
+{11500000, DIF_BPF_COEFF1617, 0xfc99fca6},
+{11500000, DIF_BPF_COEFF1819, 0x0397056a},
+{11500000, DIF_BPF_COEFF2021, 0xfcecf853},
+{11500000, DIF_BPF_COEFF2223, 0x01ad09c9},
+{11500000, DIF_BPF_COEFF2425, 0x00acf4ad},
+{11500000, DIF_BPF_COEFF2627, 0xfc2e0be7},
+{11500000, DIF_BPF_COEFF2829, 0x0773f4c2},
+{11500000, DIF_BPF_COEFF3031, 0xf4e90943},
+{11500000, DIF_BPF_COEFF3233, 0x0e35f9e6},
+{11500000, DIF_BPF_COEFF3435, 0xefb10221},
+{11500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 115_quant.dat*/
+
+
+/*case 11600000:*/
+/* BEGIN - DIF BPF register values from 116_quant.dat*/
+{11600000, DIF_BPF_COEFF01, 0x00000002},
+{11600000, DIF_BPF_COEFF23, 0x0007fff6},
+{11600000, DIF_BPF_COEFF45, 0xffe20014},
+{11600000, DIF_BPF_COEFF67, 0x0054ffee},
+{11600000, DIF_BPF_COEFF89, 0xff4effeb},
+{11600000, DIF_BPF_COEFF1011, 0x0137007e},
+{11600000, DIF_BPF_COEFF1213, 0xfe2efebb},
+{11600000, DIF_BPF_COEFF1415, 0x0260027a},
+{11600000, DIF_BPF_COEFF1617, 0xfd51fbe6},
+{11600000, DIF_BPF_COEFF1819, 0x02870605},
+{11600000, DIF_BPF_COEFF2021, 0xfe4af7fe},
+{11600000, DIF_BPF_COEFF2223, 0x001d09c1},
+{11600000, DIF_BPF_COEFF2425, 0x0243f515},
+{11600000, DIF_BPF_COEFF2627, 0xfabd0b32},
+{11600000, DIF_BPF_COEFF2829, 0x0897f59e},
+{11600000, DIF_BPF_COEFF3031, 0xf4280871},
+{11600000, DIF_BPF_COEFF3233, 0x0e95fa7c},
+{11600000, DIF_BPF_COEFF3435, 0xef9701eb},
+{11600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 116_quant.dat*/
+
+
+/*case 11700000:*/
+/* BEGIN - DIF BPF register values from 117_quant.dat*/
+{11700000, DIF_BPF_COEFF01, 0xffff0001},
+{11700000, DIF_BPF_COEFF23, 0x0008fffd},
+{11700000, DIF_BPF_COEFF45, 0xffdeffff},
+{11700000, DIF_BPF_COEFF67, 0x0056001d},
+{11700000, DIF_BPF_COEFF89, 0xff57ff9c},
+{11700000, DIF_BPF_COEFF1011, 0x011300f0},
+{11700000, DIF_BPF_COEFF1213, 0xfe82fe2e},
+{11700000, DIF_BPF_COEFF1415, 0x01ca0310},
+{11700000, DIF_BPF_COEFF1617, 0xfe35fb62},
+{11700000, DIF_BPF_COEFF1819, 0x0155065a},
+{11700000, DIF_BPF_COEFF2021, 0xffbaf7f2},
+{11700000, DIF_BPF_COEFF2223, 0xfe8c0977},
+{11700000, DIF_BPF_COEFF2425, 0x03cef5b2},
+{11700000, DIF_BPF_COEFF2627, 0xf9610a58},
+{11700000, DIF_BPF_COEFF2829, 0x09a5f68f},
+{11700000, DIF_BPF_COEFF3031, 0xf3790797},
+{11700000, DIF_BPF_COEFF3233, 0x0eebfb14},
+{11700000, DIF_BPF_COEFF3435, 0xef8001b5},
+{11700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 117_quant.dat*/
+
+
+/*case 11800000:*/
+/* BEGIN - DIF BPF register values from 118_quant.dat*/
+{11800000, DIF_BPF_COEFF01, 0xffff0000},
+{11800000, DIF_BPF_COEFF23, 0x00080004},
+{11800000, DIF_BPF_COEFF45, 0xffe0ffe9},
+{11800000, DIF_BPF_COEFF67, 0x004c0047},
+{11800000, DIF_BPF_COEFF89, 0xff75ff58},
+{11800000, DIF_BPF_COEFF1011, 0x00d1014a},
+{11800000, DIF_BPF_COEFF1213, 0xfef9fdc8},
+{11800000, DIF_BPF_COEFF1415, 0x0111036f},
+{11800000, DIF_BPF_COEFF1617, 0xff36fb21},
+{11800000, DIF_BPF_COEFF1819, 0x00120665},
+{11800000, DIF_BPF_COEFF2021, 0x012df82e},
+{11800000, DIF_BPF_COEFF2223, 0xfd0708ec},
+{11800000, DIF_BPF_COEFF2425, 0x0542f682},
+{11800000, DIF_BPF_COEFF2627, 0xf81f095c},
+{11800000, DIF_BPF_COEFF2829, 0x0a9af792},
+{11800000, DIF_BPF_COEFF3031, 0xf2db06b5},
+{11800000, DIF_BPF_COEFF3233, 0x0f38fbad},
+{11800000, DIF_BPF_COEFF3435, 0xef6c017e},
+{11800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 118_quant.dat*/
+
+
+/*case 11900000:*/
+/* BEGIN - DIF BPF register values from 119_quant.dat*/
+{11900000, DIF_BPF_COEFF01, 0xffffffff},
+{11900000, DIF_BPF_COEFF23, 0x0007000b},
+{11900000, DIF_BPF_COEFF45, 0xffe7ffd8},
+{11900000, DIF_BPF_COEFF67, 0x00370068},
+{11900000, DIF_BPF_COEFF89, 0xffa4ff28},
+{11900000, DIF_BPF_COEFF1011, 0x00790184},
+{11900000, DIF_BPF_COEFF1213, 0xff87fd91},
+{11900000, DIF_BPF_COEFF1415, 0x00430392},
+{11900000, DIF_BPF_COEFF1617, 0x0044fb26},
+{11900000, DIF_BPF_COEFF1819, 0xfece0626},
+{11900000, DIF_BPF_COEFF2021, 0x0294f8b2},
+{11900000, DIF_BPF_COEFF2223, 0xfb990825},
+{11900000, DIF_BPF_COEFF2425, 0x0698f77f},
+{11900000, DIF_BPF_COEFF2627, 0xf6fe0842},
+{11900000, DIF_BPF_COEFF2829, 0x0b73f8a7},
+{11900000, DIF_BPF_COEFF3031, 0xf25105cd},
+{11900000, DIF_BPF_COEFF3233, 0x0f7bfc48},
+{11900000, DIF_BPF_COEFF3435, 0xef5a0148},
+{11900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 119_quant.dat*/
+
+
+/*case 12000000:*/
+/* BEGIN - DIF BPF register values from 120_quant.dat*/
+{12000000, DIF_BPF_COEFF01, 0x0000fffe},
+{12000000, DIF_BPF_COEFF23, 0x00050010},
+{12000000, DIF_BPF_COEFF45, 0xfff2ffcc},
+{12000000, DIF_BPF_COEFF67, 0x001b007b},
+{12000000, DIF_BPF_COEFF89, 0xffdfff10},
+{12000000, DIF_BPF_COEFF1011, 0x00140198},
+{12000000, DIF_BPF_COEFF1213, 0x0020fd8e},
+{12000000, DIF_BPF_COEFF1415, 0xff710375},
+{12000000, DIF_BPF_COEFF1617, 0x014dfb73},
+{12000000, DIF_BPF_COEFF1819, 0xfd9a059f},
+{12000000, DIF_BPF_COEFF2021, 0x03e0f978},
+{12000000, DIF_BPF_COEFF2223, 0xfa4e0726},
+{12000000, DIF_BPF_COEFF2425, 0x07c8f8a7},
+{12000000, DIF_BPF_COEFF2627, 0xf600070c},
+{12000000, DIF_BPF_COEFF2829, 0x0c2ff9c9},
+{12000000, DIF_BPF_COEFF3031, 0xf1db04de},
+{12000000, DIF_BPF_COEFF3233, 0x0fb4fce5},
+{12000000, DIF_BPF_COEFF3435, 0xef4b0111},
+{12000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 120_quant.dat*/
+
+
+/*case 12100000:*/
+/* BEGIN - DIF BPF register values from 121_quant.dat*/
+{12100000, DIF_BPF_COEFF01, 0x0000fffd},
+{12100000, DIF_BPF_COEFF23, 0x00010012},
+{12100000, DIF_BPF_COEFF45, 0xffffffc8},
+{12100000, DIF_BPF_COEFF67, 0xfffb007e},
+{12100000, DIF_BPF_COEFF89, 0x001dff14},
+{12100000, DIF_BPF_COEFF1011, 0xffad0184},
+{12100000, DIF_BPF_COEFF1213, 0x00b7fdbe},
+{12100000, DIF_BPF_COEFF1415, 0xfea9031b},
+{12100000, DIF_BPF_COEFF1617, 0x0241fc01},
+{12100000, DIF_BPF_COEFF1819, 0xfc8504d6},
+{12100000, DIF_BPF_COEFF2021, 0x0504fa79},
+{12100000, DIF_BPF_COEFF2223, 0xf93005f6},
+{12100000, DIF_BPF_COEFF2425, 0x08caf9f2},
+{12100000, DIF_BPF_COEFF2627, 0xf52b05c0},
+{12100000, DIF_BPF_COEFF2829, 0x0ccbfaf9},
+{12100000, DIF_BPF_COEFF3031, 0xf17903eb},
+{12100000, DIF_BPF_COEFF3233, 0x0fe3fd83},
+{12100000, DIF_BPF_COEFF3435, 0xef3f00db},
+{12100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 121_quant.dat*/
+
+
+/*case 12200000:*/
+/* BEGIN - DIF BPF register values from 122_quant.dat*/
+{12200000, DIF_BPF_COEFF01, 0x0000fffd},
+{12200000, DIF_BPF_COEFF23, 0xfffe0011},
+{12200000, DIF_BPF_COEFF45, 0x000cffcc},
+{12200000, DIF_BPF_COEFF67, 0xffdb0071},
+{12200000, DIF_BPF_COEFF89, 0x0058ff32},
+{12200000, DIF_BPF_COEFF1011, 0xff4f014a},
+{12200000, DIF_BPF_COEFF1213, 0x013cfe1f},
+{12200000, DIF_BPF_COEFF1415, 0xfdfb028a},
+{12200000, DIF_BPF_COEFF1617, 0x0311fcc9},
+{12200000, DIF_BPF_COEFF1819, 0xfb9d03d6},
+{12200000, DIF_BPF_COEFF2021, 0x05f4fbad},
+{12200000, DIF_BPF_COEFF2223, 0xf848049d},
+{12200000, DIF_BPF_COEFF2425, 0x0999fb5b},
+{12200000, DIF_BPF_COEFF2627, 0xf4820461},
+{12200000, DIF_BPF_COEFF2829, 0x0d46fc32},
+{12200000, DIF_BPF_COEFF3031, 0xf12d02f4},
+{12200000, DIF_BPF_COEFF3233, 0x1007fe21},
+{12200000, DIF_BPF_COEFF3435, 0xef3600a4},
+{12200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 122_quant.dat*/
+
+
+/*case 12300000:*/
+/* BEGIN - DIF BPF register values from 123_quant.dat*/
+{12300000, DIF_BPF_COEFF01, 0x0000fffe},
+{12300000, DIF_BPF_COEFF23, 0xfffa000e},
+{12300000, DIF_BPF_COEFF45, 0x0017ffd9},
+{12300000, DIF_BPF_COEFF67, 0xffc10055},
+{12300000, DIF_BPF_COEFF89, 0x0088ff68},
+{12300000, DIF_BPF_COEFF1011, 0xff0400f0},
+{12300000, DIF_BPF_COEFF1213, 0x01a6fea7},
+{12300000, DIF_BPF_COEFF1415, 0xfd7501cc},
+{12300000, DIF_BPF_COEFF1617, 0x03b0fdc0},
+{12300000, DIF_BPF_COEFF1819, 0xfaef02a8},
+{12300000, DIF_BPF_COEFF2021, 0x06a7fd07},
+{12300000, DIF_BPF_COEFF2223, 0xf79d0326},
+{12300000, DIF_BPF_COEFF2425, 0x0a31fcda},
+{12300000, DIF_BPF_COEFF2627, 0xf40702f3},
+{12300000, DIF_BPF_COEFF2829, 0x0d9ffd72},
+{12300000, DIF_BPF_COEFF3031, 0xf0f601fa},
+{12300000, DIF_BPF_COEFF3233, 0x1021fec0},
+{12300000, DIF_BPF_COEFF3435, 0xef2f006d},
+{12300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 123_quant.dat*/
+
+
+/*case 12400000:*/
+/* BEGIN - DIF BPF register values from 124_quant.dat*/
+{12400000, DIF_BPF_COEFF01, 0x0001ffff},
+{12400000, DIF_BPF_COEFF23, 0xfff80007},
+{12400000, DIF_BPF_COEFF45, 0x001fffeb},
+{12400000, DIF_BPF_COEFF67, 0xffaf002d},
+{12400000, DIF_BPF_COEFF89, 0x00a8ffb0},
+{12400000, DIF_BPF_COEFF1011, 0xfed3007e},
+{12400000, DIF_BPF_COEFF1213, 0x01e9ff4c},
+{12400000, DIF_BPF_COEFF1415, 0xfd2000ee},
+{12400000, DIF_BPF_COEFF1617, 0x0413fed8},
+{12400000, DIF_BPF_COEFF1819, 0xfa82015c},
+{12400000, DIF_BPF_COEFF2021, 0x0715fe7d},
+{12400000, DIF_BPF_COEFF2223, 0xf7340198},
+{12400000, DIF_BPF_COEFF2425, 0x0a8dfe69},
+{12400000, DIF_BPF_COEFF2627, 0xf3bd017c},
+{12400000, DIF_BPF_COEFF2829, 0x0dd5feb8},
+{12400000, DIF_BPF_COEFF3031, 0xf0d500fd},
+{12400000, DIF_BPF_COEFF3233, 0x1031ff60},
+{12400000, DIF_BPF_COEFF3435, 0xef2b0037},
+{12400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 124_quant.dat*/
+
+
+/*case 12500000:*/
+/* BEGIN - DIF BPF register values from 125_quant.dat*/
+{12500000, DIF_BPF_COEFF01, 0x00010000},
+{12500000, DIF_BPF_COEFF23, 0xfff70000},
+{12500000, DIF_BPF_COEFF45, 0x00220000},
+{12500000, DIF_BPF_COEFF67, 0xffa90000},
+{12500000, DIF_BPF_COEFF89, 0x00b30000},
+{12500000, DIF_BPF_COEFF1011, 0xfec20000},
+{12500000, DIF_BPF_COEFF1213, 0x02000000},
+{12500000, DIF_BPF_COEFF1415, 0xfd030000},
+{12500000, DIF_BPF_COEFF1617, 0x04350000},
+{12500000, DIF_BPF_COEFF1819, 0xfa5e0000},
+{12500000, DIF_BPF_COEFF2021, 0x073b0000},
+{12500000, DIF_BPF_COEFF2223, 0xf7110000},
+{12500000, DIF_BPF_COEFF2425, 0x0aac0000},
+{12500000, DIF_BPF_COEFF2627, 0xf3a40000},
+{12500000, DIF_BPF_COEFF2829, 0x0de70000},
+{12500000, DIF_BPF_COEFF3031, 0xf0c90000},
+{12500000, DIF_BPF_COEFF3233, 0x10360000},
+{12500000, DIF_BPF_COEFF3435, 0xef290000},
+{12500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 125_quant.dat*/
+
+
+/*case 12600000:*/
+/* BEGIN - DIF BPF register values from 126_quant.dat*/
+{12600000, DIF_BPF_COEFF01, 0x00010001},
+{12600000, DIF_BPF_COEFF23, 0xfff8fff9},
+{12600000, DIF_BPF_COEFF45, 0x001f0015},
+{12600000, DIF_BPF_COEFF67, 0xffafffd3},
+{12600000, DIF_BPF_COEFF89, 0x00a80050},
+{12600000, DIF_BPF_COEFF1011, 0xfed3ff82},
+{12600000, DIF_BPF_COEFF1213, 0x01e900b4},
+{12600000, DIF_BPF_COEFF1415, 0xfd20ff12},
+{12600000, DIF_BPF_COEFF1617, 0x04130128},
+{12600000, DIF_BPF_COEFF1819, 0xfa82fea4},
+{12600000, DIF_BPF_COEFF2021, 0x07150183},
+{12600000, DIF_BPF_COEFF2223, 0xf734fe68},
+{12600000, DIF_BPF_COEFF2425, 0x0a8d0197},
+{12600000, DIF_BPF_COEFF2627, 0xf3bdfe84},
+{12600000, DIF_BPF_COEFF2829, 0x0dd50148},
+{12600000, DIF_BPF_COEFF3031, 0xf0d5ff03},
+{12600000, DIF_BPF_COEFF3233, 0x103100a0},
+{12600000, DIF_BPF_COEFF3435, 0xef2bffc9},
+{12600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 126_quant.dat*/
+
+
+/*case 12700000:*/
+/* BEGIN - DIF BPF register values from 127_quant.dat*/
+{12700000, DIF_BPF_COEFF01, 0x00000002},
+{12700000, DIF_BPF_COEFF23, 0xfffafff2},
+{12700000, DIF_BPF_COEFF45, 0x00170027},
+{12700000, DIF_BPF_COEFF67, 0xffc1ffab},
+{12700000, DIF_BPF_COEFF89, 0x00880098},
+{12700000, DIF_BPF_COEFF1011, 0xff04ff10},
+{12700000, DIF_BPF_COEFF1213, 0x01a60159},
+{12700000, DIF_BPF_COEFF1415, 0xfd75fe34},
+{12700000, DIF_BPF_COEFF1617, 0x03b00240},
+{12700000, DIF_BPF_COEFF1819, 0xfaeffd58},
+{12700000, DIF_BPF_COEFF2021, 0x06a702f9},
+{12700000, DIF_BPF_COEFF2223, 0xf79dfcda},
+{12700000, DIF_BPF_COEFF2425, 0x0a310326},
+{12700000, DIF_BPF_COEFF2627, 0xf407fd0d},
+{12700000, DIF_BPF_COEFF2829, 0x0d9f028e},
+{12700000, DIF_BPF_COEFF3031, 0xf0f6fe06},
+{12700000, DIF_BPF_COEFF3233, 0x10210140},
+{12700000, DIF_BPF_COEFF3435, 0xef2fff93},
+{12700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 127_quant.dat*/
+
+
+/*case 12800000:*/
+/* BEGIN - DIF BPF register values from 128_quant.dat*/
+{12800000, DIF_BPF_COEFF01, 0x00000003},
+{12800000, DIF_BPF_COEFF23, 0xfffeffef},
+{12800000, DIF_BPF_COEFF45, 0x000c0034},
+{12800000, DIF_BPF_COEFF67, 0xffdbff8f},
+{12800000, DIF_BPF_COEFF89, 0x005800ce},
+{12800000, DIF_BPF_COEFF1011, 0xff4ffeb6},
+{12800000, DIF_BPF_COEFF1213, 0x013c01e1},
+{12800000, DIF_BPF_COEFF1415, 0xfdfbfd76},
+{12800000, DIF_BPF_COEFF1617, 0x03110337},
+{12800000, DIF_BPF_COEFF1819, 0xfb9dfc2a},
+{12800000, DIF_BPF_COEFF2021, 0x05f40453},
+{12800000, DIF_BPF_COEFF2223, 0xf848fb63},
+{12800000, DIF_BPF_COEFF2425, 0x099904a5},
+{12800000, DIF_BPF_COEFF2627, 0xf482fb9f},
+{12800000, DIF_BPF_COEFF2829, 0x0d4603ce},
+{12800000, DIF_BPF_COEFF3031, 0xf12dfd0c},
+{12800000, DIF_BPF_COEFF3233, 0x100701df},
+{12800000, DIF_BPF_COEFF3435, 0xef36ff5c},
+{12800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 128_quant.dat*/
+
+
+/*case 12900000:*/
+/* BEGIN - DIF BPF register values from 129_quant.dat*/
+{12900000, DIF_BPF_COEFF01, 0x00000003},
+{12900000, DIF_BPF_COEFF23, 0x0001ffee},
+{12900000, DIF_BPF_COEFF45, 0xffff0038},
+{12900000, DIF_BPF_COEFF67, 0xfffbff82},
+{12900000, DIF_BPF_COEFF89, 0x001d00ec},
+{12900000, DIF_BPF_COEFF1011, 0xffadfe7c},
+{12900000, DIF_BPF_COEFF1213, 0x00b70242},
+{12900000, DIF_BPF_COEFF1415, 0xfea9fce5},
+{12900000, DIF_BPF_COEFF1617, 0x024103ff},
+{12900000, DIF_BPF_COEFF1819, 0xfc85fb2a},
+{12900000, DIF_BPF_COEFF2021, 0x05040587},
+{12900000, DIF_BPF_COEFF2223, 0xf930fa0a},
+{12900000, DIF_BPF_COEFF2425, 0x08ca060e},
+{12900000, DIF_BPF_COEFF2627, 0xf52bfa40},
+{12900000, DIF_BPF_COEFF2829, 0x0ccb0507},
+{12900000, DIF_BPF_COEFF3031, 0xf179fc15},
+{12900000, DIF_BPF_COEFF3233, 0x0fe3027d},
+{12900000, DIF_BPF_COEFF3435, 0xef3fff25},
+{12900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 129_quant.dat*/
+
+
+/*case 13000000:*/
+/* BEGIN - DIF BPF register values from 130_quant.dat*/
+{13000000, DIF_BPF_COEFF01, 0x00000002},
+{13000000, DIF_BPF_COEFF23, 0x0005fff0},
+{13000000, DIF_BPF_COEFF45, 0xfff20034},
+{13000000, DIF_BPF_COEFF67, 0x001bff85},
+{13000000, DIF_BPF_COEFF89, 0xffdf00f0},
+{13000000, DIF_BPF_COEFF1011, 0x0014fe68},
+{13000000, DIF_BPF_COEFF1213, 0x00200272},
+{13000000, DIF_BPF_COEFF1415, 0xff71fc8b},
+{13000000, DIF_BPF_COEFF1617, 0x014d048d},
+{13000000, DIF_BPF_COEFF1819, 0xfd9afa61},
+{13000000, DIF_BPF_COEFF2021, 0x03e00688},
+{13000000, DIF_BPF_COEFF2223, 0xfa4ef8da},
+{13000000, DIF_BPF_COEFF2425, 0x07c80759},
+{13000000, DIF_BPF_COEFF2627, 0xf600f8f4},
+{13000000, DIF_BPF_COEFF2829, 0x0c2f0637},
+{13000000, DIF_BPF_COEFF3031, 0xf1dbfb22},
+{13000000, DIF_BPF_COEFF3233, 0x0fb4031b},
+{13000000, DIF_BPF_COEFF3435, 0xef4bfeef},
+{13000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 130_quant.dat*/
+
+
+/*case 13100000:*/
+/* BEGIN - DIF BPF register values from 131_quant.dat*/
+{13100000, DIF_BPF_COEFF01, 0xffff0001},
+{13100000, DIF_BPF_COEFF23, 0x0007fff5},
+{13100000, DIF_BPF_COEFF45, 0xffe70028},
+{13100000, DIF_BPF_COEFF67, 0x0037ff98},
+{13100000, DIF_BPF_COEFF89, 0xffa400d8},
+{13100000, DIF_BPF_COEFF1011, 0x0079fe7c},
+{13100000, DIF_BPF_COEFF1213, 0xff87026f},
+{13100000, DIF_BPF_COEFF1415, 0x0043fc6e},
+{13100000, DIF_BPF_COEFF1617, 0x004404da},
+{13100000, DIF_BPF_COEFF1819, 0xfecef9da},
+{13100000, DIF_BPF_COEFF2021, 0x0294074e},
+{13100000, DIF_BPF_COEFF2223, 0xfb99f7db},
+{13100000, DIF_BPF_COEFF2425, 0x06980881},
+{13100000, DIF_BPF_COEFF2627, 0xf6fef7be},
+{13100000, DIF_BPF_COEFF2829, 0x0b730759},
+{13100000, DIF_BPF_COEFF3031, 0xf251fa33},
+{13100000, DIF_BPF_COEFF3233, 0x0f7b03b8},
+{13100000, DIF_BPF_COEFF3435, 0xef5afeb8},
+{13100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 131_quant.dat*/
+
+
+/*case 13200000:*/
+/* BEGIN - DIF BPF register values from 132_quant.dat*/
+{13200000, DIF_BPF_COEFF01, 0xffff0000},
+{13200000, DIF_BPF_COEFF23, 0x0008fffc},
+{13200000, DIF_BPF_COEFF45, 0xffe00017},
+{13200000, DIF_BPF_COEFF67, 0x004cffb9},
+{13200000, DIF_BPF_COEFF89, 0xff7500a8},
+{13200000, DIF_BPF_COEFF1011, 0x00d1feb6},
+{13200000, DIF_BPF_COEFF1213, 0xfef90238},
+{13200000, DIF_BPF_COEFF1415, 0x0111fc91},
+{13200000, DIF_BPF_COEFF1617, 0xff3604df},
+{13200000, DIF_BPF_COEFF1819, 0x0012f99b},
+{13200000, DIF_BPF_COEFF2021, 0x012d07d2},
+{13200000, DIF_BPF_COEFF2223, 0xfd07f714},
+{13200000, DIF_BPF_COEFF2425, 0x0542097e},
+{13200000, DIF_BPF_COEFF2627, 0xf81ff6a4},
+{13200000, DIF_BPF_COEFF2829, 0x0a9a086e},
+{13200000, DIF_BPF_COEFF3031, 0xf2dbf94b},
+{13200000, DIF_BPF_COEFF3233, 0x0f380453},
+{13200000, DIF_BPF_COEFF3435, 0xef6cfe82},
+{13200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 132_quant.dat*/
+
+
+/*case 13300000:*/
+/* BEGIN - DIF BPF register values from 133_quant.dat*/
+{13300000, DIF_BPF_COEFF01, 0xffffffff},
+{13300000, DIF_BPF_COEFF23, 0x00080003},
+{13300000, DIF_BPF_COEFF45, 0xffde0001},
+{13300000, DIF_BPF_COEFF67, 0x0056ffe3},
+{13300000, DIF_BPF_COEFF89, 0xff570064},
+{13300000, DIF_BPF_COEFF1011, 0x0113ff10},
+{13300000, DIF_BPF_COEFF1213, 0xfe8201d2},
+{13300000, DIF_BPF_COEFF1415, 0x01cafcf0},
+{13300000, DIF_BPF_COEFF1617, 0xfe35049e},
+{13300000, DIF_BPF_COEFF1819, 0x0155f9a6},
+{13300000, DIF_BPF_COEFF2021, 0xffba080e},
+{13300000, DIF_BPF_COEFF2223, 0xfe8cf689},
+{13300000, DIF_BPF_COEFF2425, 0x03ce0a4e},
+{13300000, DIF_BPF_COEFF2627, 0xf961f5a8},
+{13300000, DIF_BPF_COEFF2829, 0x09a50971},
+{13300000, DIF_BPF_COEFF3031, 0xf379f869},
+{13300000, DIF_BPF_COEFF3233, 0x0eeb04ec},
+{13300000, DIF_BPF_COEFF3435, 0xef80fe4b},
+{13300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 133_quant.dat*/
+
+
+/*case 13400000:*/
+/* BEGIN - DIF BPF register values from 134_quant.dat*/
+{13400000, DIF_BPF_COEFF01, 0x0000fffe},
+{13400000, DIF_BPF_COEFF23, 0x0007000a},
+{13400000, DIF_BPF_COEFF45, 0xffe2ffec},
+{13400000, DIF_BPF_COEFF67, 0x00540012},
+{13400000, DIF_BPF_COEFF89, 0xff4e0015},
+{13400000, DIF_BPF_COEFF1011, 0x0137ff82},
+{13400000, DIF_BPF_COEFF1213, 0xfe2e0145},
+{13400000, DIF_BPF_COEFF1415, 0x0260fd86},
+{13400000, DIF_BPF_COEFF1617, 0xfd51041a},
+{13400000, DIF_BPF_COEFF1819, 0x0287f9fb},
+{13400000, DIF_BPF_COEFF2021, 0xfe4a0802},
+{13400000, DIF_BPF_COEFF2223, 0x001df63f},
+{13400000, DIF_BPF_COEFF2425, 0x02430aeb},
+{13400000, DIF_BPF_COEFF2627, 0xfabdf4ce},
+{13400000, DIF_BPF_COEFF2829, 0x08970a62},
+{13400000, DIF_BPF_COEFF3031, 0xf428f78f},
+{13400000, DIF_BPF_COEFF3233, 0x0e950584},
+{13400000, DIF_BPF_COEFF3435, 0xef97fe15},
+{13400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 134_quant.dat*/
+
+
+/*case 13500000:*/
+/* BEGIN - DIF BPF register values from 135_quant.dat*/
+{13500000, DIF_BPF_COEFF01, 0x0000fffd},
+{13500000, DIF_BPF_COEFF23, 0x0004000f},
+{13500000, DIF_BPF_COEFF45, 0xffeaffda},
+{13500000, DIF_BPF_COEFF67, 0x0046003d},
+{13500000, DIF_BPF_COEFF89, 0xff5affc4},
+{13500000, DIF_BPF_COEFF1011, 0x013b0000},
+{13500000, DIF_BPF_COEFF1213, 0xfe04009d},
+{13500000, DIF_BPF_COEFF1415, 0x02c8fe48},
+{13500000, DIF_BPF_COEFF1617, 0xfc99035a},
+{13500000, DIF_BPF_COEFF1819, 0x0397fa96},
+{13500000, DIF_BPF_COEFF2021, 0xfcec07ad},
+{13500000, DIF_BPF_COEFF2223, 0x01adf637},
+{13500000, DIF_BPF_COEFF2425, 0x00ac0b53},
+{13500000, DIF_BPF_COEFF2627, 0xfc2ef419},
+{13500000, DIF_BPF_COEFF2829, 0x07730b3e},
+{13500000, DIF_BPF_COEFF3031, 0xf4e9f6bd},
+{13500000, DIF_BPF_COEFF3233, 0x0e35061a},
+{13500000, DIF_BPF_COEFF3435, 0xefb1fddf},
+{13500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 135_quant.dat*/
+
+
+/*case 13600000:*/
+/* BEGIN - DIF BPF register values from 136_quant.dat*/
+{13600000, DIF_BPF_COEFF01, 0x0000fffd},
+{13600000, DIF_BPF_COEFF23, 0x00000012},
+{13600000, DIF_BPF_COEFF45, 0xfff6ffcd},
+{13600000, DIF_BPF_COEFF67, 0x002f0061},
+{13600000, DIF_BPF_COEFF89, 0xff7bff79},
+{13600000, DIF_BPF_COEFF1011, 0x011e007e},
+{13600000, DIF_BPF_COEFF1213, 0xfe08ffe8},
+{13600000, DIF_BPF_COEFF1415, 0x02f9ff28},
+{13600000, DIF_BPF_COEFF1617, 0xfc17026a},
+{13600000, DIF_BPF_COEFF1819, 0x0479fb70},
+{13600000, DIF_BPF_COEFF2021, 0xfbad0713},
+{13600000, DIF_BPF_COEFF2223, 0x032ff672},
+{13600000, DIF_BPF_COEFF2425, 0xff100b83},
+{13600000, DIF_BPF_COEFF2627, 0xfdaff38b},
+{13600000, DIF_BPF_COEFF2829, 0x063c0c04},
+{13600000, DIF_BPF_COEFF3031, 0xf5baf5f5},
+{13600000, DIF_BPF_COEFF3233, 0x0dcc06ae},
+{13600000, DIF_BPF_COEFF3435, 0xefcdfda8},
+{13600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 136_quant.dat*/
+
+
+/*case 13700000:*/
+/* BEGIN - DIF BPF register values from 137_quant.dat*/
+{13700000, DIF_BPF_COEFF01, 0x0000fffd},
+{13700000, DIF_BPF_COEFF23, 0xfffd0012},
+{13700000, DIF_BPF_COEFF45, 0x0004ffc8},
+{13700000, DIF_BPF_COEFF67, 0x00100078},
+{13700000, DIF_BPF_COEFF89, 0xffacff3e},
+{13700000, DIF_BPF_COEFF1011, 0x00e200f0},
+{13700000, DIF_BPF_COEFF1213, 0xfe39ff35},
+{13700000, DIF_BPF_COEFF1415, 0x02f10017},
+{13700000, DIF_BPF_COEFF1617, 0xfbd30156},
+{13700000, DIF_BPF_COEFF1819, 0x0521fc7f},
+{13700000, DIF_BPF_COEFF2021, 0xfa9c0638},
+{13700000, DIF_BPF_COEFF2223, 0x0499f6ee},
+{13700000, DIF_BPF_COEFF2425, 0xfd7a0b7c},
+{13700000, DIF_BPF_COEFF2627, 0xff39f325},
+{13700000, DIF_BPF_COEFF2829, 0x04f40cb3},
+{13700000, DIF_BPF_COEFF3031, 0xf69af537},
+{13700000, DIF_BPF_COEFF3233, 0x0d5a073f},
+{13700000, DIF_BPF_COEFF3435, 0xefecfd72},
+{13700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 137_quant.dat*/
+
+
+/*case 13800000:*/
+/* BEGIN - DIF BPF register values from 138_quant.dat*/
+{13800000, DIF_BPF_COEFF01, 0x0001fffe},
+{13800000, DIF_BPF_COEFF23, 0xfffa000e},
+{13800000, DIF_BPF_COEFF45, 0x0011ffcb},
+{13800000, DIF_BPF_COEFF67, 0xfff0007f},
+{13800000, DIF_BPF_COEFF89, 0xffe7ff19},
+{13800000, DIF_BPF_COEFF1011, 0x008f014a},
+{13800000, DIF_BPF_COEFF1213, 0xfe94fe93},
+{13800000, DIF_BPF_COEFF1415, 0x02b00105},
+{13800000, DIF_BPF_COEFF1617, 0xfbd3002f},
+{13800000, DIF_BPF_COEFF1819, 0x0585fdb7},
+{13800000, DIF_BPF_COEFF2021, 0xf9c10525},
+{13800000, DIF_BPF_COEFF2223, 0x05def7a8},
+{13800000, DIF_BPF_COEFF2425, 0xfbf20b3c},
+{13800000, DIF_BPF_COEFF2627, 0x00c7f2e9},
+{13800000, DIF_BPF_COEFF2829, 0x03a00d48},
+{13800000, DIF_BPF_COEFF3031, 0xf787f484},
+{13800000, DIF_BPF_COEFF3233, 0x0cdf07cd},
+{13800000, DIF_BPF_COEFF3435, 0xf00dfd3c},
+{13800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 138_quant.dat*/
+
+
+/*case 13900000:*/
+/* BEGIN - DIF BPF register values from 139_quant.dat*/
+{13900000, DIF_BPF_COEFF01, 0x00010000},
+{13900000, DIF_BPF_COEFF23, 0xfff80008},
+{13900000, DIF_BPF_COEFF45, 0x001bffd7},
+{13900000, DIF_BPF_COEFF67, 0xffd10076},
+{13900000, DIF_BPF_COEFF89, 0x0026ff0e},
+{13900000, DIF_BPF_COEFF1011, 0x002c0184},
+{13900000, DIF_BPF_COEFF1213, 0xff0ffe10},
+{13900000, DIF_BPF_COEFF1415, 0x023b01e0},
+{13900000, DIF_BPF_COEFF1617, 0xfc17ff06},
+{13900000, DIF_BPF_COEFF1819, 0x05a2ff09},
+{13900000, DIF_BPF_COEFF2021, 0xf92703e4},
+{13900000, DIF_BPF_COEFF2223, 0x06f4f89b},
+{13900000, DIF_BPF_COEFF2425, 0xfa820ac5},
+{13900000, DIF_BPF_COEFF2627, 0x0251f2d9},
+{13900000, DIF_BPF_COEFF2829, 0x02430dc3},
+{13900000, DIF_BPF_COEFF3031, 0xf881f3dc},
+{13900000, DIF_BPF_COEFF3233, 0x0c5c0859},
+{13900000, DIF_BPF_COEFF3435, 0xf031fd06},
+{13900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 139_quant.dat*/
+
+
+/*case 14000000:*/
+/* BEGIN - DIF BPF register values from 140_quant.dat*/
+{14000000, DIF_BPF_COEFF01, 0x00010001},
+{14000000, DIF_BPF_COEFF23, 0xfff80001},
+{14000000, DIF_BPF_COEFF45, 0x0021ffe8},
+{14000000, DIF_BPF_COEFF67, 0xffba005d},
+{14000000, DIF_BPF_COEFF89, 0x0060ff1f},
+{14000000, DIF_BPF_COEFF1011, 0xffc40198},
+{14000000, DIF_BPF_COEFF1213, 0xffa0fdb5},
+{14000000, DIF_BPF_COEFF1415, 0x019a029a},
+{14000000, DIF_BPF_COEFF1617, 0xfc99fdea},
+{14000000, DIF_BPF_COEFF1819, 0x05750067},
+{14000000, DIF_BPF_COEFF2021, 0xf8d4027f},
+{14000000, DIF_BPF_COEFF2223, 0x07d4f9c0},
+{14000000, DIF_BPF_COEFF2425, 0xf9320a1a},
+{14000000, DIF_BPF_COEFF2627, 0x03d2f2f3},
+{14000000, DIF_BPF_COEFF2829, 0x00df0e22},
+{14000000, DIF_BPF_COEFF3031, 0xf986f341},
+{14000000, DIF_BPF_COEFF3233, 0x0bd108e2},
+{14000000, DIF_BPF_COEFF3435, 0xf058fcd1},
+{14000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 140_quant.dat*/
+
+
+/*case 14100000:*/
+/* BEGIN - DIF BPF register values from 141_quant.dat*/
+{14100000, DIF_BPF_COEFF01, 0x00000002},
+{14100000, DIF_BPF_COEFF23, 0xfff9fffa},
+{14100000, DIF_BPF_COEFF45, 0x0021fffd},
+{14100000, DIF_BPF_COEFF67, 0xffac0038},
+{14100000, DIF_BPF_COEFF89, 0x008eff4a},
+{14100000, DIF_BPF_COEFF1011, 0xff630184},
+{14100000, DIF_BPF_COEFF1213, 0x003afd8b},
+{14100000, DIF_BPF_COEFF1415, 0x00da0326},
+{14100000, DIF_BPF_COEFF1617, 0xfd51fced},
+{14100000, DIF_BPF_COEFF1819, 0x050101c0},
+{14100000, DIF_BPF_COEFF2021, 0xf8cb0103},
+{14100000, DIF_BPF_COEFF2223, 0x0876fb10},
+{14100000, DIF_BPF_COEFF2425, 0xf80a093e},
+{14100000, DIF_BPF_COEFF2627, 0x0543f338},
+{14100000, DIF_BPF_COEFF2829, 0xff7a0e66},
+{14100000, DIF_BPF_COEFF3031, 0xfa94f2b2},
+{14100000, DIF_BPF_COEFF3233, 0x0b3f0967},
+{14100000, DIF_BPF_COEFF3435, 0xf081fc9b},
+{14100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 141_quant.dat*/
+
+
+/*case 14200000:*/
+/* BEGIN - DIF BPF register values from 142_quant.dat*/
+{14200000, DIF_BPF_COEFF01, 0x00000003},
+{14200000, DIF_BPF_COEFF23, 0xfffbfff3},
+{14200000, DIF_BPF_COEFF45, 0x001d0013},
+{14200000, DIF_BPF_COEFF67, 0xffaa000b},
+{14200000, DIF_BPF_COEFF89, 0x00aaff89},
+{14200000, DIF_BPF_COEFF1011, 0xff13014a},
+{14200000, DIF_BPF_COEFF1213, 0x00cefd95},
+{14200000, DIF_BPF_COEFF1415, 0x000a037b},
+{14200000, DIF_BPF_COEFF1617, 0xfe35fc1d},
+{14200000, DIF_BPF_COEFF1819, 0x044c0305},
+{14200000, DIF_BPF_COEFF2021, 0xf90cff7e},
+{14200000, DIF_BPF_COEFF2223, 0x08d5fc81},
+{14200000, DIF_BPF_COEFF2425, 0xf7100834},
+{14200000, DIF_BPF_COEFF2627, 0x069ff3a7},
+{14200000, DIF_BPF_COEFF2829, 0xfe160e8d},
+{14200000, DIF_BPF_COEFF3031, 0xfbaaf231},
+{14200000, DIF_BPF_COEFF3233, 0x0aa509e9},
+{14200000, DIF_BPF_COEFF3435, 0xf0adfc65},
+{14200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 142_quant.dat*/
+
+
+/*case 14300000:*/
+/* BEGIN - DIF BPF register values from 143_quant.dat*/
+{14300000, DIF_BPF_COEFF01, 0x00000003},
+{14300000, DIF_BPF_COEFF23, 0xffffffef},
+{14300000, DIF_BPF_COEFF45, 0x00140025},
+{14300000, DIF_BPF_COEFF67, 0xffb4ffdd},
+{14300000, DIF_BPF_COEFF89, 0x00b2ffd6},
+{14300000, DIF_BPF_COEFF1011, 0xfedb00f0},
+{14300000, DIF_BPF_COEFF1213, 0x0150fdd3},
+{14300000, DIF_BPF_COEFF1415, 0xff380391},
+{14300000, DIF_BPF_COEFF1617, 0xff36fb85},
+{14300000, DIF_BPF_COEFF1819, 0x035e0426},
+{14300000, DIF_BPF_COEFF2021, 0xf994fdfe},
+{14300000, DIF_BPF_COEFF2223, 0x08eefe0b},
+{14300000, DIF_BPF_COEFF2425, 0xf6490702},
+{14300000, DIF_BPF_COEFF2627, 0x07e1f43e},
+{14300000, DIF_BPF_COEFF2829, 0xfcb60e97},
+{14300000, DIF_BPF_COEFF3031, 0xfcc6f1be},
+{14300000, DIF_BPF_COEFF3233, 0x0a040a67},
+{14300000, DIF_BPF_COEFF3435, 0xf0dbfc30},
+{14300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 143_quant.dat*/
+
+
+/*case 14400000:*/
+/* BEGIN - DIF BPF register values from 144_quant.dat*/
+{14400000, DIF_BPF_COEFF01, 0x00000003},
+{14400000, DIF_BPF_COEFF23, 0x0002ffee},
+{14400000, DIF_BPF_COEFF45, 0x00070033},
+{14400000, DIF_BPF_COEFF67, 0xffc9ffb4},
+{14400000, DIF_BPF_COEFF89, 0x00a40027},
+{14400000, DIF_BPF_COEFF1011, 0xfec3007e},
+{14400000, DIF_BPF_COEFF1213, 0x01b4fe3f},
+{14400000, DIF_BPF_COEFF1415, 0xfe760369},
+{14400000, DIF_BPF_COEFF1617, 0x0044fb2e},
+{14400000, DIF_BPF_COEFF1819, 0x02450518},
+{14400000, DIF_BPF_COEFF2021, 0xfa5ffc90},
+{14400000, DIF_BPF_COEFF2223, 0x08c1ffa1},
+{14400000, DIF_BPF_COEFF2425, 0xf5bc05ae},
+{14400000, DIF_BPF_COEFF2627, 0x0902f4fc},
+{14400000, DIF_BPF_COEFF2829, 0xfb600e85},
+{14400000, DIF_BPF_COEFF3031, 0xfde7f15a},
+{14400000, DIF_BPF_COEFF3233, 0x095d0ae2},
+{14400000, DIF_BPF_COEFF3435, 0xf10cfbfb},
+{14400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 144_quant.dat*/
+
+
+/*case 14500000:*/
+/* BEGIN - DIF BPF register values from 145_quant.dat*/
+{14500000, DIF_BPF_COEFF01, 0xffff0002},
+{14500000, DIF_BPF_COEFF23, 0x0005ffef},
+{14500000, DIF_BPF_COEFF45, 0xfffa0038},
+{14500000, DIF_BPF_COEFF67, 0xffe5ff95},
+{14500000, DIF_BPF_COEFF89, 0x00820074},
+{14500000, DIF_BPF_COEFF1011, 0xfecc0000},
+{14500000, DIF_BPF_COEFF1213, 0x01f0fed0},
+{14500000, DIF_BPF_COEFF1415, 0xfdd20304},
+{14500000, DIF_BPF_COEFF1617, 0x014dfb1d},
+{14500000, DIF_BPF_COEFF1819, 0x010e05ce},
+{14500000, DIF_BPF_COEFF2021, 0xfb64fb41},
+{14500000, DIF_BPF_COEFF2223, 0x084e013b},
+{14500000, DIF_BPF_COEFF2425, 0xf569043e},
+{14500000, DIF_BPF_COEFF2627, 0x0a00f5dd},
+{14500000, DIF_BPF_COEFF2829, 0xfa150e55},
+{14500000, DIF_BPF_COEFF3031, 0xff0bf104},
+{14500000, DIF_BPF_COEFF3233, 0x08b00b59},
+{14500000, DIF_BPF_COEFF3435, 0xf13ffbc6},
+{14500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 145_quant.dat*/
+
+
+/*case 14600000:*/
+/* BEGIN - DIF BPF register values from 146_quant.dat*/
+{14600000, DIF_BPF_COEFF01, 0xffff0001},
+{14600000, DIF_BPF_COEFF23, 0x0008fff4},
+{14600000, DIF_BPF_COEFF45, 0xffed0035},
+{14600000, DIF_BPF_COEFF67, 0x0005ff83},
+{14600000, DIF_BPF_COEFF89, 0x005000b4},
+{14600000, DIF_BPF_COEFF1011, 0xfef6ff82},
+{14600000, DIF_BPF_COEFF1213, 0x01ffff7a},
+{14600000, DIF_BPF_COEFF1415, 0xfd580269},
+{14600000, DIF_BPF_COEFF1617, 0x0241fb53},
+{14600000, DIF_BPF_COEFF1819, 0xffca0640},
+{14600000, DIF_BPF_COEFF2021, 0xfc99fa1e},
+{14600000, DIF_BPF_COEFF2223, 0x079a02cb},
+{14600000, DIF_BPF_COEFF2425, 0xf55502ba},
+{14600000, DIF_BPF_COEFF2627, 0x0ad5f6e0},
+{14600000, DIF_BPF_COEFF2829, 0xf8d90e0a},
+{14600000, DIF_BPF_COEFF3031, 0x0031f0bd},
+{14600000, DIF_BPF_COEFF3233, 0x07fd0bcb},
+{14600000, DIF_BPF_COEFF3435, 0xf174fb91},
+{14600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 146_quant.dat*/
+
+
+/*case 14700000:*/
+/* BEGIN - DIF BPF register values from 147_quant.dat*/
+{14700000, DIF_BPF_COEFF01, 0xffffffff},
+{14700000, DIF_BPF_COEFF23, 0x0009fffb},
+{14700000, DIF_BPF_COEFF45, 0xffe4002a},
+{14700000, DIF_BPF_COEFF67, 0x0025ff82},
+{14700000, DIF_BPF_COEFF89, 0x001400e0},
+{14700000, DIF_BPF_COEFF1011, 0xff3cff10},
+{14700000, DIF_BPF_COEFF1213, 0x01e10030},
+{14700000, DIF_BPF_COEFF1415, 0xfd1201a4},
+{14700000, DIF_BPF_COEFF1617, 0x0311fbcd},
+{14700000, DIF_BPF_COEFF1819, 0xfe88066a},
+{14700000, DIF_BPF_COEFF2021, 0xfdf1f92f},
+{14700000, DIF_BPF_COEFF2223, 0x06aa0449},
+{14700000, DIF_BPF_COEFF2425, 0xf57e0128},
+{14700000, DIF_BPF_COEFF2627, 0x0b7ef801},
+{14700000, DIF_BPF_COEFF2829, 0xf7b00da2},
+{14700000, DIF_BPF_COEFF3031, 0x0156f086},
+{14700000, DIF_BPF_COEFF3233, 0x07450c39},
+{14700000, DIF_BPF_COEFF3435, 0xf1acfb5c},
+{14700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 147_quant.dat*/
+
+
+/*case 14800000:*/
+/* BEGIN - DIF BPF register values from 148_quant.dat*/
+{14800000, DIF_BPF_COEFF01, 0x0000fffe},
+{14800000, DIF_BPF_COEFF23, 0x00080002},
+{14800000, DIF_BPF_COEFF45, 0xffdf0019},
+{14800000, DIF_BPF_COEFF67, 0x003fff92},
+{14800000, DIF_BPF_COEFF89, 0xffd600f1},
+{14800000, DIF_BPF_COEFF1011, 0xff96feb6},
+{14800000, DIF_BPF_COEFF1213, 0x019700e1},
+{14800000, DIF_BPF_COEFF1415, 0xfd0500c2},
+{14800000, DIF_BPF_COEFF1617, 0x03b0fc84},
+{14800000, DIF_BPF_COEFF1819, 0xfd590649},
+{14800000, DIF_BPF_COEFF2021, 0xff5df87f},
+{14800000, DIF_BPF_COEFF2223, 0x058505aa},
+{14800000, DIF_BPF_COEFF2425, 0xf5e4ff91},
+{14800000, DIF_BPF_COEFF2627, 0x0bf9f93c},
+{14800000, DIF_BPF_COEFF2829, 0xf69d0d20},
+{14800000, DIF_BPF_COEFF3031, 0x0279f05e},
+{14800000, DIF_BPF_COEFF3233, 0x06880ca3},
+{14800000, DIF_BPF_COEFF3435, 0xf1e6fb28},
+{14800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 148_quant.dat*/
+
+
+/*case 14900000:*/
+/* BEGIN - DIF BPF register values from 149_quant.dat*/
+{14900000, DIF_BPF_COEFF01, 0x0000fffd},
+{14900000, DIF_BPF_COEFF23, 0x00060009},
+{14900000, DIF_BPF_COEFF45, 0xffdf0004},
+{14900000, DIF_BPF_COEFF67, 0x0051ffb0},
+{14900000, DIF_BPF_COEFF89, 0xff9d00e8},
+{14900000, DIF_BPF_COEFF1011, 0xfffcfe7c},
+{14900000, DIF_BPF_COEFF1213, 0x01280180},
+{14900000, DIF_BPF_COEFF1415, 0xfd32ffd2},
+{14900000, DIF_BPF_COEFF1617, 0x0413fd6e},
+{14900000, DIF_BPF_COEFF1819, 0xfc4d05df},
+{14900000, DIF_BPF_COEFF2021, 0x00d1f812},
+{14900000, DIF_BPF_COEFF2223, 0x043506e4},
+{14900000, DIF_BPF_COEFF2425, 0xf685fdfb},
+{14900000, DIF_BPF_COEFF2627, 0x0c43fa8d},
+{14900000, DIF_BPF_COEFF2829, 0xf5a10c83},
+{14900000, DIF_BPF_COEFF3031, 0x0399f046},
+{14900000, DIF_BPF_COEFF3233, 0x05c70d08},
+{14900000, DIF_BPF_COEFF3435, 0xf222faf3},
+{14900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 149_quant.dat*/
+
+
+/*case 15000000:*/
+/* BEGIN - DIF BPF register values from 150_quant.dat*/
+{15000000, DIF_BPF_COEFF01, 0x0000fffd},
+{15000000, DIF_BPF_COEFF23, 0x0003000f},
+{15000000, DIF_BPF_COEFF45, 0xffe5ffef},
+{15000000, DIF_BPF_COEFF67, 0x0057ffd9},
+{15000000, DIF_BPF_COEFF89, 0xff7000c4},
+{15000000, DIF_BPF_COEFF1011, 0x0062fe68},
+{15000000, DIF_BPF_COEFF1213, 0x009e01ff},
+{15000000, DIF_BPF_COEFF1415, 0xfd95fee6},
+{15000000, DIF_BPF_COEFF1617, 0x0435fe7d},
+{15000000, DIF_BPF_COEFF1819, 0xfb710530},
+{15000000, DIF_BPF_COEFF2021, 0x023cf7ee},
+{15000000, DIF_BPF_COEFF2223, 0x02c307ef},
+{15000000, DIF_BPF_COEFF2425, 0xf75efc70},
+{15000000, DIF_BPF_COEFF2627, 0x0c5cfbef},
+{15000000, DIF_BPF_COEFF2829, 0xf4c10bce},
+{15000000, DIF_BPF_COEFF3031, 0x04b3f03f},
+{15000000, DIF_BPF_COEFF3233, 0x05030d69},
+{15000000, DIF_BPF_COEFF3435, 0xf261fabf},
+{15000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 150_quant.dat*/
+
+
+/*case 15100000:*/
+/* BEGIN - DIF BPF register values from 151_quant.dat*/
+{15100000, DIF_BPF_COEFF01, 0x0000fffd},
+{15100000, DIF_BPF_COEFF23, 0xffff0012},
+{15100000, DIF_BPF_COEFF45, 0xffefffdc},
+{15100000, DIF_BPF_COEFF67, 0x00510006},
+{15100000, DIF_BPF_COEFF89, 0xff540089},
+{15100000, DIF_BPF_COEFF1011, 0x00befe7c},
+{15100000, DIF_BPF_COEFF1213, 0x00060253},
+{15100000, DIF_BPF_COEFF1415, 0xfe27fe0d},
+{15100000, DIF_BPF_COEFF1617, 0x0413ffa2},
+{15100000, DIF_BPF_COEFF1819, 0xfad10446},
+{15100000, DIF_BPF_COEFF2021, 0x0390f812},
+{15100000, DIF_BPF_COEFF2223, 0x013b08c3},
+{15100000, DIF_BPF_COEFF2425, 0xf868faf6},
+{15100000, DIF_BPF_COEFF2627, 0x0c43fd5f},
+{15100000, DIF_BPF_COEFF2829, 0xf3fd0b02},
+{15100000, DIF_BPF_COEFF3031, 0x05c7f046},
+{15100000, DIF_BPF_COEFF3233, 0x043b0dc4},
+{15100000, DIF_BPF_COEFF3435, 0xf2a1fa8b},
+{15100000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 151_quant.dat*/
+
+
+/*case 15200000:*/
+/* BEGIN - DIF BPF register values from 152_quant.dat*/
+{15200000, DIF_BPF_COEFF01, 0x0001fffe},
+{15200000, DIF_BPF_COEFF23, 0xfffc0012},
+{15200000, DIF_BPF_COEFF45, 0xfffbffce},
+{15200000, DIF_BPF_COEFF67, 0x003f0033},
+{15200000, DIF_BPF_COEFF89, 0xff4e003f},
+{15200000, DIF_BPF_COEFF1011, 0x0106feb6},
+{15200000, DIF_BPF_COEFF1213, 0xff6e0276},
+{15200000, DIF_BPF_COEFF1415, 0xfeddfd56},
+{15200000, DIF_BPF_COEFF1617, 0x03b000cc},
+{15200000, DIF_BPF_COEFF1819, 0xfa740329},
+{15200000, DIF_BPF_COEFF2021, 0x04bff87f},
+{15200000, DIF_BPF_COEFF2223, 0xffaa095d},
+{15200000, DIF_BPF_COEFF2425, 0xf99ef995},
+{15200000, DIF_BPF_COEFF2627, 0x0bf9fed8},
+{15200000, DIF_BPF_COEFF2829, 0xf3590a1f},
+{15200000, DIF_BPF_COEFF3031, 0x06d2f05e},
+{15200000, DIF_BPF_COEFF3233, 0x03700e1b},
+{15200000, DIF_BPF_COEFF3435, 0xf2e4fa58},
+{15200000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 152_quant.dat*/
+
+
+/*case 15300000:*/
+/* BEGIN - DIF BPF register values from 153_quant.dat*/
+{15300000, DIF_BPF_COEFF01, 0x0001ffff},
+{15300000, DIF_BPF_COEFF23, 0xfff9000f},
+{15300000, DIF_BPF_COEFF45, 0x0009ffc8},
+{15300000, DIF_BPF_COEFF67, 0x00250059},
+{15300000, DIF_BPF_COEFF89, 0xff5effee},
+{15300000, DIF_BPF_COEFF1011, 0x0132ff10},
+{15300000, DIF_BPF_COEFF1213, 0xfee30265},
+{15300000, DIF_BPF_COEFF1415, 0xffaafccf},
+{15300000, DIF_BPF_COEFF1617, 0x031101eb},
+{15300000, DIF_BPF_COEFF1819, 0xfa6001e8},
+{15300000, DIF_BPF_COEFF2021, 0x05bdf92f},
+{15300000, DIF_BPF_COEFF2223, 0xfe1b09b6},
+{15300000, DIF_BPF_COEFF2425, 0xfafaf852},
+{15300000, DIF_BPF_COEFF2627, 0x0b7e0055},
+{15300000, DIF_BPF_COEFF2829, 0xf2d50929},
+{15300000, DIF_BPF_COEFF3031, 0x07d3f086},
+{15300000, DIF_BPF_COEFF3233, 0x02a30e6c},
+{15300000, DIF_BPF_COEFF3435, 0xf329fa24},
+{15300000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 153_quant.dat*/
+
+
+/*case 15400000:*/
+/* BEGIN - DIF BPF register values from 154_quant.dat*/
+{15400000, DIF_BPF_COEFF01, 0x00010001},
+{15400000, DIF_BPF_COEFF23, 0xfff80009},
+{15400000, DIF_BPF_COEFF45, 0x0015ffca},
+{15400000, DIF_BPF_COEFF67, 0x00050074},
+{15400000, DIF_BPF_COEFF89, 0xff81ff9f},
+{15400000, DIF_BPF_COEFF1011, 0x013dff82},
+{15400000, DIF_BPF_COEFF1213, 0xfe710221},
+{15400000, DIF_BPF_COEFF1415, 0x007cfc80},
+{15400000, DIF_BPF_COEFF1617, 0x024102ed},
+{15400000, DIF_BPF_COEFF1819, 0xfa940090},
+{15400000, DIF_BPF_COEFF2021, 0x0680fa1e},
+{15400000, DIF_BPF_COEFF2223, 0xfc9b09cd},
+{15400000, DIF_BPF_COEFF2425, 0xfc73f736},
+{15400000, DIF_BPF_COEFF2627, 0x0ad501d0},
+{15400000, DIF_BPF_COEFF2829, 0xf2740820},
+{15400000, DIF_BPF_COEFF3031, 0x08c9f0bd},
+{15400000, DIF_BPF_COEFF3233, 0x01d40eb9},
+{15400000, DIF_BPF_COEFF3435, 0xf371f9f1},
+{15400000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 154_quant.dat*/
+
+
+/*case 15500000:*/
+/* BEGIN - DIF BPF register values from 155_quant.dat*/
+{15500000, DIF_BPF_COEFF01, 0x00000002},
+{15500000, DIF_BPF_COEFF23, 0xfff80002},
+{15500000, DIF_BPF_COEFF45, 0x001effd5},
+{15500000, DIF_BPF_COEFF67, 0xffe5007f},
+{15500000, DIF_BPF_COEFF89, 0xffb4ff5b},
+{15500000, DIF_BPF_COEFF1011, 0x01280000},
+{15500000, DIF_BPF_COEFF1213, 0xfe2401b0},
+{15500000, DIF_BPF_COEFF1415, 0x0146fc70},
+{15500000, DIF_BPF_COEFF1617, 0x014d03c6},
+{15500000, DIF_BPF_COEFF1819, 0xfb10ff32},
+{15500000, DIF_BPF_COEFF2021, 0x0701fb41},
+{15500000, DIF_BPF_COEFF2223, 0xfb3709a1},
+{15500000, DIF_BPF_COEFF2425, 0xfe00f644},
+{15500000, DIF_BPF_COEFF2627, 0x0a000345},
+{15500000, DIF_BPF_COEFF2829, 0xf2350708},
+{15500000, DIF_BPF_COEFF3031, 0x09b2f104},
+{15500000, DIF_BPF_COEFF3233, 0x01050eff},
+{15500000, DIF_BPF_COEFF3435, 0xf3baf9be},
+{15500000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 155_quant.dat*/
+
+
+/*case 15600000:*/
+/* BEGIN - DIF BPF register values from 156_quant.dat*/
+{15600000, DIF_BPF_COEFF01, 0x00000003},
+{15600000, DIF_BPF_COEFF23, 0xfff9fffb},
+{15600000, DIF_BPF_COEFF45, 0x0022ffe6},
+{15600000, DIF_BPF_COEFF67, 0xffc9007a},
+{15600000, DIF_BPF_COEFF89, 0xfff0ff29},
+{15600000, DIF_BPF_COEFF1011, 0x00f2007e},
+{15600000, DIF_BPF_COEFF1213, 0xfe01011b},
+{15600000, DIF_BPF_COEFF1415, 0x01f6fc9e},
+{15600000, DIF_BPF_COEFF1617, 0x00440467},
+{15600000, DIF_BPF_COEFF1819, 0xfbccfdde},
+{15600000, DIF_BPF_COEFF2021, 0x0738fc90},
+{15600000, DIF_BPF_COEFF2223, 0xf9f70934},
+{15600000, DIF_BPF_COEFF2425, 0xff99f582},
+{15600000, DIF_BPF_COEFF2627, 0x090204b0},
+{15600000, DIF_BPF_COEFF2829, 0xf21a05e1},
+{15600000, DIF_BPF_COEFF3031, 0x0a8df15a},
+{15600000, DIF_BPF_COEFF3233, 0x00340f41},
+{15600000, DIF_BPF_COEFF3435, 0xf405f98b},
+{15600000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 156_quant.dat*/
+
+
+/*case 15700000:*/
+/* BEGIN - DIF BPF register values from 157_quant.dat*/
+{15700000, DIF_BPF_COEFF01, 0x00000003},
+{15700000, DIF_BPF_COEFF23, 0xfffcfff4},
+{15700000, DIF_BPF_COEFF45, 0x0020fffa},
+{15700000, DIF_BPF_COEFF67, 0xffb40064},
+{15700000, DIF_BPF_COEFF89, 0x002fff11},
+{15700000, DIF_BPF_COEFF1011, 0x00a400f0},
+{15700000, DIF_BPF_COEFF1213, 0xfe0d006e},
+{15700000, DIF_BPF_COEFF1415, 0x0281fd09},
+{15700000, DIF_BPF_COEFF1617, 0xff3604c9},
+{15700000, DIF_BPF_COEFF1819, 0xfcbffca2},
+{15700000, DIF_BPF_COEFF2021, 0x0726fdfe},
+{15700000, DIF_BPF_COEFF2223, 0xf8e80888},
+{15700000, DIF_BPF_COEFF2425, 0x0134f4f3},
+{15700000, DIF_BPF_COEFF2627, 0x07e1060c},
+{15700000, DIF_BPF_COEFF2829, 0xf22304af},
+{15700000, DIF_BPF_COEFF3031, 0x0b59f1be},
+{15700000, DIF_BPF_COEFF3233, 0xff640f7d},
+{15700000, DIF_BPF_COEFF3435, 0xf452f959},
+{15700000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 157_quant.dat*/
+
+
+/*case 15800000:*/
+/* BEGIN - DIF BPF register values from 158_quant.dat*/
+{15800000, DIF_BPF_COEFF01, 0x00000003},
+{15800000, DIF_BPF_COEFF23, 0x0000fff0},
+{15800000, DIF_BPF_COEFF45, 0x001a0010},
+{15800000, DIF_BPF_COEFF67, 0xffaa0041},
+{15800000, DIF_BPF_COEFF89, 0x0067ff13},
+{15800000, DIF_BPF_COEFF1011, 0x0043014a},
+{15800000, DIF_BPF_COEFF1213, 0xfe46ffb9},
+{15800000, DIF_BPF_COEFF1415, 0x02dbfda8},
+{15800000, DIF_BPF_COEFF1617, 0xfe3504e5},
+{15800000, DIF_BPF_COEFF1819, 0xfddcfb8d},
+{15800000, DIF_BPF_COEFF2021, 0x06c9ff7e},
+{15800000, DIF_BPF_COEFF2223, 0xf81107a2},
+{15800000, DIF_BPF_COEFF2425, 0x02c9f49a},
+{15800000, DIF_BPF_COEFF2627, 0x069f0753},
+{15800000, DIF_BPF_COEFF2829, 0xf2500373},
+{15800000, DIF_BPF_COEFF3031, 0x0c14f231},
+{15800000, DIF_BPF_COEFF3233, 0xfe930fb3},
+{15800000, DIF_BPF_COEFF3435, 0xf4a1f927},
+{15800000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 158_quant.dat*/
+
+
+/*case 15900000:*/
+/* BEGIN - DIF BPF register values from 159_quant.dat*/
+{15900000, DIF_BPF_COEFF01, 0xffff0002},
+{15900000, DIF_BPF_COEFF23, 0x0003ffee},
+{15900000, DIF_BPF_COEFF45, 0x000f0023},
+{15900000, DIF_BPF_COEFF67, 0xffac0016},
+{15900000, DIF_BPF_COEFF89, 0x0093ff31},
+{15900000, DIF_BPF_COEFF1011, 0xffdc0184},
+{15900000, DIF_BPF_COEFF1213, 0xfea6ff09},
+{15900000, DIF_BPF_COEFF1415, 0x02fdfe70},
+{15900000, DIF_BPF_COEFF1617, 0xfd5104ba},
+{15900000, DIF_BPF_COEFF1819, 0xff15faac},
+{15900000, DIF_BPF_COEFF2021, 0x06270103},
+{15900000, DIF_BPF_COEFF2223, 0xf7780688},
+{15900000, DIF_BPF_COEFF2425, 0x044df479},
+{15900000, DIF_BPF_COEFF2627, 0x05430883},
+{15900000, DIF_BPF_COEFF2829, 0xf2a00231},
+{15900000, DIF_BPF_COEFF3031, 0x0cbef2b2},
+{15900000, DIF_BPF_COEFF3233, 0xfdc40fe3},
+{15900000, DIF_BPF_COEFF3435, 0xf4f2f8f5},
+{15900000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 159_quant.dat*/
+
+
+/*case 16000000:*/
+/* BEGIN - DIF BPF register values from 160_quant.dat*/
+{16000000, DIF_BPF_COEFF01, 0xffff0001},
+{16000000, DIF_BPF_COEFF23, 0x0006ffef},
+{16000000, DIF_BPF_COEFF45, 0x00020031},
+{16000000, DIF_BPF_COEFF67, 0xffbaffe8},
+{16000000, DIF_BPF_COEFF89, 0x00adff66},
+{16000000, DIF_BPF_COEFF1011, 0xff790198},
+{16000000, DIF_BPF_COEFF1213, 0xff26fe6e},
+{16000000, DIF_BPF_COEFF1415, 0x02e5ff55},
+{16000000, DIF_BPF_COEFF1617, 0xfc99044a},
+{16000000, DIF_BPF_COEFF1819, 0x005bfa09},
+{16000000, DIF_BPF_COEFF2021, 0x0545027f},
+{16000000, DIF_BPF_COEFF2223, 0xf7230541},
+{16000000, DIF_BPF_COEFF2425, 0x05b8f490},
+{16000000, DIF_BPF_COEFF2627, 0x03d20997},
+{16000000, DIF_BPF_COEFF2829, 0xf31300eb},
+{16000000, DIF_BPF_COEFF3031, 0x0d55f341},
+{16000000, DIF_BPF_COEFF3233, 0xfcf6100e},
+{16000000, DIF_BPF_COEFF3435, 0xf544f8c3},
+{16000000, DIF_BPF_COEFF36, 0x110d0000},
+/* END - DIF BPF register values from 160_quant.dat*/
+};
+
+#endif
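
The table above pairs each supported IF frequency (in 100 kHz steps) with the packed DIF band-pass filter coefficient registers DIF_BPF_COEFF01..DIF_BPF_COEFF36, 19 rows per frequency. As a rough illustration of how such a { frequency, register, value } table can be consumed — a minimal sketch only; the entry layout and the write_reg callback are assumptions for illustration, not the driver's actual accessors:

#include <linux/types.h>

/* Illustrative entry layout; the real driver defines its own. */
struct dif_coeff_entry {
	u32 if_freq;		/* IF frequency in Hz, e.g. 12500000 */
	u32 register_address;	/* one of the DIF_BPF_COEFFxx registers */
	u32 value;		/* packed pair of 16-bit coefficients */
};

/* Program every row that matches the requested IF frequency. */
static void dif_program_if(const struct dif_coeff_entry *table, size_t n,
			   u32 if_freq,
			   void (*write_reg)(u32 reg, u32 val))
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].if_freq == if_freq)
			write_reg(table[i].register_address, table[i].value);
}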
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index 4ea3776b39fb..5feb3ee640d9 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -29,6 +29,10 @@
#include "xc5000.h"
#include "dvb_dummy_fe.h"
+#include "s5h1432.h"
+#include "tda18271.h"
+#include "s5h1411.h"
+#include "lgdt3305.h"
MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
@@ -65,6 +69,72 @@ struct cx231xx_dvb {
struct dvb_net net;
};
+static struct s5h1432_config dvico_s5h1432_config = {
+ .output_mode = S5H1432_SERIAL_OUTPUT,
+ .gpio = S5H1432_GPIO_ON,
+ .qam_if = S5H1432_IF_4000,
+ .vsb_if = S5H1432_IF_4000,
+ .inversion = S5H1432_INVERSION_OFF,
+ .status_mode = S5H1432_DEMODLOCKING,
+ .mpeg_timing = S5H1432_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map cnxt_rde253s_tda18271_std_map = {
+ .dvbt_6 = { .if_freq = 4000, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_7 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_8 = { .if_freq = 4000, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
+static struct tda18271_config cnxt_rde253s_tunerconfig = {
+ .std_map = &cnxt_rde253s_tda18271_std_map,
+ .gate = TDA18271_GATE_ANALOG,
+};
+
+static struct s5h1411_config tda18271_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_3250,
+ .qam_if = S5H1411_IF_4000,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+static struct s5h1411_config xc5000_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_3250,
+ .qam_if = S5H1411_IF_3250,
+ .inversion = S5H1411_INVERSION_OFF,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct lgdt3305_config hcw_lgdt3305_config = {
+ .i2c_addr = 0x0e,
+ .mpeg_mode = LGDT3305_MPEG_SERIAL,
+ .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
+ .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 1,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+ .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x58, },
+ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x58, },
+};
+
+static struct tda18271_config hcw_tda18271_config = {
+ .std_map = &hauppauge_tda18271_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+};
+
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
@@ -128,11 +198,33 @@ static inline int dvb_isoc_copy(struct cx231xx *dev, struct urb *urb)
continue;
}
- dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer +
- urb->iso_frame_desc[i].offset,
- urb->iso_frame_desc[i].actual_length);
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset,
+ urb->iso_frame_desc[i].actual_length);
+ }
+
+ return 0;
+}
+
+static inline int dvb_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+ if (!dev)
+ return 0;
+
+ if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ return 0;
+
+ if (urb->status < 0) {
+ print_err_status(dev, -1, urb->status);
+ if (urb->status == -ENOENT)
+ return 0;
}
+ /* Feed the transport payload into the kernel demux */
+ dvb_dmx_swfilter(&dev->dvb->demux,
+ urb->transfer_buffer, urb->actual_length);
+
return 0;
}
@@ -141,21 +233,44 @@ static int start_streaming(struct cx231xx_dvb *dvb)
int rc;
struct cx231xx *dev = dvb->adapter.priv;
- usb_set_interface(dev->udev, 0, 1);
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (rc < 0)
- return rc;
+ if (dev->USE_ISO) {
+ cx231xx_info("DVB transfer mode is ISO.\n");
+ mutex_lock(&dev->i2c_lock);
+ cx231xx_enable_i2c_port_3(dev, false);
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
+ cx231xx_enable_i2c_port_3(dev, true);
+ mutex_unlock(&dev->i2c_lock);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (rc < 0)
+ return rc;
+ dev->mode_tv = 1;
+ return cx231xx_init_isoc(dev, CX231XX_DVB_MAX_PACKETS,
+ CX231XX_DVB_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ dvb_isoc_copy);
+ } else {
+ cx231xx_info("DVB transfer mode is BULK.\n");
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (rc < 0)
+ return rc;
+ dev->mode_tv = 1;
+ return cx231xx_init_bulk(dev, CX231XX_DVB_MAX_PACKETS,
+ CX231XX_DVB_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ dvb_bulk_copy);
+ }
- return cx231xx_init_isoc(dev, CX231XX_DVB_MAX_PACKETS,
- CX231XX_DVB_NUM_BUFS,
- CX231XX_DVB_MAX_PACKETSIZE, dvb_isoc_copy);
}
static int stop_streaming(struct cx231xx_dvb *dvb)
{
struct cx231xx *dev = dvb->adapter.priv;
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
@@ -216,7 +331,11 @@ static int cx231xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
static struct xc5000_config cnxt_rde250_tunerconfig = {
.i2c_address = 0x61,
- .if_khz = 5380,
+ .if_khz = 4000,
+};
+static struct xc5000_config cnxt_rdu250_tunerconfig = {
+ .i2c_address = 0x61,
+ .if_khz = 3250,
};
/* ------------------------------------------------------------------ */
@@ -228,7 +347,7 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
struct xc5000_config cfg;
memset(&cfg, 0, sizeof(cfg));
- cfg.i2c_adap = &dev->i2c_bus[1].i2c_adap;
+ cfg.i2c_adap = &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap;
cfg.i2c_addr = addr;
if (!dev->dvb->frontend) {
@@ -268,7 +387,6 @@ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq)
/*params.audmode = ; */
/* Set the analog parameters to set the frequency */
- cx231xx_info("Setting Frequency for XC5000\n");
dops->set_analog_params(dev->dvb->frontend, &params);
}
@@ -445,19 +563,21 @@ static int dvb_init(struct cx231xx *dev)
dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;
+ mutex_lock(&dev->lock);
cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ cx231xx_demod_reset(dev);
/* init frontend */
switch (dev->model) {
+ case CX231XX_BOARD_CNXT_CARRAERA:
case CX231XX_BOARD_CNXT_RDE_250:
- /* dev->dvb->frontend = dvb_attach(s5h1411_attach,
- &dvico_s5h1411_config,
- &dev->i2c_bus[1].i2c_adap); */
- dev->dvb->frontend = dvb_attach(dvb_dummy_fe_ofdm_attach);
+ dev->dvb->frontend = dvb_attach(s5h1432_attach,
+ &dvico_s5h1432_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
- ": Failed to attach dummy front end\n");
+ ": Failed to attach s5h1432 front end\n");
result = -EINVAL;
goto out_free;
}
@@ -466,16 +586,19 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[1].i2c_adap,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
&cnxt_rde250_tunerconfig)) {
result = -EINVAL;
goto out_free;
}
break;
+ case CX231XX_BOARD_CNXT_SHELBY:
case CX231XX_BOARD_CNXT_RDU_250:
- dev->dvb->frontend = dvb_attach(dvb_dummy_fe_ofdm_attach);
+ dev->dvb->frontend = dvb_attach(s5h1411_attach,
+ &xc5000_s5h1411_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
if (dev->dvb->frontend == NULL) {
printk(DRIVER_NAME
@@ -488,12 +611,82 @@ static int dvb_init(struct cx231xx *dev)
dvb->frontend->callback = cx231xx_tuner_callback;
if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
- &dev->i2c_bus[1].i2c_adap,
- &cnxt_rde250_tunerconfig)) {
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rdu250_tunerconfig)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+
+ dev->dvb->frontend = dvb_attach(s5h1432_attach,
+ &dvico_s5h1432_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach s5h1432 front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rde253s_tunerconfig)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
+ case CX231XX_BOARD_CNXT_RDU_253S:
+
+ dev->dvb->frontend = dvb_attach(s5h1411_attach,
+ &tda18271_s5h1411_config,
+ &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach dummy front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &cnxt_rde253s_tunerconfig)) {
result = -EINVAL;
goto out_free;
}
break;
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+
+ printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n",
+ __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));
+
+ dev->dvb->frontend = dvb_attach(lgdt3305_attach,
+ &hcw_lgdt3305_config,
+ &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap);
+
+ if (dev->dvb->frontend == NULL) {
+ printk(DRIVER_NAME
+ ": Failed to attach LG3305 front end\n");
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ dvb_attach(tda18271_attach, dev->dvb->frontend,
+ 0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
+ &hcw_tda18271_config);
+ break;
+
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
@@ -513,15 +706,18 @@ static int dvb_init(struct cx231xx *dev)
if (result < 0)
goto out_free;
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
+
printk(KERN_INFO "Successfully loaded cx231xx-dvb\n");
- return 0;
-out_free:
+ret:
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ mutex_unlock(&dev->lock);
+ return result;
+
+out_free:
kfree(dvb);
dev->dvb = NULL;
- return result;
+ goto ret;
}
static int dvb_fini(struct cx231xx *dev)
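
The dvb_init() rework above takes dev->lock around the whole frontend/tuner probe and makes the success path and the out_free error path converge on a single ret: label that suspends the device and releases the mutex exactly once. A minimal sketch of that exit-path pattern, with illustrative names (only the lock/suspend-on-every-path idea is taken from the patch):

static int example_dvb_init(struct example_dev *dev)
{
	int result;

	mutex_lock(&dev->lock);

	result = example_attach_frontend(dev);	/* demod + tuner attach */
	if (result < 0)
		goto out_free;

	/* success falls through to the common exit */
ret:
	example_set_mode(dev, EXAMPLE_SUSPEND);	/* runs on every path */
	mutex_unlock(&dev->lock);
	return result;

out_free:
	example_free_frontend(dev);		/* error-only cleanup */
	goto ret;
}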
diff --git a/drivers/media/video/cx231xx/cx231xx-i2c.c b/drivers/media/video/cx231xx/cx231xx-i2c.c
index 58d9cc0867b9..835670623dfb 100644
--- a/drivers/media/video/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/video/cx231xx/cx231xx-i2c.c
@@ -359,7 +359,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
if (num <= 0)
return 0;
-
+ mutex_lock(&dev->i2c_lock);
for (i = 0; i < num; i++) {
addr = msgs[i].addr >> 1;
@@ -372,6 +372,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
rc = cx231xx_i2c_check_for_device(i2c_adap, &msgs[i]);
if (rc < 0) {
dprintk2(2, " no device\n");
+ mutex_unlock(&dev->i2c_lock);
return rc;
}
@@ -384,7 +385,7 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
}
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr
- && (msgs[i].len <= 2) && (bus->nr < 2)) {
+ && (msgs[i].len <= 2) && (bus->nr < 3)) {
/* read bytes */
rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap,
&msgs[i],
@@ -407,10 +408,11 @@ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap,
if (i2c_debug >= 2)
printk("\n");
}
-
+ mutex_unlock(&dev->i2c_lock);
return num;
err:
dprintk2(2, " ERROR: %i\n", rc);
+ mutex_unlock(&dev->i2c_lock);
return rc;
}
@@ -507,9 +509,6 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
if (0 == bus->i2c_rc) {
if (i2c_scan)
cx231xx_do_i2c_scan(dev, &bus->i2c_client);
-
- /* Instantiate the IR receiver device, if present */
- cx231xx_register_i2c_ir(dev);
} else
cx231xx_warn("%s: i2c bus %d register FAILED\n",
dev->name, bus->nr);
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
deleted file mode 100644
index fd099153b746..000000000000
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- handle cx231xx IR remotes via linux kernel input layer.
-
- Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
- Based on em28xx driver
-
- < This is a place holder for IR now.>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-
-#include "cx231xx.h"
-
-static unsigned int ir_debug;
-module_param(ir_debug, int, 0644);
-MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
-
-#define MODULE_NAME "cx231xx"
-
-#define i2cdprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
-#define dprintk(fmt, arg...) \
- if (ir_debug) { \
- printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
-
-/**********************************************************
- Polling structure used by cx231xx IR's
- **********************************************************/
-
-struct cx231xx_ir_poll_result {
- unsigned int toggle_bit:1;
- unsigned int read_count:7;
- u8 rc_address;
- u8 rc_data[4];
-};
-
-struct cx231xx_IR {
- struct cx231xx *dev;
- struct input_dev *input;
- char name[32];
- char phys[32];
-
- /* poll external decoder */
- int polling;
- struct work_struct work;
- struct timer_list timer;
- unsigned int last_readcount;
-
- int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *);
-};
-
-/**********************************************************
- Polling code for cx231xx
- **********************************************************/
-
-static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
-{
- int result;
- struct cx231xx_ir_poll_result poll_result;
-
- /* read the registers containing the IR status */
- result = ir->get_key(ir, &poll_result);
- if (result < 0) {
- dprintk("ir->get_key() failed %d\n", result);
- return;
- }
-
- dprintk("ir->get_key result tb=%02x rc=%02x lr=%02x data=%02x\n",
- poll_result.toggle_bit, poll_result.read_count,
- ir->last_readcount, poll_result.rc_data[0]);
-
- if (poll_result.read_count > 0 &&
- poll_result.read_count != ir->last_readcount)
- ir_keydown(ir->input,
- poll_result.rc_data[0],
- poll_result.toggle_bit);
-
- if (ir->dev->chip_id == CHIP_ID_EM2874)
- /* The em2874 clears the readcount field every time the
- register is read. The em2860/2880 datasheet says that it
- is supposed to clear the readcount, but it doesn't. So with
- the em2874, we are looking for a non-zero read count as
- opposed to a readcount that is incrementing */
- ir->last_readcount = 0;
- else
- ir->last_readcount = poll_result.read_count;
-
- }
-}
-
-static void ir_timer(unsigned long data)
-{
- struct cx231xx_IR *ir = (struct cx231xx_IR *)data;
-
- schedule_work(&ir->work);
-}
-
-static void cx231xx_ir_work(struct work_struct *work)
-{
- struct cx231xx_IR *ir = container_of(work, struct cx231xx_IR, work);
-
- cx231xx_ir_handle_key(ir);
- mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
-}
-
-void cx231xx_ir_start(struct cx231xx_IR *ir)
-{
- setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
- INIT_WORK(&ir->work, cx231xx_ir_work);
- schedule_work(&ir->work);
-}
-
-static void cx231xx_ir_stop(struct cx231xx_IR *ir)
-{
- del_timer_sync(&ir->timer);
- flush_scheduled_work();
-}
-
-int cx231xx_ir_init(struct cx231xx *dev)
-{
- struct cx231xx_IR *ir;
- struct input_dev *input_dev;
- u8 ir_config;
- int err = -ENOMEM;
-
- if (dev->board.ir_codes == NULL) {
- /* No remote control support */
- return 0;
- }
-
- ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ir || !input_dev)
- goto err_out_free;
-
- ir->input = input_dev;
-
- /* Setup the proper handler based on the chip */
- switch (dev->chip_id) {
- default:
- printk("Unrecognized cx231xx chip id: IR not supported\n");
- goto err_out_free;
- }
-
- /* This is how often we ask the chip for IR information */
- ir->polling = 100; /* ms */
-
- /* init input device */
- snprintf(ir->name, sizeof(ir->name), "cx231xx IR (%s)", dev->name);
-
- usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
- strlcat(ir->phys, "/input0", sizeof(ir->phys));
-
- input_dev->name = ir->name;
- input_dev->phys = ir->phys;
- input_dev->id.bustype = BUS_USB;
- input_dev->id.version = 1;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
-
- input_dev->dev.parent = &dev->udev->dev;
- /* record handles to ourself */
- ir->dev = dev;
- dev->ir = ir;
-
- cx231xx_ir_start(ir);
-
- /* all done */
- err = __ir_input_register(ir->input, dev->board.ir_codes,
- NULL, MODULE_NAME);
- if (err)
- goto err_out_stop;
-
- return 0;
-err_out_stop:
- cx231xx_ir_stop(ir);
- dev->ir = NULL;
-err_out_free:
- kfree(ir);
- return err;
-}
-
-int cx231xx_ir_fini(struct cx231xx *dev)
-{
- struct cx231xx_IR *ir = dev->ir;
-
- /* skip detach on non attached boards */
- if (!ir)
- return 0;
-
- cx231xx_ir_stop(ir);
- ir_input_unregister(ir->input);
- kfree(ir);
-
- /* done */
- dev->ir = NULL;
- return 0;
-}
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 689c5e25776c..1d914488dbb3 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -102,7 +102,7 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer;
@@ -180,7 +180,7 @@ vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2);
+ *size = (dev->width * height * 2 * 2);
if (0 == *count)
*count = CX231XX_DEF_VBI_BUF;
@@ -209,8 +209,8 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
VIDEOBUF_ACTIVE, it won't be, though.
*/
spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.isoc_ctl.buf == buf)
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ if (dev->vbi_mode.bulk_ctl.buf == buf)
+ dev->vbi_mode.bulk_ctl.buf = NULL;
spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
videobuf_vmalloc_free(&buf->vb);
@@ -230,7 +230,7 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height);
+ buf->vb.size = ((dev->width << 1) * height * 2);
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
@@ -246,7 +246,7 @@ vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
goto fail;
}
- if (!dev->vbi_mode.isoc_ctl.num_bufs)
+ if (!dev->vbi_mode.bulk_ctl.num_bufs)
urb_init = 1;
if (urb_init) {
@@ -328,7 +328,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->vbi_mode.slock);
- rc = dev->vbi_mode.isoc_ctl.isoc_copy(dev, urb);
+ rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->vbi_mode.slock);
/* Reset status */
@@ -351,34 +351,34 @@ void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
cx231xx_info(DRIVER_NAME "cx231xx: called cx231xx_uninit_vbi_isoc\n");
- dev->vbi_mode.isoc_ctl.nfields = -1;
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
- urb = dev->vbi_mode.isoc_ctl.urb[i];
+ dev->vbi_mode.bulk_ctl.nfields = -1;
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
+ urb = dev->vbi_mode.bulk_ctl.urb[i];
if (urb) {
if (!irqs_disabled())
usb_kill_urb(urb);
else
usb_unlink_urb(urb);
- if (dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
+ if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
- kfree(dev->vbi_mode.isoc_ctl.
+ kfree(dev->vbi_mode.bulk_ctl.
transfer_buffer[i]);
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
NULL;
}
usb_free_urb(urb);
- dev->vbi_mode.isoc_ctl.urb[i] = NULL;
+ dev->vbi_mode.bulk_ctl.urb[i] = NULL;
}
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] = NULL;
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
}
- kfree(dev->vbi_mode.isoc_ctl.urb);
- kfree(dev->vbi_mode.isoc_ctl.transfer_buffer);
+ kfree(dev->vbi_mode.bulk_ctl.urb);
+ kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);
- dev->vbi_mode.isoc_ctl.urb = NULL;
- dev->vbi_mode.isoc_ctl.transfer_buffer = NULL;
- dev->vbi_mode.isoc_ctl.num_bufs = 0;
+ dev->vbi_mode.bulk_ctl.urb = NULL;
+ dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
+ dev->vbi_mode.bulk_ctl.num_bufs = 0;
cx231xx_capture_start(dev, 0, Vbi);
}
@@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);
*/
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct cx231xx *dev,
+ int (*bulk_copy) (struct cx231xx *dev,
struct urb *urb))
{
struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
@@ -408,8 +408,8 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
usb_rcvbulkpipe(dev->udev,
dev->vbi_mode.end_point_addr));
- dev->vbi_mode.isoc_ctl.isoc_copy = isoc_copy;
- dev->vbi_mode.isoc_ctl.num_bufs = num_bufs;
+ dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
+ dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
dma_q->pos = 0;
dma_q->is_partial_line = 0;
dma_q->last_sav = 0;
@@ -421,42 +421,42 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
for (i = 0; i < 8; i++)
dma_q->partial_buf[i] = 0;
- dev->vbi_mode.isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
+ dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.urb) {
+ if (!dev->vbi_mode.bulk_ctl.urb) {
cx231xx_errdev("cannot alloc memory for usb buffers\n");
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.transfer_buffer =
+ dev->vbi_mode.bulk_ctl.transfer_buffer =
kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.transfer_buffer) {
+ if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
cx231xx_errdev("cannot allocate memory for usbtransfer\n");
- kfree(dev->vbi_mode.isoc_ctl.urb);
+ kfree(dev->vbi_mode.bulk_ctl.urb);
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.max_pkt_size = max_pkt_size;
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
- sb_size = max_packets * dev->vbi_mode.isoc_ctl.max_pkt_size;
+ sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;
/* allocate urbs and transfer buffers */
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
cx231xx_err(DRIVER_NAME
- ": cannot alloc isoc_ctl.urb %i\n", i);
+ ": cannot alloc bulk_ctl.urb %i\n", i);
cx231xx_uninit_vbi_isoc(dev);
return -ENOMEM;
}
- dev->vbi_mode.isoc_ctl.urb[i] = urb;
+ dev->vbi_mode.bulk_ctl.urb[i] = urb;
urb->transfer_flags = 0;
- dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
- if (!dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
+ if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
cx231xx_err(DRIVER_NAME
": unable to allocate %i bytes for transfer"
" buffer %i%s\n", sb_size, i,
@@ -467,15 +467,15 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
usb_fill_bulk_urb(urb, dev->udev, pipe,
- dev->vbi_mode.isoc_ctl.transfer_buffer[i],
+ dev->vbi_mode.bulk_ctl.transfer_buffer[i],
sb_size, cx231xx_irq_vbi_callback, dma_q);
}
init_waitqueue_head(&dma_q->wq);
/* submit urbs and enables IRQ */
- for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
- rc = usb_submit_urb(dev->vbi_mode.isoc_ctl.urb[i], GFP_ATOMIC);
+ for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
+ rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
if (rc) {
cx231xx_err(DRIVER_NAME
": submit of urb %i failed (error=%i)\n", i,
@@ -536,7 +536,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
buf->vb.field_count++;
do_gettimeofday(&buf->vb.ts);
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -549,11 +549,16 @@ u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer *buf;
u32 _line_size = dev->width * 2;
- if (dma_q->current_field != field_number)
+ if (dma_q->current_field == -1) {
+ /* Just starting up */
cx231xx_reset_vbi_buffer(dev, dma_q);
+ }
+
+ if (dma_q->current_field != field_number)
+ dma_q->lines_completed = 0;
/* get the buffer pointer */
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
/* Remember the field number for next time */
dma_q->current_field = field_number;
@@ -597,8 +602,8 @@ u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
vbi_buffer_filled(dev, dma_q, buf);
dma_q->pos = 0;
- buf = NULL;
dma_q->lines_completed = 0;
+ cx231xx_reset_vbi_buffer(dev, dma_q);
}
}
@@ -618,7 +623,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
if (list_empty(&dma_q->active)) {
cx231xx_err(DRIVER_NAME ": No active queue to serve\n");
- dev->vbi_mode.isoc_ctl.buf = NULL;
+ dev->vbi_mode.bulk_ctl.buf = NULL;
*buf = NULL;
return;
}
@@ -630,7 +635,7 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
outp = videobuf_to_vmalloc(&(*buf)->vb);
memset(outp, 0, (*buf)->vb.size);
- dev->vbi_mode.isoc_ctl.buf = *buf;
+ dev->vbi_mode.bulk_ctl.buf = *buf;
return;
}
@@ -640,7 +645,7 @@ void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
{
struct cx231xx_buffer *buf;
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
if (buf == NULL) {
/* first try to get the buffer */
@@ -664,7 +669,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
void *startwrite;
int offset, lencopy;
- buf = dev->vbi_mode.isoc_ctl.buf;
+ buf = dev->vbi_mode.bulk_ctl.buf;
if (buf == NULL)
return -EINVAL;
@@ -679,6 +684,11 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
offset = (dma_q->lines_completed * _line_size) +
current_line_bytes_copied;
+ if (dma_q->current_field == 2) {
+ /* Populate the second half of the frame */
+ offset += (dev->width * 2 * dma_q->lines_per_field);
+ }
+
/* prepare destination address */
startwrite = p_out_buffer + offset;
@@ -697,5 +707,8 @@ u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- return (dma_q->lines_completed == height) ? 1 : 0;
+ if (dma_q->lines_completed == height && dma_q->current_field == 2)
+ return 1;
+ else
+ return 0;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.h b/drivers/media/video/cx231xx/cx231xx-vbi.h
index 89c7fe80b261..16c7d20a22a4 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.h
@@ -41,7 +41,7 @@ extern struct videobuf_queue_ops cx231xx_vbi_qops;
/* stream functions */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct cx231xx *dev,
+ int (*bulk_copy) (struct cx231xx *dev,
struct urb *urb));
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev);
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index e76014561aa7..b13b69fb2af6 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -237,7 +237,10 @@ static inline void buffer_filled(struct cx231xx *dev,
buf->vb.field_count++;
do_gettimeofday(&buf->vb.ts);
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
@@ -295,7 +298,10 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
if (list_empty(&dma_q->active)) {
cx231xx_isocdbg("No active queue to serve\n");
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
*buf = NULL;
return;
}
@@ -307,7 +313,10 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
outp = videobuf_to_vmalloc(&(*buf)->vb);
memset(outp, 0, (*buf)->vb.size);
- dev->video_mode.isoc_ctl.buf = *buf;
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = *buf;
+ else
+ dev->video_mode.bulk_ctl.buf = *buf;
return;
}
@@ -418,6 +427,93 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
return rc;
}
+static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
+{
+ struct cx231xx_buffer *buf;
+ struct cx231xx_dmaqueue *dma_q = urb->context;
+ unsigned char *outp = NULL;
+ int rc = 1;
+ unsigned char *p_buffer;
+ u32 bytes_parsed = 0, buffer_size = 0;
+ u8 sav_eav = 0;
+
+ if (!dev)
+ return 0;
+
+ if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ return 0;
+
+ if (urb->status < 0) {
+ print_err_status(dev, -1, urb->status);
+ if (urb->status == -ENOENT)
+ return 0;
+ }
+
+ buf = dev->video_mode.bulk_ctl.buf;
+ if (buf != NULL)
+ outp = videobuf_to_vmalloc(&buf->vb);
+
+ if (1) {
+
+ /* get buffer pointer and length */
+ p_buffer = urb->transfer_buffer;
+ buffer_size = urb->actual_length;
+ bytes_parsed = 0;
+
+ if (dma_q->is_partial_line) {
+ /* Handle the case of a partial line */
+ sav_eav = dma_q->last_sav;
+ } else {
+ /* Check for a SAV/EAV overlapping
+ the buffer boundary */
+ sav_eav =
+ cx231xx_find_boundary_SAV_EAV(p_buffer,
+ dma_q->partial_buf,
+ &bytes_parsed);
+ }
+
+ sav_eav &= 0xF0;
+ /* Get the first line if we have some portion of an SAV/EAV from
+ the last buffer or a partial line */
+ if (sav_eav) {
+ bytes_parsed += cx231xx_get_video_line(dev, dma_q,
+ sav_eav, /* SAV/EAV */
+ p_buffer + bytes_parsed, /* p_buffer */
+ buffer_size - bytes_parsed);/* buf size */
+ }
+
+ /* Now parse data that is completely in this buffer */
+ /* dma_q->is_partial_line = 0; */
+
+ while (bytes_parsed < buffer_size) {
+ u32 bytes_used = 0;
+
+ sav_eav = cx231xx_find_next_SAV_EAV(
+ p_buffer + bytes_parsed, /* p_buffer */
+ buffer_size - bytes_parsed, /* buf size */
+ &bytes_used);/* bytes used to get SAV/EAV */
+
+ bytes_parsed += bytes_used;
+
+ sav_eav &= 0xF0;
+ if (sav_eav && (bytes_parsed < buffer_size)) {
+ bytes_parsed += cx231xx_get_video_line(dev,
+ dma_q, sav_eav, /* SAV/EAV */
+ p_buffer + bytes_parsed,/* p_buffer */
+ buffer_size - bytes_parsed);/*buf size*/
+ }
+ }
+
+ /* Save the last four bytes of the buffer so we can check the
+ buffer boundary condition next time */
+ memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
+ bytes_parsed = 0;
+
+ }
+ return rc;
+}
+
+
u8 cx231xx_find_boundary_SAV_EAV(u8 *p_buffer, u8 *partial_buf,
u32 *p_bytes_used)
{
@@ -533,7 +629,10 @@ u32 cx231xx_copy_video_line(struct cx231xx *dev,
cx231xx_reset_video_buffer(dev, dma_q);
/* get the buffer pointer */
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
/* Remember the field number for next time */
dma_q->current_field = field_number;
@@ -596,7 +695,10 @@ void cx231xx_reset_video_buffer(struct cx231xx *dev,
dma_q->field1_done = 0;
}
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL) {
u8 *outp = NULL;
@@ -626,7 +728,10 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
void *startwrite;
int offset, lencopy;
- buf = dev->video_mode.isoc_ctl.buf;
+ if (dev->USE_ISO)
+ buf = dev->video_mode.isoc_ctl.buf;
+ else
+ buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL)
return -1;
@@ -691,7 +796,6 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = fh->dev;
- struct v4l2_frequency f;
*size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
if (0 == *count)
@@ -700,13 +804,6 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
if (*count < CX231XX_MIN_BUF)
*count = CX231XX_MIN_BUF;
- /* Ask tuner to go to analog mode */
- memset(&f, 0, sizeof(f));
- f.frequency = dev->ctl_freq;
- f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
-
- call_all(dev, tuner, s_frequency, &f);
-
return 0;
}
@@ -730,8 +827,13 @@ static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
VIDEOBUF_ACTIVE, it won't be, though.
*/
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
+ if (dev->USE_ISO) {
+ if (dev->video_mode.isoc_ctl.buf == buf)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ } else {
+ if (dev->video_mode.bulk_ctl.buf == buf)
+ dev->video_mode.bulk_ctl.buf = NULL;
+ }
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
videobuf_vmalloc_free(&buf->vb);
@@ -764,14 +866,27 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
goto fail;
}
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
-
+ if (dev->USE_ISO) {
+ if (!dev->video_mode.isoc_ctl.num_bufs)
+ urb_init = 1;
+ } else {
+ if (!dev->video_mode.bulk_ctl.num_bufs)
+ urb_init = 1;
+ }
+ /*cx231xx_info("urb_init=%d dev->video_mode.max_pkt_size=%d\n",
+ urb_init, dev->video_mode.max_pkt_size);*/
if (urb_init) {
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ dev->mode_tv = 0;
+ if (dev->USE_ISO)
+ rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
CX231XX_NUM_BUFS,
dev->video_mode.max_pkt_size,
cx231xx_isoc_copy);
+ else
+ rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
if (rc < 0)
goto fail;
}
@@ -894,22 +1009,6 @@ static int check_dev(struct cx231xx *dev)
return 0;
}
-static void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale)
-{
- unsigned int maxw = norm_maxw(dev);
- unsigned int maxh = norm_maxh(dev);
-
- *hscale = (((unsigned long)maxw) << 12) / width - 4096L;
- if (*hscale >= 0x4000)
- *hscale = 0x3fff;
-
- *vscale = (((unsigned long)maxh) << 12) / height - 4096L;
- if (*vscale >= 0x4000)
- *vscale = 0x3fff;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -920,8 +1019,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- mutex_lock(&dev->lock);
-
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.pixelformat = dev->format->fourcc;
@@ -931,8 +1028,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -956,7 +1051,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
- unsigned int hscale, vscale;
struct cx231xx_fmt *fmt;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -970,11 +1064,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
height must be even because of interlacing */
v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
- get_scale(dev, width, height, &hscale, &vscale);
-
- width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
- height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
-
f->fmt.pix.width = width;
f->fmt.pix.height = height;
f->fmt.pix.pixelformat = fmt->fourcc;
@@ -999,47 +1088,35 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
vidioc_try_fmt_vid_cap(file, priv, f);
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt) {
- rc = -EINVAL;
- goto out;
- }
+ if (!fmt)
+ return -EINVAL;
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
cx231xx_errdev("%s queue busy\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (dev->stream_on && !fh->stream_on) {
cx231xx_errdev("%s device in use by another fh\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
dev->format = fmt;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
- /* Set the correct alternate setting for this resolution */
- cx231xx_resolution_set(dev);
-
-out:
- mutex_unlock(&dev->lock);
return rc;
}
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id * id)
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
@@ -1052,6 +1129,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
+ struct v4l2_mbus_framefmt mbus_fmt;
struct v4l2_format f;
int rc;
@@ -1061,7 +1139,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
cx231xx_info("vidioc_s_std : 0x%x\n", (unsigned int)*norm);
- mutex_lock(&dev->lock);
dev->norm = *norm;
/* Adjusts width/height, if needed */
@@ -1069,16 +1146,18 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
f.fmt.pix.height = dev->height;
vidioc_try_fmt_vid_cap(file, priv, &f);
- /* set new image size */
- dev->width = f.fmt.pix.width;
- dev->height = f.fmt.pix.height;
- get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
-
call_all(dev, core, s_std, dev->norm);
- mutex_unlock(&dev->lock);
+ /* We need to reset basic properties in the decoder related to
+ resolution (since a standard change affects things like the number
+ of lines in VACT, etc) */
+ v4l2_fill_mbus_format(&mbus_fmt, &f.fmt.pix, V4L2_MBUS_FMT_FIXED);
+ call_all(dev, video, s_mbus_fmt, &mbus_fmt);
+ v4l2_fill_pix_format(&f.fmt.pix, &mbus_fmt);
- cx231xx_resolution_set(dev);
+ /* set new image size */
+ dev->width = f.fmt.pix.width;
+ dev->height = f.fmt.pix.height;
/* do mode control overrides */
cx231xx_do_mode_ctrl_overrides(dev);
@@ -1138,6 +1217,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
struct cx231xx *dev = fh->dev;
int rc;
+ dev->mode_tv = 0;
rc = check_dev(dev);
if (rc < 0)
return rc;
@@ -1147,11 +1227,16 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (0 == INPUT(i)->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
video_mux(dev, i);
- mutex_unlock(&dev->lock);
+ if (INPUT(i)->type == CX231XX_VMUX_TELEVISION ||
+ INPUT(i)->type == CX231XX_VMUX_CABLE) {
+ /* There's a tuner, so reset the standard and put it on the
+ last known frequency (since it was probably powered down
+ until now) */
+ call_all(dev, core, s_std, dev->norm);
+ }
+
return 0;
}
@@ -1227,9 +1312,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
}
*qc = cx231xx_ctls[i].v;
- mutex_lock(&dev->lock);
call_all(dev, core, queryctrl, qc);
- mutex_unlock(&dev->lock);
if (qc->type)
return 0;
@@ -1248,9 +1331,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, core, g_ctrl, ctrl);
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1265,9 +1346,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, core, s_ctrl, ctrl);
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1307,9 +1386,7 @@ static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (0 != t->index)
return -EINVAL;
#if 0
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
#endif
return 0;
}
@@ -1320,14 +1397,11 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- mutex_lock(&dev->lock);
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->ctl_freq;
call_all(dev, tuner, g_frequency, f);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1337,6 +1411,11 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
+ u32 if_frequency = 5400000;
+
+ cx231xx_info("Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
+ f->frequency, f->type);
+ /*cx231xx_info("f->type: 1-radio 2-analogTV 3-digitalTV\n");*/
rc = check_dev(dev);
if (rc < 0)
@@ -1353,21 +1432,34 @@ static int vidioc_s_frequency(struct file *file, void *priv,
/* set pre channel change settings in DIF first */
rc = cx231xx_tuner_pre_channel_change(dev);
- mutex_lock(&dev->lock);
-
dev->ctl_freq = f->frequency;
-
- if (dev->tuner_type == TUNER_XC5000) {
- if (dev->cx231xx_set_analog_freq != NULL)
- dev->cx231xx_set_analog_freq(dev, f->frequency);
- } else
- call_all(dev, tuner, s_frequency, f);
-
- mutex_unlock(&dev->lock);
+ call_all(dev, tuner, s_frequency, f);
/* set post channel change settings in DIF first */
rc = cx231xx_tuner_post_channel_change(dev);
+ if (dev->tuner_type == TUNER_NXP_TDA18271) {
+ if (dev->norm & (V4L2_STD_MN | V4L2_STD_NTSC_443))
+ if_frequency = 5400000; /*5.4MHz */
+ else if (dev->norm & V4L2_STD_B)
+ if_frequency = 6000000; /*6.0MHz */
+ else if (dev->norm & (V4L2_STD_PAL_DK | V4L2_STD_SECAM_DK))
+ if_frequency = 6900000; /*6.9MHz */
+ else if (dev->norm & V4L2_STD_GH)
+ if_frequency = 7100000; /*7.1MHz */
+ else if (dev->norm & V4L2_STD_PAL_I)
+ if_frequency = 7250000; /*7.25MHz */
+ else if (dev->norm & V4L2_STD_SECAM_L)
+ if_frequency = 6900000; /*6.9MHz */
+ else if (dev->norm & V4L2_STD_SECAM_LC)
+ if_frequency = 1250000; /*1.25MHz */
+
+ cx231xx_info("if_frequency is set to %d\n", if_frequency);
+ cx231xx_set_Colibri_For_LowIF(dev, if_frequency, 1, 1);
+
+ update_HH_register_after_set_DIF(dev);
+ }
+
cx231xx_info("Set New FREQUENCY to %d\n", f->frequency);
return rc;
@@ -1445,17 +1537,92 @@ static int vidioc_g_register(struct file *file, void *priv,
case V4L2_CHIP_MATCH_I2C_DRIVER:
call_all(dev, core, g_register, reg);
return 0;
- case V4L2_CHIP_MATCH_I2C_ADDR:
- /* Not supported yet */
- return -EINVAL;
+ case V4L2_CHIP_MATCH_I2C_ADDR:/*for register debug*/
+ switch (reg->match.addr) {
+ case 0: /* Cx231xx - internal registers */
+ ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
+ (u16)reg->reg, value, 4);
+ reg->val = value[0] | value[1] << 8 |
+ value[2] << 16 | value[3] << 24;
+
+ break;
+ case 0x600:/* AFE - read byte */
+ ret = cx231xx_read_i2c_master(dev, AFE_DEVICE_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+
+ case 0x880:/* Video Block - read byte */
+ if (reg->reg < 0x0b) {
+ ret = cx231xx_read_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ } else {
+ ret = cx231xx_read_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ &data, 4 , 0);
+ reg->val = le32_to_cpu(data);
+ }
+ break;
+ case 0x980:
+ ret = cx231xx_read_i2c_master(dev,
+ I2S_BLK_DEVICE_ADDRESS,
+ (u16)reg->reg, 1,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0x400:
+ ret =
+ cx231xx_read_i2c_master(dev, 0x40,
+ (u16)reg->reg, 1,
+ &data, 1 , 0);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0xc01:
+ ret =
+ cx231xx_read_i2c_master(dev, 0xc0,
+ (u16)reg->reg, 2,
+ &data, 38, 1);
+ reg->val = le32_to_cpu(data);
+ break;
+ case 0x022:
+ ret =
+ cx231xx_read_i2c_master(dev, 0x02,
+ (u16)reg->reg, 1,
+ &data, 1, 2);
+ reg->val = le32_to_cpu(data & 0xff);
+ break;
+ case 0x322:
+ ret = cx231xx_read_i2c_master(dev,
+ 0x32,
+ (u16)reg->reg, 1,
+ &data, 4 , 2);
+ reg->val = le32_to_cpu(data);
+ break;
+ case 0x342:
+ ret = cx231xx_read_i2c_master(dev,
+ 0x34,
+ (u16)reg->reg, 1,
+ &data, 4 , 2);
+ reg->val = le32_to_cpu(data);
+ break;
+
+ default:
+ cx231xx_info("no match device address!!\n");
+ break;
+ }
+ return ret < 0 ? ret : 0;
+ /*return -EINVAL;*/
default:
if (!v4l2_chip_match_host(&reg->match))
return -EINVAL;
}
- mutex_lock(&dev->lock);
call_all(dev, core, g_register, reg);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -1531,14 +1698,96 @@ static int vidioc_s_register(struct file *file, void *priv,
}
}
return ret < 0 ? ret : 0;
+ case V4L2_CHIP_MATCH_I2C_ADDR:
+ {
+ value = (u32) buf & 0xffffffff;
+
+ switch (reg->match.addr) {
+ case 0:/*cx231xx internal registers*/
+ data[0] = (u8) value;
+ data[1] = (u8) (value >> 8);
+ data[2] = (u8) (value >> 16);
+ data[3] = (u8) (value >> 24);
+ ret = cx231xx_write_ctrl_reg(dev,
+ VRT_SET_REGISTER,
+ (u16)reg->reg, data,
+ 4);
+ break;
+ case 0x600: /* AFE - write byte */
+ ret = cx231xx_write_i2c_master(dev,
+ AFE_DEVICE_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 1 , 0);
+ break;
+ case 0x880: /* Video Block - write byte */
+ if (reg->reg < 0x0b)
+ cx231xx_write_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 1, 0);
+ else
+ cx231xx_write_i2c_master(dev,
+ VID_BLK_I2C_ADDRESS,
+ (u16)reg->reg, 2,
+ value, 4, 0);
+ break;
+ case 0x980:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ I2S_BLK_DEVICE_ADDRESS,
+ (u16)reg->reg, 1,
+ value, 1, 0);
+ break;
+ case 0x400:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x40,
+ (u16)reg->reg, 1,
+ value, 1, 0);
+ break;
+ case 0xc01:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0xc0,
+ (u16)reg->reg, 1,
+ value, 1, 1);
+ break;
+
+ case 0x022:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x02,
+ (u16)reg->reg, 1,
+ value, 1, 2);
+ break;
+ case 0x322:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x32,
+ (u16)reg->reg, 1,
+ value, 4, 2);
+ break;
+
+ case 0x342:
+ ret =
+ cx231xx_write_i2c_master(dev,
+ 0x34,
+ (u16)reg->reg, 1,
+ value, 4, 2);
+ break;
+ default:
+ cx231xx_info("no match device address, "
+ "the value is %x\n", reg->match.addr);
+ break;
+
+ }
+
+ }
default:
break;
}
- mutex_lock(&dev->lock);
call_all(dev, core, s_register, reg);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -1575,7 +1824,6 @@ static int vidioc_streamon(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
if (likely(rc >= 0))
@@ -1583,8 +1831,6 @@ static int vidioc_streamon(struct file *file, void *priv,
call_all(dev, video, s_stream, 1);
- mutex_unlock(&dev->lock);
-
return rc;
}
@@ -1605,15 +1851,11 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (type != fh->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
cx25840_call(dev, video, s_stream, 0);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1668,8 +1910,6 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
f->fmt.sliced.service_set = 0;
call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
@@ -1677,7 +1917,6 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1692,9 +1931,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
- mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
return -EINVAL;
@@ -1709,12 +1946,10 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
-
- f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
- 35468950 : 28636363;
+ f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 64 * 4;
+ f->fmt.vbi.offset = 0;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
f->fmt.vbi.count[0] = (dev->norm & V4L2_STD_625_50) ?
@@ -1739,11 +1974,10 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
}
f->type = V4L2_BUF_TYPE_VBI_CAPTURE;
- f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
- 35468950 : 28636363;
+ f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 244;
+ f->fmt.vbi.offset = 0;
f->fmt.vbi.flags = 0;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
@@ -1847,9 +2081,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1880,9 +2112,7 @@ static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1941,8 +2171,6 @@ static int cx231xx_v4l2_open(struct file *filp)
break;
}
- mutex_lock(&dev->lock);
-
cx231xx_videodbg("open dev=%s type=%s users=%d\n",
video_device_node_name(vdev), v4l2_type_names[fh_type],
dev->users);
@@ -1952,7 +2180,6 @@ static int cx231xx_v4l2_open(struct file *filp)
if (errCode < 0) {
cx231xx_errdev
("Device locked on digital mode. Can't open analog\n");
- mutex_unlock(&dev->lock);
return -EBUSY;
}
#endif
@@ -1960,7 +2187,6 @@ static int cx231xx_v4l2_open(struct file *filp)
fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
if (!fh) {
cx231xx_errdev("cx231xx-video.c: Out of memory?!\n");
- mutex_unlock(&dev->lock);
return -ENOMEM;
}
fh->dev = dev;
@@ -1971,16 +2197,18 @@ static int cx231xx_v4l2_open(struct file *filp)
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
- dev->hscale = 0;
- dev->vscale = 0;
/* Power up in Analog TV mode */
- cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER ||
+ dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ cx231xx_set_power_mode(dev,
+ POLARIS_AVMODE_ENXTERNAL_AV);
+ else
+ cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
#if 0
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
#endif
- cx231xx_resolution_set(dev);
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1991,7 +2219,6 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
- video_mux(dev, dev->video_input);
}
if (fh->radio) {
@@ -2008,20 +2235,22 @@ static int cx231xx_v4l2_open(struct file *filp)
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
NULL, &dev->video_mode.slock,
fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh);
+ sizeof(struct cx231xx_buffer),
+ fh, &dev->lock);
if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
/* Set the required alternate setting VBI interface works in
Bulk mode only */
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
NULL, &dev->vbi_mode.slock,
fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer), fh);
+ sizeof(struct cx231xx_buffer),
+ fh, &dev->lock);
}
- mutex_unlock(&dev->lock);
-
return errCode;
}
@@ -2054,6 +2283,10 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
if (dev->vdev) {
cx231xx_info("V4L2 device %s deregistered\n",
video_device_node_name(dev->vdev));
+
+ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER)
+ cx231xx_417_unregister(dev);
+
if (video_is_registered(dev->vdev))
video_unregister_device(dev->vdev);
else
@@ -2074,39 +2307,44 @@ static int cx231xx_v4l2_close(struct file *filp)
cx231xx_videodbg("users=%d\n", dev->users);
- mutex_lock(&dev->lock);
-
+ cx231xx_videodbg("users=%d\n", dev->users);
if (res_check(fh))
res_free(fh);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- mutex_unlock(&dev->lock);
- kfree(dev);
- return 0;
- }
+ /* To work around error number=-71 on EP0 for the VideoGrabber,
+ we need to exclude the following. */
+ if (dev->model != CX231XX_BOARD_CNXT_VIDEO_GRABBER &&
+ dev->model != CX231XX_BOARD_HAUPPAUGE_USBLIVE2)
+ if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ videobuf_stop(&fh->vb_vidq);
+ videobuf_mmap_free(&fh->vb_vidq);
+
+ /* the device is already disconnected,
+ free the remaining resources */
+ if (dev->state & DEV_DISCONNECTED) {
+ if (atomic_read(&dev->devlist_count) > 0) {
+ cx231xx_release_resources(dev);
+ kfree(dev);
+ dev = NULL;
+ return 0;
+ }
+ return 0;
+ }
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+ /* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
- kfree(fh);
- dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
- mutex_unlock(&dev->lock);
- return 0;
- }
+ kfree(fh);
+ dev->users--;
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
if (dev->users == 1) {
videobuf_stop(&fh->vb_vidq);
@@ -2116,8 +2354,8 @@ static int cx231xx_v4l2_close(struct file *filp)
free the remaining resources */
if (dev->state & DEV_DISCONNECTED) {
cx231xx_release_resources(dev);
- mutex_unlock(&dev->lock);
kfree(dev);
+ dev = NULL;
return 0;
}
@@ -2125,7 +2363,10 @@ static int cx231xx_v4l2_close(struct file *filp)
call_all(dev, core, s_power, 0);
/* do this before setting alternate! */
- cx231xx_uninit_isoc(dev);
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
/* set alternate 0 */
@@ -2134,7 +2375,6 @@ static int cx231xx_v4l2_close(struct file *filp)
kfree(fh);
dev->users--;
wake_up_interruptible_nr(&dev->open, 1);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2156,9 +2396,7 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
(fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
@@ -2173,7 +2411,7 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table * wait)
+static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
@@ -2183,9 +2421,7 @@ static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table * wait)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return POLLERR;
@@ -2210,9 +2446,7 @@ static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
rc = res_get(fh);
- mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
@@ -2234,7 +2468,7 @@ static const struct v4l2_file_operations cx231xx_v4l_fops = {
.read = cx231xx_v4l2_read,
.poll = cx231xx_v4l2_poll,
.mmap = cx231xx_v4l2_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -2336,6 +2570,7 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
+ vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
@@ -2358,12 +2593,12 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
dev->interlaced = 0;
- dev->hscale = 0;
- dev->vscale = 0;
/* Analog specific initialization */
dev->format = &format[0];
- /* video_mux(dev, dev->video_input); */
+
+ /* Set the initial input */
+ video_mux(dev, dev->video_input);
/* Audio defaults */
dev->mute = 1;
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 38d417191a65..d067df9b81e7 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -27,16 +27,15 @@
#include <linux/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <media/cx2341x.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/ir-core.h>
-#if defined(CONFIG_VIDEO_CX231XX_DVB) || \
- defined(CONFIG_VIDEO_CX231XX_DVB_MODULE)
#include <media/videobuf-dvb.h>
-#endif
#include "cx231xx-reg.h"
#include "cx231xx-pcb-cfg.h"
@@ -49,12 +48,20 @@
#define AFE_DEVICE_ADDRESS 0x60
#define I2S_BLK_DEVICE_ADDRESS 0x98
#define VID_BLK_I2C_ADDRESS 0x88
+#define VERVE_I2C_ADDRESS 0x40
#define DIF_USE_BASEBAND 0xFFFFFFFF
/* Boards supported by driver */
#define CX231XX_BOARD_UNKNOWN 0
-#define CX231XX_BOARD_CNXT_RDE_250 1
-#define CX231XX_BOARD_CNXT_RDU_250 2
+#define CX231XX_BOARD_CNXT_CARRAERA 1
+#define CX231XX_BOARD_CNXT_SHELBY 2
+#define CX231XX_BOARD_CNXT_RDE_253S 3
+#define CX231XX_BOARD_CNXT_RDU_253S 4
+#define CX231XX_BOARD_CNXT_VIDEO_GRABBER 5
+#define CX231XX_BOARD_CNXT_RDE_250 6
+#define CX231XX_BOARD_CNXT_RDU_250 7
+#define CX231XX_BOARD_HAUPPAUGE_EXETER 8
+#define CX231XX_BOARD_HAUPPAUGE_USBLIVE2 9
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
@@ -95,6 +102,24 @@
#define CX231XX_URB_TIMEOUT \
msecs_to_jiffies(CX231XX_NUM_BUFS * CX231XX_NUM_PACKETS)
+#define CX231xx_NORMS (\
+ V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443 | \
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
+ V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
+ V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
+#define CX231xx_VERSION_CODE KERNEL_VERSION(0, 0, 2)
+
+#define SLEEP_S5H1432 30
+#define CX23417_OSC_EN 8
+#define CX23417_RESET 9
+
+struct cx23417_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+ int flags;
+ u32 cxformat;
+};
enum cx231xx_mode {
CX231XX_SUSPEND,
CX231XX_ANALOG_MODE,
@@ -114,7 +139,7 @@ enum cx231xx_stream_state {
struct cx231xx;
-struct cx231xx_usb_isoc_ctl {
+struct cx231xx_isoc_ctl {
/* max packet size of isoc transaction */
int max_pkt_size;
@@ -148,6 +173,40 @@ struct cx231xx_usb_isoc_ctl {
int (*isoc_copy) (struct cx231xx *dev, struct urb *urb);
};
+struct cx231xx_bulk_ctl {
+ /* max packet size of bulk transaction */
+ int max_pkt_size;
+
+ /* number of allocated urbs */
+ int num_bufs;
+
+ /* urb for bulk transfers */
+ struct urb **urb;
+
+ /* transfer buffers for bulk transfer */
+ char **transfer_buffer;
+
+ /* Last buffer command and region */
+ u8 cmd;
+ int pos, size, pktsize;
+
+ /* Last field: ODD or EVEN? */
+ int field;
+
+ /* Stores incomplete commands */
+ u32 tmp_buf;
+ int tmp_buf_len;
+
+ /* Stores already requested buffers */
+ struct cx231xx_buffer *buf;
+
+ /* Stores the number of received fields */
+ int nfields;
+
+ /* bulk urb callback */
+ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb);
+};
+
struct cx231xx_fmt {
char *name;
u32 fourcc; /* v4l2 format id */
@@ -165,6 +224,11 @@ struct cx231xx_buffer {
int receiving;
};
+enum ps_package_head {
+ CX231XX_NEED_ADD_PS_PACKAGE_HEAD = 0,
+ CX231XX_NONEED_PS_PACKAGE_HEAD
+};
+
struct cx231xx_dmaqueue {
struct list_head active;
struct list_head queued;
@@ -181,6 +245,14 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+
+ /* MPEG-2 control buffer */
+ u8 *p_left_data;
+ u32 left_data_count;
+ u8 mpeg_buffer_done;
+ u32 mpeg_buffer_completed;
+ enum ps_package_head add_ps_package_head;
+ char ps_head[10];
};
/* inputs */
@@ -259,9 +331,10 @@ struct cx231xx_board {
struct cx231xx_reg_seq *dvb_gpio;
struct cx231xx_reg_seq *suspend_gpio;
struct cx231xx_reg_seq *tuner_gpio;
- u8 tuner_sif_gpio;
- u8 tuner_scl_gpio;
- u8 tuner_sda_gpio;
+ /* Negative means don't use it */
+ s8 tuner_sif_gpio;
+ s8 tuner_scl_gpio;
+ s8 tuner_sda_gpio;
/* PIN ctrl */
u32 ctl_pin_status_mask;
@@ -279,6 +352,7 @@ struct cx231xx_board {
unsigned char xclk, i2c_speed;
enum cx231xx_decoder decoder;
+ int output_mode;
struct cx231xx_input input[MAX_CX231XX_INPUT];
struct cx231xx_input radio;
@@ -309,10 +383,8 @@ enum AUDIO_INPUT {
};
#define CX231XX_AUDIO_BUFS 5
-#define CX231XX_NUM_AUDIO_PACKETS 64
-#define CX231XX_CAPTURE_STREAM_EN 1
-#define CX231XX_STOP_AUDIO 0
-#define CX231XX_START_AUDIO 1
+#define CX231XX_NUM_AUDIO_PACKETS 16
+#define CX231XX_ISO_NUM_AUDIO_PACKETS 64
/* cx231xx extensions */
#define CX231XX_AUDIO 0x10
@@ -330,7 +402,7 @@ struct cx231xx_audio {
struct snd_card *sndcard;
int users, shutdown;
- enum cx231xx_stream_state capture_stream;
+ /* locks */
spinlock_t slock;
int alt; /* alternate */
@@ -350,6 +422,28 @@ struct cx231xx_fh {
struct videobuf_queue vb_vidq;
enum v4l2_buf_type type;
+
+
+
+/* The following is copied from cx23885.h */
+ u32 resources;
+
+ /* video overlay */
+ struct v4l2_window win;
+ struct v4l2_clip *clips;
+ unsigned int nclips;
+
+ /* video capture */
+ struct cx23417_fmt *fmt;
+ unsigned int width, height;
+
+ /* vbi capture */
+ struct videobuf_queue vidq;
+ struct videobuf_queue vbiq;
+
+ /* MPEG Encoder specifics ONLY */
+
+ atomic_t v4l_reading;
};
/*****************************************************************/
@@ -403,6 +497,13 @@ struct VENDOR_REQUEST_IN {
u8 *pBuff;
};
+struct cx231xx_tvnorm {
+ char *name;
+ v4l2_std_id id;
+ u32 cxiformat;
+ u32 cxoformat;
+};
+
struct cx231xx_ctrl {
struct v4l2_queryctrl v;
u32 off;
@@ -424,7 +525,9 @@ enum TRANSFER_TYPE {
struct cx231xx_video_mode {
/* Isoc control struct */
struct cx231xx_dmaqueue vidq;
- struct cx231xx_usb_isoc_ctl isoc_ctl;
+ struct cx231xx_isoc_ctl isoc_ctl;
+ struct cx231xx_bulk_ctl bulk_ctl;
+ /* locks */
spinlock_t slock;
/* usb transfer */
@@ -434,6 +537,64 @@ struct cx231xx_video_mode {
unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
u16 end_point_addr;
};
+/*
+struct cx23885_dmaqueue {
+ struct list_head active;
+ struct list_head queued;
+ struct timer_list timeout;
+ struct btcx_riscmem stopper;
+ u32 count;
+};
+*/
+struct cx231xx_tsport {
+ struct cx231xx *dev;
+
+ int nr;
+ int sram_chno;
+
+ struct videobuf_dvb_frontends frontends;
+
+ /* dma queues */
+
+ u32 ts_packet_size;
+ u32 ts_packet_count;
+
+ int width;
+ int height;
+
+ /* locks */
+ spinlock_t slock;
+
+ /* registers */
+ u32 reg_gpcnt;
+ u32 reg_gpcnt_ctl;
+ u32 reg_dma_ctl;
+ u32 reg_lngth;
+ u32 reg_hw_sop_ctrl;
+ u32 reg_gen_ctrl;
+ u32 reg_bd_pkt_status;
+ u32 reg_sop_status;
+ u32 reg_fifo_ovfl_stat;
+ u32 reg_vld_misc;
+ u32 reg_ts_clk_en;
+ u32 reg_ts_int_msk;
+ u32 reg_ts_int_stat;
+ u32 reg_src_sel;
+
+ /* Default register vals */
+ int pci_irqmask;
+ u32 dma_ctl_val;
+ u32 ts_int_msk_val;
+ u32 gen_ctrl_val;
+ u32 ts_clk_en_val;
+ u32 src_sel_val;
+ u32 vld_misc_val;
+ u32 hw_sop_ctrl_val;
+
+ /* Allow a single tsport to have multiple frontends */
+ u32 num_frontends;
+ void *port_priv;
+};
/* main device struct */
struct cx231xx {
@@ -457,6 +618,9 @@ struct cx231xx {
struct cx231xx_IR *ir;
+ struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
+ atomic_t stream_started; /* stream should be running if true */
+
struct list_head devlist;
int tuner_type; /* type of the tuner */
@@ -465,7 +629,9 @@ struct cx231xx {
/* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
struct cx231xx_i2c i2c_bus[3];
unsigned int xc_fw_load_done:1;
+ /* locks */
struct mutex gpio_i2c_lock;
+ struct mutex i2c_lock;
/* video for linux */
int users; /* user count for exclusive use */
@@ -479,8 +645,6 @@ struct cx231xx {
/* frame properties */
int width; /* current frame width */
int height; /* current frame height */
- unsigned hscale; /* horizontal scale factor (see datasheet) */
- unsigned vscale; /* vertical scale factor (see datasheet) */
int interlaced; /* 1=interlace fileds, 0=just top fileds */
struct cx231xx_audio adev;
@@ -505,6 +669,8 @@ struct cx231xx {
struct cx231xx_video_mode sliced_cc_mode;
struct cx231xx_video_mode ts1_mode;
+ atomic_t devlist_count;
+
struct usb_device *udev; /* the usb device */
char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */
@@ -550,8 +716,24 @@ struct cx231xx {
u8 vbi_or_sliced_cc_mode; /* 0 - vbi ; 1 - sliced cc mode */
enum cx231xx_std_mode std_mode; /* 0 - Air; 1 - cable */
+ /*mode: digital=1 or analog=0*/
+ u8 mode_tv;
+
+ u8 USE_ISO;
+ struct cx231xx_tvnorm encodernorm;
+ struct cx231xx_tsport ts1, ts2;
+ struct cx2341x_mpeg_params mpeg_params;
+ struct video_device *v4l_device;
+ atomic_t v4l_reader_count;
+ u32 freq;
+ unsigned int input;
+ u32 cx23417_mailbox;
+ u32 __iomem *lmmio;
+ u8 __iomem *bmmio;
};
+extern struct list_head cx231xx_devlist;
+
#define cx25840_call(cx231xx, o, f, args...) \
v4l2_subdev_call(cx231xx->sd_cx25840, o, f, ##args)
#define tuner_call(cx231xx, o, f, args...) \
@@ -577,6 +759,10 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus);
int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
/* Internal block control functions */
+int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 *data, u8 data_len, int master);
+int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
+ u8 saddr_len, u32 data, u8 data_len, int master);
int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr,
u16 saddr, u8 saddr_len, u32 *data, u8 data_len);
int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr,
@@ -588,6 +774,9 @@ int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr,
u16 saddr, u32 mask, u32 value);
u32 cx231xx_set_field(u32 field_mask, u32 data);
+/*verve r/w*/
+void initGPIO(struct cx231xx *dev);
+void uninitGPIO(struct cx231xx *dev);
/* afe related functions */
int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count);
int cx231xx_afe_init_channels(struct cx231xx *dev);
@@ -607,6 +796,19 @@ int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input);
/* DIF related functions */
int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode,
u32 function_mode, u32 standard);
+void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode);
+u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd);
+void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
+ u8 spectral_invert, u32 mode);
+void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev);
+void reset_s5h1432_demod(struct cx231xx *dev);
+void cx231xx_dump_HH_reg(struct cx231xx *dev);
+void update_HH_register_after_set_DIF(struct cx231xx *dev);
+void cx231xx_dump_SC_reg(struct cx231xx *dev);
+
+
+
int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard);
int cx231xx_tuner_pre_channel_change(struct cx231xx *dev);
int cx231xx_tuner_post_channel_change(struct cx231xx *dev);
@@ -672,15 +874,28 @@ int cx231xx_set_audio_decoder_input(struct cx231xx *dev,
enum AUDIO_INPUT audio_input);
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type);
-int cx231xx_resolution_set(struct cx231xx *dev);
int cx231xx_set_video_alternate(struct cx231xx *dev);
int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt);
+int is_fw_load(struct cx231xx *dev);
+int cx231xx_check_fw(struct cx231xx *dev);
int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
int num_bufs, int max_pkt_size,
int (*isoc_copy) (struct cx231xx *dev,
struct urb *urb));
+int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*bulk_copy) (struct cx231xx *dev,
+ struct urb *urb));
+void cx231xx_stop_TS1(struct cx231xx *dev);
+void cx231xx_start_TS1(struct cx231xx *dev);
void cx231xx_uninit_isoc(struct cx231xx *dev);
+void cx231xx_uninit_bulk(struct cx231xx *dev);
int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode);
+int cx231xx_unmute_audio(struct cx231xx *dev);
+int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size);
+void cx231xx_disable656(struct cx231xx *dev);
+void cx231xx_enable656(struct cx231xx *dev);
+int cx231xx_demod_reset(struct cx231xx *dev);
int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio);
/* Device list functions */
@@ -712,7 +927,7 @@ int cx231xx_power_suspend(struct cx231xx *dev);
int cx231xx_init_ctrl_pin_status(struct cx231xx *dev);
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital);
-int cx231xx_enable_i2c_for_tuner(struct cx231xx *dev, u8 I2CIndex);
+int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3);
/* video audio decoder related functions */
void video_mux(struct cx231xx *dev, int index);
@@ -733,12 +948,11 @@ extern void cx231xx_card_setup(struct cx231xx *dev);
extern struct cx231xx_board cx231xx_boards[];
extern struct usb_device_id cx231xx_id_table[];
extern const unsigned int cx231xx_bcount;
-void cx231xx_register_i2c_ir(struct cx231xx *dev);
int cx231xx_tuner_callback(void *ptr, int component, int command, int arg);
-/* Provided by cx231xx-input.c */
-int cx231xx_ir_init(struct cx231xx *dev);
-int cx231xx_ir_fini(struct cx231xx *dev);
+/* cx23885-417.c */
+extern int cx231xx_417_register(struct cx231xx *dev);
+extern void cx231xx_417_unregister(struct cx231xx *dev);
/* printk macros */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 53a67824071b..a6cc12f8736c 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1591,7 +1591,7 @@ static int mpeg_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx23885_buffer),
- fh);
+ fh, NULL);
unlock_kernel();
return 0;
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index e76ce8709afd..db054004e462 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -1247,7 +1247,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
- "cx25840", "cx25840", 0x88 >> 1, NULL);
+ NULL, "cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840) {
dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index f6b62e7398af..359882419b7f 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -815,6 +815,7 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
case 0x0e:
/* CX23887-15Z */
dev->hwrevision = 0xc0;
+ break;
case 0x0f:
/* CX23887-14Z */
dev->hwrevision = 0xb1;
@@ -1221,7 +1222,7 @@ void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 3d70af283881..5958cb882e93 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1017,10 +1017,7 @@ static int dvb_register(struct cx23885_tsport *port)
/* Read entire EEPROM */
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
- printk(KERN_INFO "TeVii S470 MAC= "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- eeprom[0xa0], eeprom[0xa1], eeprom[0xa2],
- eeprom[0xa3], eeprom[0xa4], eeprom[0xa5]);
+ printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
break;
}
@@ -1074,7 +1071,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
videobuf_queue_sg_init(&fe0->dvb.dvbq, &dvb_qops,
&dev->pci->dev, &port->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_TOP,
- sizeof(struct cx23885_buffer), port);
+ sizeof(struct cx23885_buffer), port, NULL);
}
err = dvb_register(port);
if (err != 0)
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index da66e5f8d91d..93af9c65b484 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -758,7 +758,7 @@ static int video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx23885_buffer),
- fh);
+ fh, NULL);
dprintk(1, "post videobuf_queue_init()\n");
@@ -1165,9 +1165,10 @@ static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name, iname[INPUT(n)->type]);
if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) ||
- (CX23885_VMUX_CABLE == INPUT(n)->type))
+ (CX23885_VMUX_CABLE == INPUT(n)->type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX23885_NORMS;
+ }
return 0;
}
@@ -1511,11 +1512,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
if (dev->tuner_addr)
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", dev->tuner_addr, NULL);
+ NULL, "tuner", dev->tuner_addr, NULL);
else
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_bus[1].i2c_adap,
- "tuner", "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
+ &dev->i2c_bus[1].i2c_adap, NULL,
+ "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
if (sd) {
struct tuner_setup tun_setup;
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index 2502a0a67097..e78e3e4c8112 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -704,6 +704,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
+ init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 6faad34df3ac..34b96c7cfd62 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -437,41 +437,45 @@ void cx25840_audio_set_path(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
- /* assert soft reset */
- cx25840_and_or(client, 0x810, ~0x1, 0x01);
+ if (!is_cx2583x(state)) {
+ /* assert soft reset */
+ cx25840_and_or(client, 0x810, ~0x1, 0x01);
- /* stop microcontroller */
- cx25840_and_or(client, 0x803, ~0x10, 0);
+ /* stop microcontroller */
+ cx25840_and_or(client, 0x803, ~0x10, 0);
- /* Mute everything to prevent the PFFT! */
- cx25840_write(client, 0x8d3, 0x1f);
+ /* Mute everything to prevent the PFFT! */
+ cx25840_write(client, 0x8d3, 0x1f);
- if (state->aud_input == CX25840_AUDIO_SERIAL) {
- /* Set Path1 to Serial Audio Input */
- cx25840_write4(client, 0x8d0, 0x01011012);
+ if (state->aud_input == CX25840_AUDIO_SERIAL) {
+ /* Set Path1 to Serial Audio Input */
+ cx25840_write4(client, 0x8d0, 0x01011012);
- /* The microcontroller should not be started for the
- * non-tuner inputs: autodetection is specific for
- * TV audio. */
- } else {
- /* Set Path1 to Analog Demod Main Channel */
- cx25840_write4(client, 0x8d0, 0x1f063870);
+ /* The microcontroller should not be started for the
+ * non-tuner inputs: autodetection is specific for
+ * TV audio. */
+ } else {
+ /* Set Path1 to Analog Demod Main Channel */
+ cx25840_write4(client, 0x8d0, 0x1f063870);
+ }
}
set_audclk_freq(client, state->audclk_freq);
- if (state->aud_input != CX25840_AUDIO_SERIAL) {
- /* When the microcontroller detects the
- * audio format, it will unmute the lines */
- cx25840_and_or(client, 0x803, ~0x10, 0x10);
- }
+ if (!is_cx2583x(state)) {
+ if (state->aud_input != CX25840_AUDIO_SERIAL) {
+ /* When the microcontroller detects the
+ * audio format, it will unmute the lines */
+ cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ }
- /* deassert soft reset */
- cx25840_and_or(client, 0x810, ~0x1, 0x00);
+ /* deassert soft reset */
+ cx25840_and_or(client, 0x810, ~0x1, 0x00);
- /* Ensure the controller is running when we exit */
- if (is_cx2388x(state) || is_cx231xx(state))
- cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ /* Ensure the controller is running when we exit */
+ if (is_cx2388x(state) || is_cx231xx(state))
+ cx25840_and_or(client, 0x803, ~0x10, 0x10);
+ }
}
static void set_volume(struct i2c_client *client, int volume)
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f5a3e74c3c7c..dfb198d0415b 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -42,7 +42,6 @@
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/cx25840.h>
#include "cx25840-core.h"
@@ -871,6 +870,11 @@ static void input_change(struct i2c_client *client)
}
cx25840_and_or(client, 0x401, ~0x60, 0);
cx25840_and_or(client, 0x401, ~0x60, 0x60);
+
+ /* Don't write into audio registers on cx2583x chips */
+ if (is_cx2583x(state))
+ return;
+
cx25840_and_or(client, 0x810, ~0x01, 1);
if (state->radio) {
@@ -1029,10 +1033,8 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
state->vid_input = vid_input;
state->aud_input = aud_input;
- if (!is_cx2583x(state)) {
- cx25840_audio_set_path(client);
- input_change(client);
- }
+ cx25840_audio_set_path(client);
+ input_change(client);
if (is_cx2388x(state)) {
/* Audio channel 1 src : Parallel 1 */
@@ -1553,18 +1555,14 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2583x(state))
- return -EINVAL;
return set_input(client, state->vid_input, input);
}
static int cx25840_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
- struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (!is_cx2583x(state))
- input_change(client);
+ input_change(client);
return 0;
}
@@ -2043,9 +2041,25 @@ static const struct i2c_device_id cx25840_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "cx25840",
- .probe = cx25840_probe,
- .remove = cx25840_remove,
- .id_table = cx25840_id,
+static struct i2c_driver cx25840_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cx25840",
+ },
+ .probe = cx25840_probe,
+ .remove = cx25840_remove,
+ .id_table = cx25840_id,
};
+
+static __init int init_cx25840(void)
+{
+ return i2c_add_driver(&cx25840_driver);
+}
+
+static __exit void exit_cx25840(void)
+{
+ i2c_del_driver(&cx25840_driver);
+}
+
+module_init(init_cx25840);
+module_exit(exit_cx25840);
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index c2b4c14dc9ab..97a4e9b25fe4 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -706,6 +706,7 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
+ init_ir_raw_event(&p->ir_core_data);
p->ir_core_data.pulse = u;
p->ir_core_data.duration = v;
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 4f383cdf5296..4aaa47c0eabf 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -40,6 +40,7 @@
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <media/wm8775.h>
#include "cx88.h"
#include "cx88-reg.h"
@@ -94,7 +95,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
****************************************************************************/
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
@@ -131,7 +132,7 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
{
struct cx88_audio_buffer *buf = chip->buf;
struct cx88_core *core=chip->core;
- struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
+ const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
cx_clear(MO_AUD_DMACNTRL, 0x11);
@@ -197,7 +198,7 @@ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
/*
* BOARD Specific: IRQ dma bits
*/
-static char *cx88_aud_irqs[32] = {
+static const char *cx88_aud_irqs[32] = {
"dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
NULL, /* reserved */
"dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
@@ -308,7 +309,7 @@ static int dsp_buffer_free(snd_cx88_card_t *chip)
* Digital hardware definition
*/
#define DEFAULT_FIFO_SIZE 4096
-static struct snd_pcm_hardware snd_cx88_digital_hw = {
+static const struct snd_pcm_hardware snd_cx88_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
@@ -533,7 +534,7 @@ static struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, char *name)
+static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -586,26 +587,47 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
int left, right, v, b;
int changed = 0;
u32 old;
+ struct v4l2_control client_ctl;
+
+ /* Pass volume & balance onto any WM8775 */
+ if (value->value.integer.value[0] >= value->value.integer.value[1]) {
+ v = value->value.integer.value[0] << 10;
+ b = value->value.integer.value[0] ?
+ (0x8000 * value->value.integer.value[1]) / value->value.integer.value[0] :
+ 0x8000;
+ } else {
+ v = value->value.integer.value[1] << 10;
+ b = value->value.integer.value[1] ?
+ 0xffff - (0x8000 * value->value.integer.value[0]) / value->value.integer.value[1] :
+ 0x8000;
+ }
+ client_ctl.value = v;
+ client_ctl.id = V4L2_CID_AUDIO_VOLUME;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
+ client_ctl.value = b;
+ client_ctl.id = V4L2_CID_AUDIO_BALANCE;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
left = value->value.integer.value[0] & 0x3f;
right = value->value.integer.value[1] & 0x3f;
b = right - left;
if (b < 0) {
- v = 0x3f - left;
- b = (-b) | 0x40;
+ v = 0x3f - left;
+ b = (-b) | 0x40;
} else {
- v = 0x3f - right;
+ v = 0x3f - right;
}
/* Do we really know this will always be called with IRQs on? */
spin_lock_irq(&chip->reg_lock);
old = cx_read(AUD_VOL_CTL);
if (v != (old & 0x3f)) {
- cx_write(AUD_VOL_CTL, (old & ~0x3f) | v);
- changed = 1;
+ cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v);
+ changed = 1;
}
- if (cx_read(AUD_BAL_CTL) != b) {
- cx_write(AUD_BAL_CTL, b);
- changed = 1;
+ if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) {
+ cx_write(AUD_BAL_CTL, b);
+ changed = 1;
}
spin_unlock_irq(&chip->reg_lock);
@@ -614,11 +636,11 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0);
-static struct snd_kcontrol_new snd_cx88_volume = {
+static const struct snd_kcontrol_new snd_cx88_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .name = "Playback Volume",
+ .name = "Analog-TV Volume",
.info = snd_cx88_volume_info,
.get = snd_cx88_volume_get,
.put = snd_cx88_volume_put,
@@ -649,31 +671,74 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol = cx_read(AUD_VOL_CTL);
if (value->value.integer.value[0] != !(vol & bit)) {
vol ^= bit;
- cx_write(AUD_VOL_CTL, vol);
+ cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
+ /* Pass mute onto any WM8775 */
+ if ((1<<6) == bit) {
+ struct v4l2_control client_ctl;
+ client_ctl.value = 0 != (vol & bit);
+ client_ctl.id = V4L2_CID_AUDIO_MUTE;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+ }
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
return ret;
}
-static struct snd_kcontrol_new snd_cx88_dac_switch = {
+static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Playback Switch",
+ .name = "Audio-Out Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<8),
};
-static struct snd_kcontrol_new snd_cx88_source_switch = {
+static const struct snd_kcontrol_new snd_cx88_source_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Switch",
+ .name = "Analog-TV Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<6),
};
+static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ struct v4l2_control client_ctl;
+
+ client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
+ call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl);
+ value->value.integer.value[0] = client_ctl.value ? 1 : 0;
+
+ return 0;
+}
+
+static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ struct v4l2_control client_ctl;
+
+ client_ctl.value = 0 != value->value.integer.value[0];
+ client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
+ return 0;
+}
+
+static struct snd_kcontrol_new snd_cx88_alc_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Line-In ALC Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_cx88_alc_get,
+ .put = snd_cx88_alc_put,
+};
+
/****************************************************************************
Basic Flow for Sound Devices
****************************************************************************/
@@ -683,7 +748,7 @@ static struct snd_kcontrol_new snd_cx88_source_switch = {
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
@@ -795,6 +860,7 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
{
struct snd_card *card;
snd_cx88_card_t *chip;
+ struct v4l2_subdev *sd;
int err;
if (devno >= SNDRV_CARDS)
@@ -830,6 +896,15 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
if (err < 0)
goto error;
+ /* If there's a wm8775 then add a Line-In ALC switch */
+ list_for_each_entry(sd, &chip->core->v4l2_dev.subdevs, list) {
+ if (WM8775_GID == sd->grp_id) {
+ snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch,
+ chip));
+ break;
+ }
+ }
+
strcpy (card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",
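
The snd_cx88_volume_put() hunk above maps the ALSA left/right pair (each 0-0x3f) onto the WM8775's V4L2 controls: the louder channel sets V4L2_CID_AUDIO_VOLUME (shifted left by 10) and the ratio between the two channels sets V4L2_CID_AUDIO_BALANCE around the 0x8000 midpoint. A minimal standalone sketch of that mapping follows; wm8775_vol_bal() is a hypothetical helper, not part of the patch.

/* Sketch only: mirrors the left/right -> volume/balance mapping that
 * snd_cx88_volume_put() feeds to the WM8775 via call_hw(). */
static void wm8775_vol_bal(long left, long right, int *vol, int *bal)
{
        if (left >= right) {
                /* left is the louder channel: it sets the volume and the
                 * balance drops below the 0x8000 midpoint (toward left) */
                *vol = left << 10;
                *bal = left ? (0x8000 * right) / left : 0x8000;
        } else {
                /* right is louder: balance rises above the midpoint */
                *vol = right << 10;
                *bal = right ? 0xffff - (0x8000 * left) / right : 0x8000;
        }
}

Equal channels land exactly on the 0x8000 midpoint, and a fully muted pair (0, 0) yields volume 0 with a centred balance.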
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 660b2a927feb..417d1d5c73c4 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1057,7 +1057,7 @@ static int mpeg_open(struct file *file)
dprintk( 1, "%s\n", __func__);
- lock_kernel();
+ mutex_lock(&dev->core->lock);
/* Make sure we can acquire the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
@@ -1065,7 +1065,7 @@ static int mpeg_open(struct file *file)
err = drv->request_acquire(drv);
if(err != 0) {
dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return err;
}
}
@@ -1073,7 +1073,7 @@ static int mpeg_open(struct file *file)
if (!atomic_read(&dev->core->mpeg_users) && blackbird_initialize_codec(dev) < 0) {
if (drv)
drv->request_release(drv);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return -EINVAL;
}
dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
@@ -1083,7 +1083,7 @@ static int mpeg_open(struct file *file)
if (NULL == fh) {
if (drv)
drv->request_release(drv);
- unlock_kernel();
+ mutex_unlock(&dev->core->lock);
return -ENOMEM;
}
file->private_data = fh;
@@ -1094,15 +1094,14 @@ static int mpeg_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
/* FIXME: locking against other video device */
cx88_set_scale(dev->core, dev->width, dev->height,
fh->mpegq.field);
- unlock_kernel();
atomic_inc(&dev->core->mpeg_users);
-
+ mutex_unlock(&dev->core->lock);
return 0;
}
@@ -1120,8 +1119,11 @@ static int mpeg_release(struct file *file)
videobuf_stop(&fh->mpegq);
videobuf_mmap_free(&fh->mpegq);
+
+ mutex_lock(&dev->core->lock);
file->private_data = NULL;
kfree(fh);
+ mutex_unlock(&dev->core->lock);
/* Make sure we release the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index e8416b76da67..b26fcba8600c 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -970,15 +970,22 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
+ .audio_chip = V4L2_IDENT_WM8775,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
+ /* 2: Line-In */
+ .audioroute = 2,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
+ /* 2: Line-In */
+ .audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
+ /* 2: Line-In */
+ .audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB,
},
@@ -2104,6 +2111,18 @@ static const struct cx88_board cx88_boards[] = {
} },
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_TWINHAN_VP1027_DVBS] = {
+ .name = "Twinhan VP-1027 DVB-S",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DVB,
+ .vmux = 0,
+ } },
+ .mpeg = CX88_MPEG_DVB,
+ },
};
/* ------------------------------------------------------------------ */
@@ -2576,6 +2595,10 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0xb034,
.subdevice = 0x3034,
.card = CX88_BOARD_PROF_7301,
+ }, {
+ .subvendor = 0x1822,
+ .subdevice = 0x0023,
+ .card = CX88_BOARD_TWINHAN_VP1027_DVBS,
},
};
@@ -2673,10 +2696,10 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
/* ----------------------------------------------------------------------- */
/* some GDI (was: Modular Technology) specific stuff */
-static struct {
+static const struct {
int id;
int fm;
- char *name;
+ const char *name;
} gdi_tuner[] = {
[ 0x01 ] = { .id = TUNER_ABSENT,
.name = "NTSC_M" },
@@ -2710,7 +2733,7 @@ static struct {
static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
- char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
+ const char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
? gdi_tuner[eeprom_data[0x0d]].name : NULL;
info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
@@ -3070,6 +3093,13 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_set(MO_GP1_IO, 0x10);
mdelay(50);
break;
+
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ cx_write(MO_GP0_IO, 0x00003230);
+ cx_write(MO_GP0_IO, 0x00003210);
+ msleep(1);
+ cx_write(MO_GP0_IO, 0x00001230);
+ break;
}
}
@@ -3485,19 +3515,19 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
later code configures a tea5767.
*/
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", "tuner",
+ NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
if (has_demod)
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner", "tuner",
+ &core->i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (core->board.tuner_addr == ADDR_UNSET) {
v4l2_i2c_new_subdev(&core->v4l2_dev,
- &core->i2c_adap, "tuner", "tuner",
+ &core->i2c_adap, NULL, "tuner",
0, has_demod ? tv_addrs + 4 : tv_addrs);
} else {
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "tuner", "tuner", core->board.tuner_addr, NULL);
+ NULL, "tuner", core->board.tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 85eb266fb351..2e145f0a5fd9 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -217,7 +217,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
@@ -253,7 +253,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
* 0x0c00 - FIFOs
*/
-struct sram_channel cx88_sram_channels[] = {
+const struct sram_channel cx88_sram_channels[] = {
[SRAM_CH21] = {
.name = "video y / packed",
.cmds_start = 0x180040,
@@ -353,7 +353,7 @@ struct sram_channel cx88_sram_channels[] = {
};
int cx88_sram_channel_setup(struct cx88_core *core,
- struct sram_channel *ch,
+ const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i,lines;
@@ -394,7 +394,7 @@ int cx88_sram_channel_setup(struct cx88_core *core,
static int cx88_risc_decode(u32 risc)
{
- static char *instr[16] = {
+ static const char * const instr[16] = {
[ RISC_SYNC >> 28 ] = "sync",
[ RISC_WRITE >> 28 ] = "write",
[ RISC_WRITEC >> 28 ] = "writec",
@@ -406,14 +406,14 @@ static int cx88_risc_decode(u32 risc)
[ RISC_WRITECM >> 28 ] = "writecm",
[ RISC_WRITECR >> 28 ] = "writecr",
};
- static int incr[16] = {
+ static int const incr[16] = {
[ RISC_WRITE >> 28 ] = 2,
[ RISC_JUMP >> 28 ] = 2,
[ RISC_WRITERM >> 28 ] = 3,
[ RISC_WRITECM >> 28 ] = 3,
[ RISC_WRITECR >> 28 ] = 4,
};
- static char *bits[] = {
+ static const char * const bits[] = {
"12", "13", "14", "resync",
"cnt0", "cnt1", "18", "19",
"20", "21", "22", "23",
@@ -432,9 +432,9 @@ static int cx88_risc_decode(u32 risc)
void cx88_sram_channel_dump(struct cx88_core *core,
- struct sram_channel *ch)
+ const struct sram_channel *ch)
{
- static char *name[] = {
+ static const char * const name[] = {
"initial risc",
"cdt base",
"cdt size",
@@ -489,14 +489,14 @@ void cx88_sram_channel_dump(struct cx88_core *core,
core->name,cx_read(ch->cnt2_reg));
}
-static char *cx88_pci_irqs[32] = {
+static const char *cx88_pci_irqs[32] = {
"vid", "aud", "ts", "vip", "hst", "5", "6", "tm1",
"src_dma", "dst_dma", "risc_rd_err", "risc_wr_err",
"brdg_err", "src_dma_err", "dst_dma_err", "ipb_dma_err",
"i2c", "i2c_rack", "ir_smp", "gpio0", "gpio1"
};
-void cx88_print_irqbits(char *name, char *tag, char **strings,
+void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
int len, u32 bits, u32 mask)
{
unsigned int i;
@@ -770,7 +770,7 @@ static const u32 xtal = 28636363;
static int set_pll(struct cx88_core *core, int prescale, u32 ofreq)
{
- static u32 pre[] = { 0, 0, 0, 3, 2, 1 };
+ static const u32 pre[] = { 0, 0, 0, 3, 2, 1 };
u64 pll;
u32 reg;
int i;
@@ -879,7 +879,7 @@ static int set_tvaudio(struct cx88_core *core)
} else {
printk("%s/0: tvaudio support needs work for this tv norm [%s], sorry\n",
core->name, v4l2_norm_to_name(core->tvnorm));
- core->tvaudio = 0;
+ core->tvaudio = WW_NONE;
return 0;
}
@@ -1020,15 +1020,15 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
struct video_device *cx88_vdev_init(struct cx88_core *core,
struct pci_dev *pci,
- struct video_device *template,
- char *type)
+ const struct video_device *template_,
+ const char *type)
{
struct video_device *vfd;
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
- *vfd = *template;
+ *vfd = *template_;
vfd->v4l2_dev = &core->v4l2_dev;
vfd->parent = &pci->dev;
vfd->release = video_device_release;
diff --git a/drivers/media/video/cx88/cx88-dsp.c b/drivers/media/video/cx88/cx88-dsp.c
index a94e00a4ac5d..a9907265ff66 100644
--- a/drivers/media/video/cx88/cx88-dsp.c
+++ b/drivers/media/video/cx88/cx88-dsp.c
@@ -230,7 +230,7 @@ static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
{
- struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
+ const struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
s16 *samples;
unsigned int i;
@@ -292,11 +292,20 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
switch (core->tvaudio) {
case WW_BG:
case WW_DK:
+ case WW_EIAJ:
+ case WW_M:
ret = detect_a2_a2m_eiaj(core, samples, N);
break;
case WW_BTSC:
ret = detect_btsc(core, samples, N);
break;
+ case WW_NONE:
+ case WW_I:
+ case WW_L:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ break;
}
kfree(samples);
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index faa8e8163a4a..367a653f4c95 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -56,6 +56,7 @@
#include "stv0900.h"
#include "stb6100.h"
#include "stb6100_proc.h"
+#include "mb86a16.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -105,7 +106,7 @@ static void dvb_buf_release(struct videobuf_queue *q,
cx88_free_buffer(q, (struct cx88_buffer*)vb);
}
-static struct videobuf_queue_ops dvb_qops = {
+static const struct videobuf_queue_ops dvb_qops = {
.buf_setup = dvb_buf_setup,
.buf_prepare = dvb_buf_prepare,
.buf_queue = dvb_buf_queue,
@@ -167,12 +168,12 @@ static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
- static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+ static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
+ static const u8 reset [] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
+ static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
@@ -187,12 +188,12 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
- static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+ static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
+ static const u8 reset [] = { RESET, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static const u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
+ static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
@@ -208,13 +209,13 @@ static int dvico_dual_demod_init(struct dvb_frontend *fe)
static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { 0x89, 0x38, 0x39 };
- static u8 reset [] = { 0x50, 0x80 };
- static u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config [] = { 0x89, 0x38, 0x39 };
+ static const u8 reset [] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
+ static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
- static u8 dntv_extra[] = { 0xB5, 0x7A };
- static u8 capt_range_cfg[] = { 0x75, 0x32 };
+ static const u8 dntv_extra[] = { 0xB5, 0x7A };
+ static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
@@ -229,37 +230,41 @@ static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
return 0;
}
-static struct mt352_config dvico_fusionhdtv = {
+static const struct mt352_config dvico_fusionhdtv = {
.demod_address = 0x0f,
.demod_init = dvico_fusionhdtv_demod_init,
};
-static struct mt352_config dntv_live_dvbt_config = {
+static const struct mt352_config dntv_live_dvbt_config = {
.demod_address = 0x0f,
.demod_init = dntv_live_dvbt_demod_init,
};
-static struct mt352_config dvico_fusionhdtv_dual = {
+static const struct mt352_config dvico_fusionhdtv_dual = {
.demod_address = 0x0f,
.demod_init = dvico_dual_demod_init,
};
-static struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
+static const struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
+static struct mb86a16_config twinhan_vp1027 = {
+ .demod_address = 0x08,
+};
+
#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
- static u8 clock_config [] = { 0x89, 0x38, 0x38 };
- static u8 reset [] = { 0x50, 0x80 };
- static u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
- static u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+ static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
+ static const u8 reset [] = { 0x50, 0x80 };
+ static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
+ static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
- static u8 dntv_extra[] = { 0xB5, 0x7A };
- static u8 capt_range_cfg[] = { 0x75, 0x32 };
+ static const u8 dntv_extra[] = { 0xB5, 0x7A };
+ static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
@@ -274,41 +279,41 @@ static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
return 0;
}
-static struct mt352_config dntv_live_dvbt_pro_config = {
+static const struct mt352_config dntv_live_dvbt_pro_config = {
.demod_address = 0x0f,
.no_tuner = 1,
.demod_init = dntv_live_dvbt_pro_demod_init,
};
#endif
-static struct zl10353_config dvico_fusionhdtv_hybrid = {
+static const struct zl10353_config dvico_fusionhdtv_hybrid = {
.demod_address = 0x0f,
.no_tuner = 1,
};
-static struct zl10353_config dvico_fusionhdtv_xc3028 = {
+static const struct zl10353_config dvico_fusionhdtv_xc3028 = {
.demod_address = 0x0f,
.if2 = 45600,
.no_tuner = 1,
};
-static struct mt352_config dvico_fusionhdtv_mt352_xc3028 = {
+static const struct mt352_config dvico_fusionhdtv_mt352_xc3028 = {
.demod_address = 0x0f,
.if2 = 4560,
.no_tuner = 1,
.demod_init = dvico_fusionhdtv_demod_init,
};
-static struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
+static const struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
.demod_address = 0x0f,
};
-static struct cx22702_config connexant_refboard_config = {
+static const struct cx22702_config connexant_refboard_config = {
.demod_address = 0x43,
.output_mode = CX22702_SERIAL_OUTPUT,
};
-static struct cx22702_config hauppauge_hvr_config = {
+static const struct cx22702_config hauppauge_hvr_config = {
.demod_address = 0x63,
.output_mode = CX22702_SERIAL_OUTPUT,
};
@@ -320,7 +325,7 @@ static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
return 0;
}
-static struct or51132_config pchdtv_hd3000 = {
+static const struct or51132_config pchdtv_hd3000 = {
.demod_address = 0x15,
.set_ts_params = or51132_set_ts_param,
};
@@ -355,14 +360,14 @@ static struct lgdt330x_config fusionhdtv_3_gold = {
.set_ts_params = lgdt330x_set_ts_param,
};
-static struct lgdt330x_config fusionhdtv_5_gold = {
+static const struct lgdt330x_config fusionhdtv_5_gold = {
.demod_address = 0x0e,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
.set_ts_params = lgdt330x_set_ts_param,
};
-static struct lgdt330x_config pchdtv_hd5500 = {
+static const struct lgdt330x_config pchdtv_hd5500 = {
.demod_address = 0x59,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
@@ -376,7 +381,7 @@ static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
return 0;
}
-static struct nxt200x_config ati_hdtvwonder = {
+static const struct nxt200x_config ati_hdtvwonder = {
.demod_address = 0x0a,
.set_ts_params = nxt200x_set_ts_param,
};
@@ -429,15 +434,15 @@ static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
cx_set(MO_GP0_IO, 0x6040);
switch (voltage) {
- case SEC_VOLTAGE_13:
- cx_clear(MO_GP0_IO, 0x20);
- break;
- case SEC_VOLTAGE_18:
- cx_set(MO_GP0_IO, 0x20);
- break;
- case SEC_VOLTAGE_OFF:
- cx_clear(MO_GP0_IO, 0x20);
- break;
+ case SEC_VOLTAGE_13:
+ cx_clear(MO_GP0_IO, 0x20);
+ break;
+ case SEC_VOLTAGE_18:
+ cx_set(MO_GP0_IO, 0x20);
+ break;
+ case SEC_VOLTAGE_OFF:
+ cx_clear(MO_GP0_IO, 0x20);
+ break;
}
if (core->prev_set_voltage)
@@ -445,23 +450,49 @@ static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
return 0;
}
-static struct cx24123_config geniatech_dvbs_config = {
+static int vp1027_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx8802_dev *dev = fe->dvb->priv;
+ struct cx88_core *core = dev->core;
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ dprintk(1, "LNB SEC Voltage=13\n");
+ cx_write(MO_GP0_IO, 0x00001220);
+ break;
+ case SEC_VOLTAGE_18:
+ dprintk(1, "LNB SEC Voltage=18\n");
+ cx_write(MO_GP0_IO, 0x00001222);
+ break;
+ case SEC_VOLTAGE_OFF:
+ dprintk(1, "LNB Voltage OFF\n");
+ cx_write(MO_GP0_IO, 0x00001230);
+ break;
+ }
+
+ if (core->prev_set_voltage)
+ return core->prev_set_voltage(fe, voltage);
+ return 0;
+}
+
+static const struct cx24123_config geniatech_dvbs_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
-static struct cx24123_config hauppauge_novas_config = {
+static const struct cx24123_config hauppauge_novas_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
-static struct cx24123_config kworld_dvbs_100_config = {
+static const struct cx24123_config kworld_dvbs_100_config = {
.demod_address = 0x15,
.set_ts_params = cx24123_set_ts_param,
.lnb_polarity = 1,
};
-static struct s5h1409_config pinnacle_pctv_hd_800i_config = {
+static const struct s5h1409_config pinnacle_pctv_hd_800i_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_PARALLEL_OUTPUT,
.gpio = S5H1409_GPIO_ON,
@@ -471,7 +502,7 @@ static struct s5h1409_config pinnacle_pctv_hd_800i_config = {
.mpeg_timing = S5H1409_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK,
};
-static struct s5h1409_config dvico_hdtv5_pci_nano_config = {
+static const struct s5h1409_config dvico_hdtv5_pci_nano_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
@@ -480,7 +511,7 @@ static struct s5h1409_config dvico_hdtv5_pci_nano_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static struct s5h1409_config kworld_atsc_120_config = {
+static const struct s5h1409_config kworld_atsc_120_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
@@ -489,24 +520,24 @@ static struct s5h1409_config kworld_atsc_120_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
+static const struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
.i2c_address = 0x64,
.if_khz = 5380,
};
-static struct zl10353_config cx88_pinnacle_hybrid_pctv = {
+static const struct zl10353_config cx88_pinnacle_hybrid_pctv = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
-static struct zl10353_config cx88_geniatech_x8000_mt = {
+static const struct zl10353_config cx88_geniatech_x8000_mt = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.disable_i2c_gate_ctrl = 1,
};
-static struct s5h1411_config dvico_fusionhdtv7_config = {
+static const struct s5h1411_config dvico_fusionhdtv7_config = {
.output_mode = S5H1411_SERIAL_OUTPUT,
.gpio = S5H1411_GPIO_ON,
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
@@ -516,7 +547,7 @@ static struct s5h1411_config dvico_fusionhdtv7_config = {
.status_mode = S5H1411_DEMODLOCKING
};
-static struct xc5000_config dvico_fusionhdtv7_tuner_config = {
+static const struct xc5000_config dvico_fusionhdtv7_tuner_config = {
.i2c_address = 0xc2 >> 1,
.if_khz = 5380,
};
@@ -601,19 +632,19 @@ static int cx24116_reset_device(struct dvb_frontend *fe)
return 0;
}
-static struct cx24116_config hauppauge_hvr4000_config = {
+static const struct cx24116_config hauppauge_hvr4000_config = {
.demod_address = 0x05,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
-static struct cx24116_config tevii_s460_config = {
+static const struct cx24116_config tevii_s460_config = {
.demod_address = 0x55,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
-static struct stv0900_config prof_7301_stv0900_config = {
+static const struct stv0900_config prof_7301_stv0900_config = {
.demod_address = 0x6a,
/* demod_mode = 0,*/
.xtal = 27000000,
@@ -625,12 +656,12 @@ static struct stv0900_config prof_7301_stv0900_config = {
.set_ts_params = stv0900_set_ts_param,
};
-static struct stb6100_config prof_7301_stb6100_config = {
+static const struct stb6100_config prof_7301_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
-static struct stv0299_config tevii_tuner_sharp_config = {
+static const struct stv0299_config tevii_tuner_sharp_config = {
.demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
@@ -643,7 +674,7 @@ static struct stv0299_config tevii_tuner_sharp_config = {
.set_ts_params = cx24116_set_ts_param,
};
-static struct stv0288_config tevii_tuner_earda_config = {
+static const struct stv0288_config tevii_tuner_earda_config = {
.demod_address = 0x68,
.min_delay_ms = 100,
.set_ts_params = cx24116_set_ts_param,
@@ -676,7 +707,7 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
-static u8 samsung_smt_7020_inittab[] = {
+static const u8 samsung_smt_7020_inittab[] = {
0x01, 0x15,
0x02, 0x00,
0x03, 0x00,
@@ -850,7 +881,7 @@ static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
}
-static struct stv0299_config samsung_stv0299_config = {
+static const struct stv0299_config samsung_stv0299_config = {
.demod_address = 0x68,
.inittab = samsung_smt_7020_inittab,
.mclk = 88000000UL,
@@ -1416,6 +1447,18 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ dev->ts_gen_cntrl = 0x00;
+ fe0->dvb.frontend = dvb_attach(mb86a16_attach,
+ &twinhan_vp1027,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
+ core->prev_set_voltage =
+ fe0->dvb.frontend->ops.set_voltage;
+ fe0->dvb.frontend->ops.set_voltage =
+ vp1027_set_voltage;
+ }
+ break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
@@ -1576,7 +1619,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_TOP,
sizeof(struct cx88_buffer),
- dev);
+ dev, NULL);
/* init struct videobuf_dvb */
fe->dvb.name = dev->core->name;
}
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 82db555b22dd..f53836bb6a5a 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -108,7 +108,7 @@ static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static char *i2c_devs[128] = {
+static const char * const i2c_devs[128] = {
[ 0x1c >> 1 ] = "lgdt330x",
[ 0x86 >> 1 ] = "tda9887/cx22702",
[ 0xa0 >> 1 ] = "eeprom",
@@ -117,7 +117,7 @@ static char *i2c_devs[128] = {
[ 0xc8 >> 1 ] = "xc5000",
};
-static void do_i2c_scan(char *name, struct i2c_client *c)
+static void do_i2c_scan(const char *name, struct i2c_client *c)
{
unsigned char buf;
int i,rc;
@@ -183,30 +183,3 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
return core->i2c_rc;
}
-
-void cx88_i2c_init_ir(struct cx88_core *core)
-{
- /* Instantiate the IR receiver device, if present */
- if (0 == core->i2c_rc) {
- struct i2c_board_info info;
- const unsigned short addr_list[] = {
- 0x18, 0x6b, 0x71,
- I2C_CLIENT_END
- };
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
- /* Use quick read command for probe, some IR chips don't
- * support writes */
- i2c_new_probed_device(&core->i2c_adap, &info, addr_list,
- i2c_probe_func_quick_read);
- }
-}
-
-/* ----------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index eccc5e49a350..fc777bc6e716 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -405,6 +405,11 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->mask_keycode = 0x7e;
ir->polling = 100; /* ms */
break;
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
+ ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
+ ir_type = IR_TYPE_NEC;
+ ir->sampling = 0xff00; /* address */
+ break;
}
if (NULL == ir_codes) {
@@ -530,6 +535,7 @@ void cx88_ir_irq(struct cx88_core *core)
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_PROF_6200:
+ case CX88_BOARD_TWINHAN_VP1027_DVBS:
ircode = ir_decode_pulsedistance(ir->samples, ir->scount, 1, 4);
if (ircode == 0xffffffff) { /* decoding error */
@@ -609,13 +615,54 @@ void cx88_ir_irq(struct cx88_core *core)
return;
}
+
+void cx88_i2c_init_ir(struct cx88_core *core)
+{
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x18, 0x6b, 0x71,
+ I2C_CLIENT_END
+ };
+ const unsigned short *addrp;
+ /* Instantiate the IR receiver device, if present */
+ if (0 != core->i2c_rc)
+ return;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ /*
+ * We can't call i2c_new_probed_device() because it uses
+ * quick writes for probing and at least some RC receiver
+ * devices only reply to reads.
+ * Also, Hauppauge XVR needs to be specified, as address 0x71
+ * conflicts with another remote type used with saa7134
+ */
+ for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
+ info.platform_data = NULL;
+ memset(&core->init_data, 0, sizeof(core->init_data));
+
+ if (*addrp == 0x71) {
+ /* Hauppauge XVR */
+ core->init_data.name = "cx88 Hauppauge XVR remote";
+ core->init_data.ir_codes = RC_MAP_HAUPPAUGE_NEW;
+ core->init_data.type = IR_TYPE_RC5;
+ core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+
+ info.platform_data = &core->init_data;
+ }
+ if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
+ I2C_SMBUS_READ, 0,
+ I2C_SMBUS_QUICK, NULL) >= 0) {
+ info.addr = *addrp;
+ i2c_new_device(&core->i2c_adap, &info);
+ break;
+ }
+ }
+}
+
/* ---------------------------------------------------------------------- */
MODULE_AUTHOR("Gerd Knorr, Pavel Machek, Chris Pascoe");
MODULE_DESCRIPTION("input driver for cx88 GPIO-based IR remote controls");
MODULE_LICENSE("GPL");
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 499f8d512ad6..f7d71acbb078 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -313,7 +313,7 @@ void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
/* ----------------------------------------------------------- */
-static void do_cancel_buffers(struct cx8802_dev *dev, char *reason, int restart)
+static void do_cancel_buffers(struct cx8802_dev *dev, const char *reason, int restart)
{
struct cx88_dmaqueue *q = &dev->mpegq;
struct cx88_buffer *buf;
@@ -358,7 +358,7 @@ static void cx8802_timeout(unsigned long data)
do_cancel_buffers(dev,"timeout",1);
}
-static char *cx88_mpeg_irqs[32] = {
+static const char *cx88_mpeg_irqs[32] = {
"ts_risci1", NULL, NULL, NULL,
"ts_risci2", NULL, NULL, NULL,
"ts_oflow", NULL, NULL, NULL,
@@ -849,7 +849,7 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
kfree(dev);
}
-static struct pci_device_id cx8802_pci_tbl[] = {
+static const struct pci_device_id cx8802_pci_tbl[] = {
{
.vendor = 0x14f1,
.device = 0x8802,
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 239631568f3b..08220de3d74d 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -70,7 +70,7 @@ MODULE_PARM_DESC(radio_deemphasis, "Radio deemphasis time constant, "
/* ----------------------------------------------------------- */
-static char *aud_ctl_names[64] = {
+static const char * const aud_ctl_names[64] = {
[EN_BTSC_FORCE_MONO] = "BTSC_FORCE_MONO",
[EN_BTSC_FORCE_STEREO] = "BTSC_FORCE_STEREO",
[EN_BTSC_FORCE_SAP] = "BTSC_FORCE_SAP",
@@ -360,7 +360,15 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
set_audio_registers(core, nicam_bgdki_common);
set_audio_registers(core, nicam_i);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_BG:
+ case WW_DK:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ case WW_M:
dprintk("%s PAL-BGDK NICAM (status: known-good)\n", __func__);
set_audio_registers(core, nicam_bgdki_common);
set_audio_registers(core, nicam_default);
@@ -621,7 +629,13 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
dprintk("%s AM-L (status: devel)\n", __func__);
set_audio_registers(core, am_l);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
+ case WW_M:
dprintk("%s Warning: wrong value\n", __func__);
return;
break;
@@ -779,7 +793,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
set_audio_finish(core, EN_I2SIN_ENABLE);
break;
case WW_NONE:
- default:
+ case WW_I2SPT:
printk("%s/0: unknown tv audio mode [%d]\n",
core->name, core->tvaudio);
break;
@@ -795,8 +809,8 @@ void cx88_newstation(struct cx88_core *core)
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
{
- static char *m[] = { "stereo", "dual mono", "mono", "sap" };
- static char *p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
+ static const char * const m[] = { "stereo", "dual mono", "mono", "sap" };
+ static const char * const p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
u32 reg, mode, pilot;
reg = cx_read(AUD_STATUS);
@@ -840,7 +854,12 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
break;
}
break;
- default:
+ case WW_NONE:
+ case WW_I:
+ case WW_L:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
/* nothing */
break;
}
@@ -945,6 +964,9 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
}
break;
case WW_I2SADC:
+ case WW_NONE:
+ case WW_EIAJ:
+ case WW_I2SPT:
/* DO NOTHING */
break;
}
@@ -1000,7 +1022,12 @@ int cx88_audio_thread(void *data)
/* automatically switch to best available mode */
cx88_set_stereo(core, mode, 0);
break;
- default:
+ case WW_NONE:
+ case WW_BTSC:
+ case WW_EIAJ:
+ case WW_I2SPT:
+ case WW_FM:
+ case WW_I2SADC:
hw_autodetect:
/* stereo autodetection is supported by hardware so
we don't need to do it manually. Do nothing. */
diff --git a/drivers/media/video/cx88/cx88-vbi.c b/drivers/media/video/cx88/cx88-vbi.c
index d9445b0e7ab2..f8f8389c0362 100644
--- a/drivers/media/video/cx88/cx88-vbi.c
+++ b/drivers/media/video/cx88/cx88-vbi.c
@@ -230,7 +230,7 @@ static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
cx88_free_buffer(q,buf);
}
-struct videobuf_queue_ops cx8800_vbi_qops = {
+const struct videobuf_queue_ops cx8800_vbi_qops = {
.buf_setup = vbi_setup,
.buf_prepare = vbi_prepare,
.buf_queue = vbi_queue,
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0fab65c3ab39..d2f159daa8b5 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -41,6 +41,7 @@
#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/wm8775.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -78,7 +79,7 @@ MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
/* ------------------------------------------------------------------- */
/* static data */
-static struct cx8800_fmt formats[] = {
+static const struct cx8800_fmt formats[] = {
{
.name = "8 bpp, gray",
.fourcc = V4L2_PIX_FMT_GREY,
@@ -142,7 +143,7 @@ static struct cx8800_fmt formats[] = {
},
};
-static struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
+static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
@@ -159,7 +160,7 @@ static const struct v4l2_queryctrl no_ctl = {
.flags = V4L2_CTRL_FLAG_DISABLED,
};
-static struct cx88_ctrl cx8800_ctls[] = {
+static const struct cx88_ctrl cx8800_ctls[] = {
/* --- video --- */
{
.v = {
@@ -288,7 +289,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.shift = 0,
}
};
-static const int CX8800_CTLS = ARRAY_SIZE(cx8800_ctls);
+enum { CX8800_CTLS = ARRAY_SIZE(cx8800_ctls) };
/* Must be sorted from low to high control ID! */
const u32 cx88_user_ctrls[] = {
@@ -306,7 +307,7 @@ const u32 cx88_user_ctrls[] = {
};
EXPORT_SYMBOL(cx88_user_ctrls);
-static const u32 *ctrl_classes[] = {
+static const u32 * const ctrl_classes[] = {
cx88_user_ctrls,
NULL
};
@@ -710,7 +711,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
cx88_free_buffer(q,buf);
}
-static struct videobuf_queue_ops cx8800_video_qops = {
+static const struct videobuf_queue_ops cx8800_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -752,7 +753,7 @@ static int video_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct cx8800_dev *dev = video_drvdata(file);
- struct cx88_core *core;
+ struct cx88_core *core = dev->core;
struct cx8800_fh *fh;
enum v4l2_buf_type type = 0;
int radio = 0;
@@ -769,19 +770,14 @@ static int video_open(struct file *file)
break;
}
- lock_kernel();
-
- core = dev->core;
-
dprintk(1, "open dev=%s radio=%d type=%s\n",
video_device_node_name(vdev), radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (unlikely(!fh))
return -ENOMEM;
- }
+
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -790,18 +786,20 @@ static int video_open(struct file *file)
fh->height = 240;
fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ mutex_lock(&core->lock);
+
videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct cx88_buffer),
- fh);
+ fh, NULL);
if (fh->radio) {
dprintk(1,"video_open: setting radio device\n");
@@ -826,9 +824,9 @@ static int video_open(struct file *file)
}
call_all(core, tuner, s_radio);
}
- unlock_kernel();
atomic_inc(&core->users);
+ mutex_unlock(&core->lock);
return 0;
}
@@ -920,10 +918,11 @@ static int video_release(struct file *file)
videobuf_mmap_free(&fh->vidq);
videobuf_mmap_free(&fh->vbiq);
+
+ mutex_lock(&dev->core->lock);
file->private_data = NULL;
kfree(fh);
- mutex_lock(&dev->core->lock);
if(atomic_dec_and_test(&dev->core->users))
call_all(dev->core, core, s_power, 0);
mutex_unlock(&dev->core->lock);
@@ -944,7 +943,7 @@ video_mmap(struct file *file, struct vm_area_struct * vma)
int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
{
- struct cx88_ctrl *c = NULL;
+ const struct cx88_ctrl *c = NULL;
u32 value;
int i;
@@ -976,9 +975,10 @@ EXPORT_SYMBOL(cx88_get_control);
int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
{
- struct cx88_ctrl *c = NULL;
+ const struct cx88_ctrl *c = NULL;
u32 value,mask;
int i;
+ struct v4l2_control client_ctl;
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == ctl->id) {
@@ -992,6 +992,27 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
ctl->value = c->v.minimum;
if (ctl->value > c->v.maximum)
ctl->value = c->v.maximum;
+
+ /* Pass changes onto any WM8775 */
+ client_ctl.id = ctl->id;
+ switch (ctl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ client_ctl.value = ctl->value;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ client_ctl.value = (ctl->value) ?
+ (0x90 + ctl->value) << 8 : 0;
+ break;
+ case V4L2_CID_AUDIO_BALANCE:
+ client_ctl.value = ctl->value << 9;
+ break;
+ default:
+ client_ctl.id = 0;
+ break;
+ }
+ if (client_ctl.id)
+ call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+
mask=c->mask;
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
@@ -1072,7 +1093,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1247,7 +1268,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *tvnorms)
/* only one input in this sample driver */
int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
{
- static const char *iname[] = {
+ static const char * const iname[] = {
[ CX88_VMUX_COMPOSITE1 ] = "Composite1",
[ CX88_VMUX_COMPOSITE2 ] = "Composite2",
[ CX88_VMUX_COMPOSITE3 ] = "Composite3",
@@ -1267,9 +1288,10 @@ int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name,iname[INPUT(n).type]);
if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
- (CX88_VMUX_CABLE == INPUT(n).type))
+ (CX88_VMUX_CABLE == INPUT(n).type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX88_NORMS;
+ }
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
@@ -1537,7 +1559,9 @@ static int radio_queryctrl (struct file *file, void *priv,
if (c->id < V4L2_CID_BASE ||
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE) {
+ if (c->id == V4L2_CID_AUDIO_MUTE ||
+ c->id == V4L2_CID_AUDIO_VOLUME ||
+ c->id == V4L2_CID_AUDIO_BALANCE) {
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
@@ -1578,7 +1602,7 @@ static void cx8800_vid_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock,flags);
}
-static char *cx88_vid_irqs[32] = {
+static const char *cx88_vid_irqs[32] = {
"y_risci1", "u_risci1", "v_risci1", "vbi_risc1",
"y_risci2", "u_risci2", "v_risci2", "vbi_risc2",
"y_oflow", "u_oflow", "v_oflow", "vbi_oflow",
@@ -1723,7 +1747,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static struct video_device cx8800_vbi_template;
-static struct video_device cx8800_video_template = {
+static const struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
.ioctl_ops = &video_ioctl_ops,
@@ -1758,7 +1782,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
#endif
};
-static struct video_device cx8800_radio_template = {
+static const struct video_device cx8800_radio_template = {
.name = "cx8800-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
@@ -1872,20 +1896,20 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
if (core->board.audio_chip == V4L2_IDENT_WM8775)
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
- "wm8775", "wm8775", 0x36 >> 1, NULL);
+ NULL, "wm8775", 0x36 >> 1, NULL);
if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) {
/* This probes for a tda9874 as is used on some
Pixelview Ultra boards. */
v4l2_i2c_new_subdev(&core->v4l2_dev,
&core->i2c_adap,
- "tvaudio", "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+ NULL, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
}
switch (core->boardnr) {
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: {
- static struct i2c_board_info rtc_info = {
+ static const struct i2c_board_info rtc_info = {
I2C_BOARD_INFO("isl1208", 0x6f)
};
@@ -2082,7 +2106,7 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* ----------------------------------------------------------- */
-static struct pci_device_id cx8800_pci_tbl[] = {
+static const struct pci_device_id cx8800_pci_tbl[] = {
{
.vendor = 0x14f1,
.device = 0x8800,
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 794f2932b755..ec5476d8b10b 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -121,8 +121,6 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
sizeof(vp3054_i2c->algo));
- vp3054_i2c->adap.class |= I2C_CLASS_TV_DIGITAL;
-
vp3054_i2c->adap.dev.parent = &dev->pci->dev;
strlcpy(vp3054_i2c->adap.name, core->name,
sizeof(vp3054_i2c->adap.name));
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 33d161a11725..e8c732e7ae4f 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -31,9 +31,8 @@
#include <media/videobuf-dma-sg.h>
#include <media/v4l2-chip-ident.h>
#include <media/cx2341x.h>
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
#include <media/videobuf-dvb.h>
-#endif
+#include <media/ir-kbd-i2c.h>
#include "btcx-risc.h"
#include "cx88-reg.h"
@@ -108,7 +107,7 @@ static unsigned int inline norm_maxh(v4l2_std_id norm)
/* static data */
struct cx8800_fmt {
- char *name;
+ const char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
@@ -138,7 +137,7 @@ struct cx88_ctrl {
/* more */
struct sram_channel {
- char *name;
+ const char *name;
u32 cmds_start;
u32 ctrl_start;
u32 cdt;
@@ -149,7 +148,7 @@ struct sram_channel {
u32 cnt1_reg;
u32 cnt2_reg;
};
-extern struct sram_channel cx88_sram_channels[];
+extern const struct sram_channel cx88_sram_channels[];
/* ----------------------------------------------------------- */
/* card configuration */
@@ -240,6 +239,7 @@ extern struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV2000H_J 82
#define CX88_BOARD_PROF_7301 83
#define CX88_BOARD_SAMSUNG_SMT_7020 84
+#define CX88_BOARD_TWINHAN_VP1027_DVBS 85
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -262,7 +262,7 @@ struct cx88_input {
};
struct cx88_board {
- char *name;
+ const char *name;
unsigned int tuner_type;
unsigned int radio_type;
unsigned char tuner_addr;
@@ -281,6 +281,20 @@ struct cx88_subid {
u32 card;
};
+enum cx88_tvaudio {
+ WW_NONE = 1,
+ WW_BTSC,
+ WW_BG,
+ WW_DK,
+ WW_I,
+ WW_L,
+ WW_EIAJ,
+ WW_I2SPT,
+ WW_FM,
+ WW_I2SADC,
+ WW_M
+};
+
#define INPUT(nr) (core->board.input[nr])
/* ----------------------------------------------------------- */
@@ -300,7 +314,7 @@ struct cx88_buffer {
/* cx88 specific */
unsigned int bpl;
struct btcx_riscmem risc;
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
u32 count;
};
@@ -352,7 +366,7 @@ struct cx88_core {
/* state info */
struct task_struct *kthread;
v4l2_std_id tvnorm;
- u32 tvaudio;
+ enum cx88_tvaudio tvaudio;
u32 audiomode_manual;
u32 audiomode_current;
u32 input;
@@ -363,6 +377,9 @@ struct cx88_core {
/* IR remote control state */
struct cx88_IR *ir;
+ /* I2C remote data */
+ struct IR_i2c_init_data init_data;
+
struct mutex lock;
/* various v4l controls */
u32 freq;
@@ -381,17 +398,19 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
return container_of(v4l2_dev, struct cx88_core, v4l2_dev);
}
-#define call_all(core, o, f, args...) \
+#define call_hw(core, grpid, o, f, args...) \
do { \
if (!core->i2c_rc) { \
if (core->gate_ctrl) \
core->gate_ctrl(core, 1); \
- v4l2_device_call_all(&core->v4l2_dev, 0, o, f, ##args); \
+ v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
if (core->gate_ctrl) \
core->gate_ctrl(core, 0); \
} \
} while (0)
+#define call_all(core, o, f, args...) call_hw(core, 0, o, f, ##args)
+
struct cx8800_dev;
struct cx8802_dev;
@@ -410,7 +429,7 @@ struct cx8800_fh {
unsigned int nclips;
/* video capture */
- struct cx8800_fmt *fmt;
+ const struct cx8800_fmt *fmt;
unsigned int width,height;
struct videobuf_queue vidq;
@@ -565,7 +584,7 @@ struct cx8802_dev {
/* ----------------------------------------------------------- */
/* cx88-core.c */
-extern void cx88_print_irqbits(char *name, char *tag, char **strings,
+extern void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
int len, u32 bits, u32 mask);
extern int cx88_core_irq(struct cx88_core *core, u32 status);
@@ -592,10 +611,10 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf);
extern void cx88_risc_disasm(struct cx88_core *core,
struct btcx_riscmem *risc);
extern int cx88_sram_channel_setup(struct cx88_core *core,
- struct sram_channel *ch,
+ const struct sram_channel *ch,
unsigned int bpl, u32 risc);
extern void cx88_sram_channel_dump(struct cx88_core *core,
- struct sram_channel *ch);
+ const struct sram_channel *ch);
extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
unsigned int height, enum v4l2_field field);
@@ -603,8 +622,8 @@ extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
extern struct video_device *cx88_vdev_init(struct cx88_core *core,
struct pci_dev *pci,
- struct video_device *template,
- char *type);
+ const struct video_device *template_,
+ const char *type);
extern struct cx88_core* cx88_core_get(struct pci_dev *pci);
extern void cx88_core_put(struct cx88_core *core,
struct pci_dev *pci);
@@ -630,13 +649,12 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
struct cx88_dmaqueue *q);
void cx8800_vbi_timeout(unsigned long data);
-extern struct videobuf_queue_ops cx8800_vbi_qops;
+extern const struct videobuf_queue_ops cx8800_vbi_qops;
/* ----------------------------------------------------------- */
/* cx88-i2c.c */
extern int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
-extern void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
@@ -651,18 +669,6 @@ extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
/* ----------------------------------------------------------- */
/* cx88-tvaudio.c */
-#define WW_NONE 1
-#define WW_BTSC 2
-#define WW_BG 3
-#define WW_DK 4
-#define WW_I 5
-#define WW_L 6
-#define WW_EIAJ 7
-#define WW_I2SPT 8
-#define WW_FM 9
-#define WW_I2SADC 10
-#define WW_M 11
-
void cx88_set_tvaudio(struct cx88_core *core);
void cx88_newstation(struct cx88_core *core);
void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t);
@@ -686,6 +692,7 @@ int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
int cx88_ir_start(struct cx88_core *core);
void cx88_ir_stop(struct cx88_core *core);
+extern void cx88_i2c_init_ir(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
@@ -705,10 +712,3 @@ int cx88_set_freq (struct cx88_core *core,struct v4l2_frequency *f);
int cx88_get_control(struct cx88_core *core, struct v4l2_control *ctl);
int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
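
The call_hw()/call_all() split in cx88.h is what lets the driver address the WM8775 on its own: v4l2_device_call_all() with a non-zero group id only reaches subdevices whose grp_id matches, while group id 0 keeps the old broadcast behaviour. A minimal usage sketch, assuming the WM8775 subdevice has been registered with grp_id set to WM8775_GID (the cx88-alsa.c hunk above tests exactly that field); example_wm8775_mute() is illustrative only.

/* Usage sketch (not part of the patch). */
static void example_wm8775_mute(struct cx88_core *core, int mute)
{
        struct v4l2_control ctl = {
                .id = V4L2_CID_AUDIO_MUTE,
                .value = mute,
        };

        /* non-zero group id: only subdevs with grp_id == WM8775_GID react */
        call_hw(core, WM8775_GID, core, s_ctrl, &ctl);

        /* group id 0: unchanged broadcast, identical to the old call_all() */
        call_all(core, core, s_power, 1);
}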
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 1c2588247289..d8e38cc4ec40 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -370,7 +370,7 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
* For a given standard, this functions sets up the default
* pix format & crop values in the vpfe device and ccdc. It first
* starts with defaults based values from the standard table.
- * It then checks if sub device support g_fmt and then override the
+ * It then checks if the sub device supports g_mbus_fmt and then overrides the
* values based on that.Sets crop values to match with scan resolution
* starting at 0,0. It calls vpfe_config_ccdc_image_format() set the
* values in ccdc
@@ -379,6 +379,8 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
const v4l2_std_id *std_id)
{
struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -403,29 +405,36 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
vpfe_dev->crop.left = 0;
vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
- vpfe_dev->fmt.fmt.pix.width = vpfe_dev->crop.width;
- vpfe_dev->fmt.fmt.pix.height = vpfe_dev->crop.height;
+ pix->width = vpfe_dev->crop.width;
+ pix->height = vpfe_dev->crop.height;
/* first field and frame format based on standard frame format */
if (vpfe_dev->std_info.frame_format) {
- vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+ pix->field = V4L2_FIELD_INTERLACED;
/* assume V4L2_PIX_FMT_UYVY as default */
- vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ pix->pixelformat = V4L2_PIX_FMT_UYVY;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_YUYV10_2X10);
} else {
- vpfe_dev->fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ pix->field = V4L2_FIELD_NONE;
/* assume V4L2_PIX_FMT_SBGGR8 */
- vpfe_dev->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
+ pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ v4l2_fill_mbus_format(&mbus_fmt, pix,
+ V4L2_MBUS_FMT_SBGGR8_1X8);
}
- /* if sub device supports g_fmt, override the defaults */
+ /* if sub device supports g_mbus_fmt, override the defaults */
ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, video, g_fmt, &vpfe_dev->fmt);
+ sdinfo->grp_id, video, g_mbus_fmt, &mbus_fmt);
if (ret && ret != -ENOIOCTLCMD) {
v4l2_err(&vpfe_dev->v4l2_dev,
- "error in getting g_fmt from sub device\n");
+ "error in getting g_mbus_fmt from sub device\n");
return ret;
}
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
/* Sets the values in CCDC */
ret = vpfe_config_ccdc_image_format(vpfe_dev);
@@ -434,11 +443,8 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
/* Update the values of sizeimage and bytesperline */
if (!ret) {
- vpfe_dev->fmt.fmt.pix.bytesperline =
- ccdc_dev->hw_ops.get_line_length();
- vpfe_dev->fmt.fmt.pix.sizeimage =
- vpfe_dev->fmt.fmt.pix.bytesperline *
- vpfe_dev->fmt.fmt.pix.height;
+ pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+ pix->sizeimage = pix->bytesperline * pix->height;
}
return ret;
}
@@ -1366,7 +1372,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
req_buf->type,
vpfe_dev->fmt.fmt.pix.field,
sizeof(struct videobuf_buffer),
- fh);
+ fh, NULL);
fh->io_allowed = 1;
vpfe_dev->io_usrs = 1;
@@ -1980,7 +1986,7 @@ static __init int vpfe_probe(struct platform_device *pdev)
vpfe_dev->sd[i] =
v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
i2c_adap,
- sdinfo->name,
+ NULL,
&sdinfo->board_info,
NULL);
if (vpfe_dev->sd[i]) {
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index a7f48b53d3fc..6ac6acd16352 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -731,7 +731,6 @@ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
*/
static unsigned int vpif_poll(struct file *filep, poll_table * wait)
{
- int err = 0;
struct vpif_fh *fh = filep->private_data;
struct channel_obj *channel = fh->channel;
struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]);
@@ -739,8 +738,7 @@ static unsigned int vpif_poll(struct file *filep, poll_table * wait)
vpif_dbg(2, debug, "vpif_poll\n");
if (common->started)
- err = videobuf_poll_stream(filep, &common->buffer_queue, wait);
-
+ return videobuf_poll_stream(filep, &common->buffer_queue, wait);
return 0;
}
@@ -793,7 +791,7 @@ static int vpif_open(struct file *filep)
}
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+ fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (NULL == fh) {
vpif_err("unable to allocate memory for file handle object\n");
ret = -ENOMEM;
@@ -929,7 +927,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
&common->irqlock,
reqbuf->type,
common->fmt.fmt.pix.field,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh,
+ NULL);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -1030,9 +1029,10 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
goto qbuf_exit;
if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr))
+ && (buf1->baddr != tbuf.m.userptr)) {
vpif_buffer_release(&common->buffer_queue, buf1);
buf1->baddr = tbuf.m.userptr;
+ }
break;
default:
@@ -1994,7 +1994,7 @@ static __init int vpif_probe(struct platform_device *pdev)
config = pdev->dev.platform_data;
subdev_count = config->subdev_count;
- vpif_obj.sd = kmalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
@@ -2013,7 +2013,7 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.sd[i] =
v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
i2c_adap,
- subdevdata->name,
+ NULL,
&subdevdata->board_info,
NULL);
@@ -2113,7 +2113,7 @@ static const struct dev_pm_ops vpif_dev_pm_ops = {
.resume = vpif_resume,
};
-static struct platform_driver vpif_driver = {
+static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_capture",
.owner = THIS_MODULE,
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index da07607cbc55..685f6a6ee603 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -600,7 +600,7 @@ static int vpif_open(struct file *filep)
ch = video_get_drvdata(vdev);
/* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(struct vpif_fh), GFP_KERNEL);
+ fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
if (fh == NULL) {
vpif_err("unable to allocate memory for file handle object\n");
return -ENOMEM;
@@ -853,7 +853,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
&video_qops, NULL,
&common->irqlock,
reqbuf->type, field,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh,
+ NULL);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -935,9 +936,10 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
goto qbuf_exit;
if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr))
+ && (buf1->baddr != tbuf.m.userptr)) {
vpif_buffer_release(&common->buffer_queue, buf1);
buf1->baddr = tbuf.m.userptr;
+ }
break;
default:
@@ -1395,7 +1397,7 @@ static int initialize_vpif(void)
/* Allocate memory for six channel objects */
for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
vpif_obj.dev[i] =
- kmalloc(sizeof(struct channel_obj), GFP_KERNEL);
+ kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
/* If memory allocation fails, return error */
if (!vpif_obj.dev[i]) {
free_channel_objects_index = i;
@@ -1541,7 +1543,7 @@ static __init int vpif_probe(struct platform_device *pdev)
config = pdev->dev.platform_data;
subdev_count = config->subdev_count;
subdevdata = config->subdevinfo;
- vpif_obj.sd = kmalloc(sizeof(struct v4l2_subdev *) * subdev_count,
+ vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
vpif_err("unable to allocate memory for subdevice pointers\n");
@@ -1551,7 +1553,7 @@ static __init int vpif_probe(struct platform_device *pdev)
for (i = 0; i < subdev_count; i++) {
vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
- i2c_adap, subdevdata[i].name,
+ i2c_adap, NULL,
&subdevdata[i].board_info,
NULL);
if (!vpif_obj.sd[i]) {
@@ -1610,7 +1612,7 @@ static int vpif_remove(struct platform_device *device)
return 0;
}
-static struct platform_driver vpif_driver = {
+static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_display",
.owner = THIS_MODULE,
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index e182abf476c9..3c48a72eb7de 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -102,6 +102,9 @@ static void em28xx_audio_isocirq(struct urb *urb)
break;
}
+ if (atomic_read(&dev->stream_started) == 0)
+ return;
+
if (dev->adev.capture_pcm_substream) {
substream = dev->adev.capture_pcm_substream;
runtime = substream->runtime;
@@ -217,31 +220,6 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
return 0;
}
-static int em28xx_cmd(struct em28xx *dev, int cmd, int arg)
-{
- dprintk("%s transfer\n", (dev->adev.capture_stream == STREAM_ON) ?
- "stop" : "start");
-
- switch (cmd) {
- case EM28XX_CAPTURE_STREAM_EN:
- if (dev->adev.capture_stream == STREAM_OFF &&
- arg == EM28XX_START_AUDIO) {
- dev->adev.capture_stream = STREAM_ON;
- em28xx_init_audio_isoc(dev);
- } else if (dev->adev.capture_stream == STREAM_ON &&
- arg == EM28XX_STOP_AUDIO) {
- dev->adev.capture_stream = STREAM_OFF;
- em28xx_deinit_isoc_audio(dev);
- } else {
- em28xx_errdev("An underrun very likely occurred. "
- "Ignoring it.\n");
- }
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
size_t size)
{
@@ -303,7 +281,6 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
dev->mute = 0;
mutex_lock(&dev->lock);
ret = em28xx_audio_analog_set(dev);
- mutex_unlock(&dev->lock);
if (ret < 0)
goto err;
@@ -311,11 +288,10 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
if (dev->alt == 0 && dev->adev.users == 0) {
int errCode;
dev->alt = 7;
- errCode = usb_set_interface(dev->udev, 0, 7);
dprintk("changing alternate number to 7\n");
+ errCode = usb_set_interface(dev->udev, 0, 7);
}
- mutex_lock(&dev->lock);
dev->adev.users++;
mutex_unlock(&dev->lock);
@@ -325,6 +301,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
return 0;
err:
+ mutex_unlock(&dev->lock);
+
em28xx_err("Error while configuring em28xx mixer\n");
return ret;
}
@@ -338,6 +316,11 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
dev->mute = 1;
mutex_lock(&dev->lock);
dev->adev.users--;
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
+
em28xx_audio_analog_set(dev);
if (substream->runtime->dma_area) {
dprintk("freeing\n");
@@ -375,8 +358,10 @@ static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream)
dprintk("Stop capture, if needed\n");
- if (dev->adev.capture_stream == STREAM_ON)
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_STOP_AUDIO);
+ if (atomic_read(&dev->stream_started) > 0) {
+ atomic_set(&dev->stream_started, 0);
+ schedule_work(&dev->wq_trigger);
+ }
return 0;
}
@@ -391,31 +376,37 @@ static int snd_em28xx_prepare(struct snd_pcm_substream *substream)
return 0;
}
+static void audio_trigger(struct work_struct *work)
+{
+ struct em28xx *dev = container_of(work, struct em28xx, wq_trigger);
+
+ if (atomic_read(&dev->stream_started)) {
+ dprintk("starting capture");
+ em28xx_init_audio_isoc(dev);
+ } else {
+ dprintk("stopping capture");
+ em28xx_deinit_isoc_audio(dev);
+ }
+}
+
static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
int retval;
- dprintk("Should %s capture\n", (cmd == SNDRV_PCM_TRIGGER_START) ?
- "start" : "stop");
-
- spin_lock(&dev->adev.slock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_START_AUDIO);
- retval = 0;
+ atomic_set(&dev->stream_started, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
- em28xx_cmd(dev, EM28XX_CAPTURE_STREAM_EN, EM28XX_STOP_AUDIO);
- retval = 0;
+		atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
-
- spin_unlock(&dev->adev.slock);
- return retval;
+ schedule_work(&dev->wq_trigger);
+ return 0;
}
static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
@@ -495,6 +486,8 @@ static int em28xx_audio_init(struct em28xx *dev)
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
+ INIT_WORK(&dev->wq_trigger, audio_trigger);
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index e7efb4bffabd..54859233f311 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -187,6 +187,18 @@ static struct em28xx_reg_seq pinnacle_hybrid_pro_digital[] = {
{ -1, -1, -1, -1},
};
+static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_analog[] = {
+ {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
+ {EM2880_R04_GPO, 0x00, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq terratec_cinergy_USB_XS_FR_digital[] = {
+ {EM28XX_R08_GPIO, 0x6e, ~EM_GPIO_4, 10},
+ {EM2880_R04_GPO, 0x08, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
/* eb1a:2868 Reddo DVB-C USB TV Box
GPIO4 - CU1216L NIM
Other GPIOs seems to be don't care. */
@@ -781,22 +793,22 @@ struct em28xx_board em28xx_boards[] = {
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
- .dvb_gpio = default_digital,
+ .dvb_gpio = terratec_cinergy_USB_XS_FR_digital,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
.amux = EM28XX_AMUX_LINE_IN,
- .gpio = default_analog,
+ .gpio = terratec_cinergy_USB_XS_FR_analog,
} },
},
[EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
@@ -1648,6 +1660,22 @@ struct em28xx_board em28xx_boards[] = {
.gpio = terratec_av350_unmute_gpio,
} },
},
+
+ [EM2860_BOARD_ELGATO_VIDEO_CAPTURE] = {
+ .name = "Elgato Video Capture",
+ .decoder = EM28XX_SAA711X,
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
+
[EM2882_BOARD_EVGA_INDTUBE] = {
.name = "Evga inDtube",
.tuner_type = TUNER_XC2028,
@@ -1772,6 +1800,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
+ { USB_DEVICE(0x0fd9, 0x0033),
+ .driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE},
{ USB_DEVICE(0x185b, 0x2870),
.driver_info = EM2870_BOARD_COMPRO_VIDEOMATE },
{ USB_DEVICE(0x185b, 0x2041),
@@ -2168,6 +2198,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
ctl->demod = XC3028_FE_ZARLINK456;
break;
case EM2880_BOARD_TERRATEC_HYBRID_XS:
+ case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
ctl->demod = XC3028_FE_ZARLINK456;
break;
@@ -2523,39 +2554,39 @@ void em28xx_card_setup(struct em28xx *dev)
/* request some modules */
if (dev->board.has_msp34xx)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "msp3400", "msp3400", 0, msp3400_addrs);
+ NULL, "msp3400", 0, msp3400_addrs);
if (dev->board.decoder == EM28XX_SAA711X)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "saa7115", "saa7115_auto", 0, saa711x_addrs);
+ NULL, "saa7115_auto", 0, saa711x_addrs);
if (dev->board.decoder == EM28XX_TVP5150)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tvp5150", "tvp5150", 0, tvp5150_addrs);
+ NULL, "tvp5150", 0, tvp5150_addrs);
if (dev->em28xx_sensor == EM28XX_MT9V011) {
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "mt9v011", "mt9v011", 0, mt9v011_addrs);
+ &dev->i2c_adap, NULL, "mt9v011", 0, mt9v011_addrs);
v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
}
if (dev->board.adecoder == EM28XX_TVAUDIO)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tvaudio", "tvaudio", dev->board.tvaudio_addr, NULL);
+ NULL, "tvaudio", dev->board.tvaudio_addr, NULL);
if (dev->board.tuner_type != TUNER_ABSENT) {
int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
if (dev->board.radio.type)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->board.radio_addr, NULL);
+ NULL, "tuner", dev->board.radio_addr, NULL);
if (has_demod)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (dev->tuner_addr == 0) {
enum v4l2_i2c_tuner_type type =
@@ -2563,14 +2594,14 @@ void em28xx_card_setup(struct em28xx *dev)
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(type));
if (sd)
dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
} else {
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->tuner_addr, NULL);
+ NULL, "tuner", dev->tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7b9ec6e493e4..908e3bc88303 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -277,12 +277,13 @@ static void em28xx_copy_vbi(struct em28xx *dev,
{
void *startwrite, *startread;
int offset;
- int bytesperline = dev->vbi_width;
+ int bytesperline;
if (dev == NULL) {
em28xx_isocdbg("dev is null\n");
return;
}
+ bytesperline = dev->vbi_width;
if (dma_q == NULL) {
em28xx_isocdbg("dma_q is null\n");
@@ -862,17 +863,14 @@ static int res_get(struct em28xx_fh *fh, unsigned int bit)
return 1;
/* is it free? */
- mutex_lock(&dev->lock);
if (dev->resources & bit) {
/* no, someone else uses it */
- mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
dev->resources |= bit;
em28xx_videodbg("res: get %d\n", bit);
- mutex_unlock(&dev->lock);
return 1;
}
@@ -892,11 +890,9 @@ static void res_free(struct em28xx_fh *fh, unsigned int bits)
BUG_ON((fh->resources & bits) != bits);
- mutex_lock(&dev->lock);
fh->resources &= ~bits;
dev->resources &= ~bits;
em28xx_videodbg("res: put %d\n", bits);
- mutex_unlock(&dev->lock);
}
static int get_ressource(struct em28xx_fh *fh)
@@ -1023,8 +1019,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- mutex_lock(&dev->lock);
-
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.pixelformat = dev->format->fourcc;
@@ -1038,8 +1032,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
else
f->fmt.pix.field = dev->interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
-
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1137,22 +1129,15 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
vidioc_try_fmt_vid_cap(file, priv, f);
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
em28xx_errdev("%s queue busy\n", __func__);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- rc = em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
+ return em28xx_set_video_format(dev, f->fmt.pix.pixelformat,
f->fmt.pix.width, f->fmt.pix.height);
-
-out:
- mutex_unlock(&dev->lock);
- return rc;
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
@@ -1181,7 +1166,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
dev->norm = *norm;
/* Adjusts width/height, if needed */
@@ -1197,7 +1181,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
em28xx_resolution_set(dev);
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1302,9 +1285,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
dev->ctl_input = i;
- mutex_lock(&dev->lock);
video_mux(dev, dev->ctl_input);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1365,15 +1346,12 @@ static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a)
if (0 == INPUT(a->index)->type)
return -EINVAL;
- mutex_lock(&dev->lock);
-
dev->ctl_ainput = INPUT(a->index)->amux;
dev->ctl_aoutput = INPUT(a->index)->aout;
if (!dev->ctl_aoutput)
dev->ctl_aoutput = EM28XX_AOUT_MASTER;
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1393,17 +1371,15 @@ static int vidioc_queryctrl(struct file *file, void *priv,
qc->id = id;
- /* enumberate AC97 controls */
+ /* enumerate AC97 controls */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
rc = ac97_queryctrl(qc);
if (!rc)
return 0;
}
- /* enumberate V4L2 device controls */
- mutex_lock(&dev->lock);
+ /* enumerate V4L2 device controls */
v4l2_device_call_all(&dev->v4l2_dev, 0, core, queryctrl, qc);
- mutex_unlock(&dev->lock);
if (qc->type)
return 0;
@@ -1423,7 +1399,6 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
return rc;
rc = 0;
- mutex_lock(&dev->lock);
/* Set an AC97 control */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
@@ -1437,7 +1412,6 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
rc = 0;
}
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1452,8 +1426,6 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
/* Set an AC97 control */
if (dev->audio_mode.ac97 != EM28XX_NO_AC97)
rc = ac97_set_ctrl(dev, ctrl);
@@ -1480,8 +1452,6 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
rc = em28xx_audio_analog_set(dev);
}
}
-
- mutex_unlock(&dev->lock);
return rc;
}
@@ -1502,10 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
strcpy(t->name, "Tuner");
t->type = V4L2_TUNER_ANALOG_TV;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1523,10 +1490,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1536,11 +1500,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- mutex_lock(&dev->lock);
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->ctl_freq;
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1563,13 +1524,9 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
return -EINVAL;
- mutex_lock(&dev->lock);
-
dev->ctl_freq = f->frequency;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
- mutex_unlock(&dev->lock);
-
return 0;
}
@@ -1610,9 +1567,7 @@ static int vidioc_g_register(struct file *file, void *priv,
switch (reg->match.type) {
case V4L2_CHIP_MATCH_AC97:
- mutex_lock(&dev->lock);
ret = em28xx_read_ac97(dev, reg->reg);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1634,9 +1589,7 @@ static int vidioc_g_register(struct file *file, void *priv,
/* Match host */
reg->size = em28xx_reg_len(reg->reg);
if (reg->size == 1) {
- mutex_lock(&dev->lock);
ret = em28xx_read_reg(dev, reg->reg);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1644,10 +1597,8 @@ static int vidioc_g_register(struct file *file, void *priv,
reg->val = ret;
} else {
__le16 val = 0;
- mutex_lock(&dev->lock);
ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
reg->reg, (char *)&val, 2);
- mutex_unlock(&dev->lock);
if (ret < 0)
return ret;
@@ -1663,15 +1614,10 @@ static int vidioc_s_register(struct file *file, void *priv,
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
__le16 buf;
- int rc;
switch (reg->match.type) {
case V4L2_CHIP_MATCH_AC97:
- mutex_lock(&dev->lock);
- rc = em28xx_write_ac97(dev, reg->reg, reg->val);
- mutex_unlock(&dev->lock);
-
- return rc;
+ return em28xx_write_ac97(dev, reg->reg, reg->val);
case V4L2_CHIP_MATCH_I2C_DRIVER:
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_register, reg);
return 0;
@@ -1687,12 +1633,8 @@ static int vidioc_s_register(struct file *file, void *priv,
/* Match host */
buf = cpu_to_le16(reg->val);
- mutex_lock(&dev->lock);
- rc = em28xx_write_regs(dev, reg->reg, (char *)&buf,
+ return em28xx_write_regs(dev, reg->reg, (char *)&buf,
em28xx_reg_len(reg->reg));
- mutex_unlock(&dev->lock);
-
- return rc;
}
#endif
@@ -1829,16 +1771,12 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
-
f->fmt.sliced.service_set = 0;
v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
- mutex_unlock(&dev->lock);
-
return rc;
}
@@ -1853,9 +1791,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
if (rc < 0)
return rc;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
- mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
return -EINVAL;
@@ -2040,9 +1976,7 @@ static int radio_g_tuner(struct file *file, void *priv,
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2075,9 +2009,7 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_tuner, t);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -2137,8 +2069,6 @@ static int em28xx_v4l2_open(struct file *filp)
break;
}
- mutex_lock(&dev->lock);
-
em28xx_videodbg("open dev=%s type=%s users=%d\n",
video_device_node_name(vdev), v4l2_type_names[fh_type],
dev->users);
@@ -2147,7 +2077,6 @@ static int em28xx_v4l2_open(struct file *filp)
fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
if (!fh) {
em28xx_errdev("em28xx-video.c: Out of memory?!\n");
- mutex_unlock(&dev->lock);
return -ENOMEM;
}
fh->dev = dev;
@@ -2181,15 +2110,13 @@ static int em28xx_v4l2_open(struct file *filp)
videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops,
NULL, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, field,
- sizeof(struct em28xx_buffer), fh);
+ sizeof(struct em28xx_buffer), fh, &dev->lock);
videobuf_queue_vmalloc_init(&fh->vb_vbiq, &em28xx_vbi_qops,
NULL, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
- sizeof(struct em28xx_buffer), fh);
-
- mutex_unlock(&dev->lock);
+ sizeof(struct em28xx_buffer), fh, &dev->lock);
return errCode;
}
@@ -2388,7 +2315,7 @@ static const struct v4l2_file_operations em28xx_v4l_fops = {
.read = em28xx_v4l2_read,
.poll = em28xx_v4l2_poll,
.mmap = em28xx_v4l2_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -2496,6 +2423,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
+ vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2516,6 +2444,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
dev->ctl_input = 0;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 1c61a6b65d28..6a75e6a4fc21 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -25,12 +25,13 @@
#ifndef _EM28XX_H
#define _EM28XX_H
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
+
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
-
-#include <linux/i2c.h>
-#include <linux/mutex.h>
#include <media/ir-kbd-i2c.h>
#include <media/ir-core.h>
#if defined(CONFIG_VIDEO_EM28XX_DVB) || defined(CONFIG_VIDEO_EM28XX_DVB_MODULE)
@@ -73,6 +74,7 @@
#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
#define EM2821_BOARD_USBGEAR_VD204 31
#define EM2821_BOARD_SUPERCOMP_USB_2 32
+#define EM2860_BOARD_ELGATO_VIDEO_CAPTURE 33
#define EM2860_BOARD_TERRATEC_HYBRID_XS 34
#define EM2860_BOARD_TYPHOON_DVD_MAKER 35
#define EM2860_BOARD_NETGMBH_CAM 36
@@ -184,11 +186,6 @@ enum em28xx_mode {
EM28XX_DIGITAL_MODE,
};
-enum em28xx_stream_state {
- STREAM_OFF,
- STREAM_INTERRUPT,
- STREAM_ON,
-};
struct em28xx;
@@ -463,7 +460,6 @@ struct em28xx_audio {
struct snd_card *sndcard;
int users;
- enum em28xx_stream_state capture_stream;
spinlock_t slock;
};
@@ -505,6 +501,10 @@ struct em28xx {
unsigned int has_audio_class:1;
unsigned int has_alsa_audio:1;
+ /* Controls audio streaming */
+ struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
+ atomic_t stream_started; /* stream should be running if true */
+
struct em28xx_fmt *format;
struct em28xx_IR *ir;
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 43d208f1f586..9a075d83dd1f 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -425,7 +426,7 @@ static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
if (vq->int_ops && vq->int_ops->vaddr)
vaddr = vq->int_ops->vaddr(vb);
@@ -1287,7 +1288,7 @@ static int viu_open(struct file *file)
videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
dev->dev, &fh->vbq_lock,
fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct viu_buf), fh);
+ sizeof(struct viu_buf), fh, NULL);
return 0;
}
@@ -1485,7 +1486,7 @@ static int __devinit viu_of_probe(struct platform_device *op,
ad = i2c_get_adapter(0);
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
- "saa7115", "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+ NULL, "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
viu_dev->vidq.timeout.function = viu_vid_timeout;
viu_dev->vidq.timeout.data = (unsigned long)viu_dev;
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 23db0c29f68c..dda56ff834f4 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -77,6 +77,15 @@ config USB_GSPCA_JEILINJ
To compile this driver as a module, choose M here: the
module will be called gspca_jeilinj.
+config USB_GSPCA_KONICA
+ tristate "Konica USB Camera V4L2 driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the Konica chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_konica.
+
config USB_GSPCA_MARS
tristate "Mars USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
@@ -337,6 +346,15 @@ config USB_GSPCA_VC032X
To compile this driver as a module, choose M here: the
module will be called gspca_vc032x.
+config USB_GSPCA_XIRLINK_CIT
+ tristate "Xirlink C-It USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+	  Say Y here if you want support for Xirlink C-It based cameras.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_xirlink_cit.
+
config USB_GSPCA_ZC3XX
tristate "ZC3XX USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index f6616db0b7f8..24e695b8b077 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o
obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o
+obj-$(CONFIG_USB_GSPCA_KONICA) += gspca_konica.o
obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
obj-$(CONFIG_USB_GSPCA_MR97310A) += gspca_mr97310a.o
obj-$(CONFIG_USB_GSPCA_OV519) += gspca_ov519.o
@@ -33,6 +34,7 @@ obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
obj-$(CONFIG_USB_GSPCA_TV8532) += gspca_tv8532.o
obj-$(CONFIG_USB_GSPCA_VC032X) += gspca_vc032x.o
+obj-$(CONFIG_USB_GSPCA_XIRLINK_CIT) += gspca_xirlink_cit.o
obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
gspca_main-objs := gspca.o
@@ -42,6 +44,7 @@ gspca_cpia1-objs := cpia1.o
gspca_etoms-objs := etoms.o
gspca_finepix-objs := finepix.o
gspca_jeilinj-objs := jeilinj.o
+gspca_konica-objs := konica.o
gspca_mars-objs := mars.o
gspca_mr97310a-objs := mr97310a.o
gspca_ov519-objs := ov519.o
@@ -70,6 +73,7 @@ gspca_sunplus-objs := sunplus.o
gspca_t613-objs := t613.o
gspca_tv8532-objs := tv8532.o
gspca_vc032x-objs := vc032x.o
+gspca_xirlink_cit-objs := xirlink_cit.o
gspca_zc3xx-objs := zc3xx.o
obj-$(CONFIG_USB_M5602) += m5602/
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index fce8d9492641..629043933501 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -62,7 +62,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -152,7 +152,8 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x003c, 0x0005);
reg_w(gspca_dev, 0x003c, 0x0006);
reg_w(gspca_dev, 0x003c, 0x0007);
- usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->nbalt - 1);
+ usb_set_interface(gspca_dev->dev, gspca_dev->iface,
+ gspca_dev->nbalt - 1);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -180,7 +181,7 @@ static void sd_isoc_irq(struct urb *urb)
if (gspca_dev->frozen)
return;
#endif
- PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status);
+ err("urb status: %d", urb->status);
return;
}
@@ -208,8 +209,7 @@ static void sd_isoc_irq(struct urb *urb)
if (st == 0)
st = urb->iso_frame_desc[i].status;
if (st) {
- PDEBUG(D_ERR,
- "ISOC data error: [%d] status=%d",
+ err("ISOC data error: [%d] status=%d",
i, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -256,10 +256,10 @@ static void sd_isoc_irq(struct urb *urb)
/* resubmit the URBs */
st = usb_submit_urb(urb0, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb(0) ret %d", st);
+ err("usb_submit_urb(0) ret %d", st);
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
/* sub-driver description */
@@ -304,18 +304,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index d6a75772f3f8..1eacb6c7926d 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -687,7 +687,7 @@ static void cx11646_jpeg(struct gspca_dev*gspca_dev)
reg_w_val(gspca_dev, 0x00c0, 0x00);
reg_r(gspca_dev, 0x0001, 1);
length = 8;
- switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
+ switch (gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv) {
case 0:
for (i = 0; i < 27; i++) {
if (i == 26)
@@ -901,7 +901,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev*gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5cbx[] = { 0x88, 0x00, 0xd4, 0x01, 0x88, 0x01, 0x01, 0x01 };
@@ -924,7 +924,7 @@ static void setbrightness(struct gspca_dev*gspca_dev)
reg_w_val(gspca_dev, 0x0070, reg70);
}
-static void setcontrast(struct gspca_dev*gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5acx[] = { 0x88, 0x0a, 0x0c, 0x01 }; /* seem MSB */
@@ -1068,17 +1068,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 3747a1dcff54..9b121681d135 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -1,7 +1,7 @@
/*
* cpia CPiA (1) gspca driver
*
- * Copyright (C) 2010 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 cpia driver which is :
*
@@ -30,7 +30,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Vision CPiA");
MODULE_LICENSE("GPL");
@@ -373,9 +373,14 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
{
+#define BRIGHTNESS_IDX 0
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -390,6 +395,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setbrightness,
.get = sd_getbrightness,
},
+#define CONTRAST_IDX 1
{
{
.id = V4L2_CID_CONTRAST,
@@ -404,6 +410,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setcontrast,
.get = sd_getcontrast,
},
+#define SATURATION_IDX 2
{
{
.id = V4L2_CID_SATURATION,
@@ -418,6 +425,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setsaturation,
.get = sd_getsaturation,
},
+#define POWER_LINE_FREQUENCY_IDX 3
{
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -432,6 +440,37 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setfreq,
.get = sd_getfreq,
},
+#define ILLUMINATORS_1_IDX 4
+ {
+ {
+ .id = V4L2_CID_ILLUMINATORS_1,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Illuminator 1",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define ILLUMINATORS_1_DEF 0
+ .default_value = ILLUMINATORS_1_DEF,
+ },
+ .set = sd_setilluminator1,
+ .get = sd_getilluminator1,
+ },
+#define ILLUMINATORS_2_IDX 5
+ {
+ {
+ .id = V4L2_CID_ILLUMINATORS_2,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Illuminator 2",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define ILLUMINATORS_2_DEF 0
+ .default_value = ILLUMINATORS_2_DEF,
+ },
+ .set = sd_setilluminator2,
+ .get = sd_getilluminator2,
+ },
+#define COMP_TARGET_IDX 6
{
{
#define V4L2_CID_COMP_TARGET V4L2_CID_PRIVATE_BASE
@@ -510,7 +549,7 @@ retry:
gspca_dev->usb_buf, databytes, 1000);
if (ret < 0)
- PDEBUG(D_ERR, "usb_control_msg %02x, error %d", command[1],
+ err("usb_control_msg %02x, error %d", command[1],
ret);
if (ret == -EPIPE && retries > 0) {
@@ -1059,7 +1098,6 @@ static int command_resume(struct gspca_dev *gspca_dev)
0, sd->params.streamStartLine, 0, 0);
}
-#if 0 /* Currently unused */
static int command_setlights(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1079,7 +1117,6 @@ static int command_setlights(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0,
p1 | p2 | 0xE0, 0);
}
-#endif
static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
{
@@ -1236,7 +1273,7 @@ static void monitor_exposure(struct gspca_dev *gspca_dev)
cmd[7] = 0;
ret = cpia_usb_transferCmd(gspca_dev, cmd);
if (ret) {
- PDEBUG(D_ERR, "ReadVPRegs(30,4,9,8) - failed: %d", ret);
+ err("ReadVPRegs(30,4,9,8) - failed: %d", ret);
return;
}
exp_acc = gspca_dev->usb_buf[0];
@@ -1716,7 +1753,9 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+#ifdef GSPCA_DEBUG
struct sd *sd = (struct sd *) gspca_dev;
+#endif
int ret;
/* Start / Stop the camera to make sure we are talking to
@@ -1726,6 +1765,14 @@ static int sd_init(struct gspca_dev *gspca_dev)
if (ret)
return ret;
+ /* Ensure the QX3 illuminators' states are restored upon resume,
+	   or disable the illuminator controls if this isn't a QX3 */
+ if (sd->params.qx3.qx3_detected)
+ command_setlights(gspca_dev);
+ else
+ gspca_dev->ctrl_dis |=
+ ((1 << ILLUMINATORS_1_IDX) | (1 << ILLUMINATORS_2_IDX));
+
sd_stopN(gspca_dev);
PDEBUG(D_PROBE, "CPIA Version: %d.%02d (%d.%d)",
@@ -1929,6 +1976,72 @@ static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setilluminator(struct gspca_dev *gspca_dev, __s32 val, int n)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
+
+ if (!sd->params.qx3.qx3_detected)
+ return -EINVAL;
+
+ switch (n) {
+ case 1:
+ sd->params.qx3.bottomlight = val ? 1 : 0;
+ break;
+ case 2:
+ sd->params.qx3.toplight = val ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = command_setlights(gspca_dev);
+ if (ret && ret != -EINVAL)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
+{
+ return sd_setilluminator(gspca_dev, val, 1);
+}
+
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
+{
+ return sd_setilluminator(gspca_dev, val, 2);
+}
+
+static int sd_getilluminator(struct gspca_dev *gspca_dev, __s32 *val, int n)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (!sd->params.qx3.qx3_detected)
+ return -EINVAL;
+
+ switch (n) {
+ case 1:
+ *val = sd->params.qx3.bottomlight;
+ break;
+ case 2:
+ *val = sd->params.qx3.toplight;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ return sd_getilluminator(gspca_dev, val, 1);
+}
+
+static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ return sd_getilluminator(gspca_dev, val, 2);
+}
+
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -2004,17 +2117,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index ecd4d743d2bc..a594b36d6199 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -710,9 +710,9 @@ static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain)
}
#define BLIMIT(bright) \
- (__u8)((bright > 0x1f)?0x1f:((bright < 4)?3:bright))
+ (u8)((bright > 0x1f) ? 0x1f : ((bright < 4) ? 3 : bright))
#define LIMIT(color) \
- (unsigned char)((color > 0xff)?0xff:((color < 0)?0:color))
+ (u8)((color > 0xff) ? 0xff : ((color < 0) ? 0 : color))
static void do_autogain(struct gspca_dev *gspca_dev)
{
@@ -896,18 +896,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 5d90e7448579..d78226455d1f 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -182,7 +182,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* Init the device */
ret = command(gspca_dev, 0);
if (ret < 0) {
- PDEBUG(D_STREAM, "init failed %d", ret);
+ err("init failed %d", ret);
return ret;
}
@@ -194,14 +194,14 @@ static int sd_start(struct gspca_dev *gspca_dev)
FPIX_MAX_TRANSFER, &len,
FPIX_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_STREAM, "usb_bulk_msg failed %d", ret);
+ err("usb_bulk_msg failed %d", ret);
return ret;
}
/* Request a frame, but don't read it */
ret = command(gspca_dev, 1);
if (ret < 0) {
- PDEBUG(D_STREAM, "frame request failed %d", ret);
+ err("frame request failed %d", ret);
return ret;
}
@@ -291,19 +291,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/gl860/gl860-mi2020.c b/drivers/media/video/gspca/gl860/gl860-mi2020.c
index 57782e011c9e..2edda6b7d653 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi2020.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi2020.c
@@ -69,15 +69,15 @@ static u8 dat_multi5[] = { 0x8c, 0xa1, 0x03 };
static u8 dat_multi6[] = { 0x90, 0x00, 0x05 };
static struct validx tbl_init_at_startup[] = {
- {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001,0x00c1},
+ {0x0000, 0x0000}, {0x0010, 0x0010}, {0x0008, 0x00c0}, {0x0001, 0x00c1},
{0x0001, 0x00c2}, {0x0020, 0x0006}, {0x006a, 0x000d},
{53, 0xffff},
{0x0040, 0x0000}, {0x0063, 0x0006},
};
static struct validx tbl_common_0B[] = {
- {0x0002, 0x0004}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a,0x000d},
- {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0003, 0x00c1}, {0x0042,0x00c2},
+ {0x0002, 0x0004}, {0x006a, 0x0007}, {0x00ef, 0x0006}, {0x006a, 0x000d},
+ {0x0000, 0x00c0}, {0x0010, 0x0010}, {0x0003, 0x00c1}, {0x0042, 0x00c2},
{0x0004, 0x00d8}, {0x0000, 0x0058}, {0x0041, 0x0000},
};
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index e86eb8b4aedc..b05bec7321b5 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -540,15 +540,12 @@ static int __init sd_mod_init(void)
if (usb_register(&sd_driver) < 0)
return -1;
- PDEBUG(D_PROBE, "driver registered");
-
return 0;
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "driver deregistered");
}
module_init(sd_mod_init);
@@ -588,8 +585,7 @@ int gl860_RTx(struct gspca_dev *gspca_dev,
}
if (r < 0)
- PDEBUG(D_ERR,
- "ctrl transfer failed %4d "
+ err("ctrl transfer failed %4d "
"[p%02x r%d v%04x i%04x len%d]",
r, pref, req, val, index, len);
else if (len > 1 && r < len)
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 78abc1c1f9d5..8fe8fb486d62 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -148,7 +148,7 @@ static void int_irq(struct urb *urb)
if (ret == 0) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0)
- PDEBUG(D_ERR, "Resubmit URB failed with error %i", ret);
+ err("Resubmit URB failed with error %i", ret);
}
}
@@ -177,8 +177,8 @@ static int gspca_input_connect(struct gspca_dev *dev)
err = input_register_device(input_dev);
if (err) {
- PDEBUG(D_ERR, "Input device registration failed "
- "with error %i", err);
+ err("Input device registration failed with error %i",
+ err);
input_dev->dev.parent = NULL;
input_free_device(input_dev);
} else {
@@ -328,8 +328,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
}
st = urb->iso_frame_desc[i].status;
if (st) {
- PDEBUG(D_ERR,
- "ISOC data error: [%d] len=%d, status=%d",
+ err("ISOC data error: [%d] len=%d, status=%d",
i, len, st);
gspca_dev->last_packet_type = DISCARD_PACKET;
continue;
@@ -347,7 +346,7 @@ resubmit:
/* resubmit the URB */
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
/*
@@ -401,7 +400,7 @@ resubmit:
if (gspca_dev->cam.bulk_nurbs != 0) {
st = usb_submit_urb(urb, GFP_ATOMIC);
if (st < 0)
- PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st);
+ err("usb_submit_urb() ret %d", st);
}
}
@@ -433,12 +432,13 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
/* if there are no queued buffer, discard the whole frame */
if (i == atomic_read(&gspca_dev->fr_q)) {
gspca_dev->last_packet_type = DISCARD_PACKET;
+ gspca_dev->sequence++;
return;
}
j = gspca_dev->fr_queue[i];
frame = &gspca_dev->frame[j];
frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
- frame->v4l2_buf.sequence = ++gspca_dev->sequence;
+ frame->v4l2_buf.sequence = gspca_dev->sequence++;
gspca_dev->image = frame->data;
gspca_dev->image_len = 0;
} else {
@@ -590,7 +590,7 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
return 0;
ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
if (ret < 0)
- PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
+ err("set alt 0 err %d", ret);
return ret;
}
@@ -652,7 +652,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
: USB_ENDPOINT_XFER_ISOC;
i = gspca_dev->alt; /* previous alt setting */
if (gspca_dev->cam.reverse_alts) {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i < gspca_dev->nbalt - 2)
i++;
while (++i < gspca_dev->nbalt) {
ep = alt_xfer(&intf->altsetting[i], xfer);
@@ -660,7 +660,7 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
break;
}
} else {
- if (gspca_dev->audio)
+ if (gspca_dev->audio && i > 1)
i--;
while (--i >= 0) {
ep = alt_xfer(&intf->altsetting[i], xfer);
@@ -850,8 +850,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
break;
gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
- PDEBUG(D_ERR|D_STREAM,
- "usb_submit_urb alt %d err %d",
+ err("usb_submit_urb alt %d err %d",
gspca_dev->alt, ret);
goto out;
}
@@ -880,6 +879,7 @@ out:
static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
{
+ struct gspca_ctrl *ctrl;
int i;
i = gspca_dev->cam.nmodes - 1; /* take the highest mode */
@@ -887,6 +887,16 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+
+ /* set the current control values to their default values
+ * which may have changed in sd_init() */
+ ctrl = gspca_dev->cam.ctrls;
+ if (ctrl != NULL) {
+ for (i = 0;
+ i < gspca_dev->sd_desc->nctrls;
+ i++, ctrl++)
+ ctrl->val = ctrl->def;
+ }
}
static int wxh_to_mode(struct gspca_dev *gspca_dev,
@@ -1310,7 +1320,7 @@ out:
return ret;
}
-static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
+static int get_ctrl(struct gspca_dev *gspca_dev,
int id)
{
const struct ctrl *ctrls;
@@ -1322,9 +1332,9 @@ static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
if (gspca_dev->ctrl_dis & (1 << i))
continue;
if (id == ctrls->qctrl.id)
- return ctrls;
+ return i;
}
- return NULL;
+ return -1;
}
static int vidioc_queryctrl(struct file *file, void *priv,
@@ -1332,34 +1342,40 @@ static int vidioc_queryctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int i;
+ struct gspca_ctrl *gspca_ctrl;
+ int i, idx;
u32 id;
- ctrls = NULL;
id = q_ctrl->id;
if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
id &= V4L2_CTRL_ID_MASK;
id++;
+ idx = -1;
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
continue;
- if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
- > ctrls->qctrl.id)
+ if (idx >= 0
+ && gspca_dev->sd_desc->ctrls[i].qctrl.id
+ > gspca_dev->sd_desc->ctrls[idx].qctrl.id)
continue;
- ctrls = &gspca_dev->sd_desc->ctrls[i];
+ idx = i;
}
- if (ctrls == NULL)
- return -EINVAL;
} else {
- ctrls = get_ctrl(gspca_dev, id);
- if (ctrls == NULL)
- return -EINVAL;
- i = ctrls - gspca_dev->sd_desc->ctrls;
+ idx = get_ctrl(gspca_dev, id);
}
- memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
- if (gspca_dev->ctrl_inac & (1 << i))
+ if (idx < 0)
+ return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
+ memcpy(q_ctrl, &ctrls->qctrl, sizeof *q_ctrl);
+ if (gspca_dev->cam.ctrls != NULL) {
+ gspca_ctrl = &gspca_dev->cam.ctrls[idx];
+ q_ctrl->default_value = gspca_ctrl->def;
+ q_ctrl->minimum = gspca_ctrl->min;
+ q_ctrl->maximum = gspca_ctrl->max;
+ }
+ if (gspca_dev->ctrl_inac & (1 << idx))
q_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
return 0;
}
@@ -1369,23 +1385,46 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int ret;
+ struct gspca_ctrl *gspca_ctrl;
+ int idx, ret;
- ctrls = get_ctrl(gspca_dev, ctrl->id);
- if (ctrls == NULL)
+ idx = get_ctrl(gspca_dev, ctrl->id);
+ if (idx < 0)
return -EINVAL;
-
- if (ctrl->value < ctrls->qctrl.minimum
- || ctrl->value > ctrls->qctrl.maximum)
- return -ERANGE;
+ if (gspca_dev->ctrl_inac & (1 << idx))
+ return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
+ if (gspca_dev->cam.ctrls != NULL) {
+ gspca_ctrl = &gspca_dev->cam.ctrls[idx];
+ if (ctrl->value < gspca_ctrl->min
+ || ctrl->value > gspca_ctrl->max)
+ return -ERANGE;
+ } else {
+ gspca_ctrl = NULL;
+ if (ctrl->value < ctrls->qctrl.minimum
+ || ctrl->value > ctrls->qctrl.maximum)
+ return -ERANGE;
+ }
PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->present) {
+ ret = -ENODEV;
+ goto out;
+ }
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
+ if (ctrls->set != NULL) {
ret = ctrls->set(gspca_dev, ctrl->value);
- else
- ret = -ENODEV;
+ goto out;
+ }
+ if (gspca_ctrl != NULL) {
+ gspca_ctrl->val = ctrl->value;
+ if (ctrls->set_control != NULL
+ && gspca_dev->streaming)
+ ctrls->set_control(gspca_dev);
+ }
+ ret = gspca_dev->usb_err;
+out:
mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -1395,19 +1434,28 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = priv;
const struct ctrl *ctrls;
- int ret;
+ int idx, ret;
- ctrls = get_ctrl(gspca_dev, ctrl->id);
- if (ctrls == NULL)
+ idx = get_ctrl(gspca_dev, ctrl->id);
+ if (idx < 0)
return -EINVAL;
+ ctrls = &gspca_dev->sd_desc->ctrls[idx];
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (!gspca_dev->present) {
+ ret = -ENODEV;
+ goto out;
+ }
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
+ if (ctrls->get != NULL) {
ret = ctrls->get(gspca_dev, &ctrl->value);
- else
- ret = -ENODEV;
+ goto out;
+ }
+ if (gspca_dev->cam.ctrls != NULL)
+ ctrl->value = gspca_dev->cam.ctrls[idx].val;
+ ret = 0;
+out:
mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -2127,6 +2175,22 @@ static struct video_device gspca_template = {
.release = gspca_release,
};
+/* initialize the controls */
+static void ctrls_init(struct gspca_dev *gspca_dev)
+{
+ struct gspca_ctrl *ctrl;
+ int i;
+
+ for (i = 0, ctrl = gspca_dev->cam.ctrls;
+ i < gspca_dev->sd_desc->nctrls;
+ i++, ctrl++) {
+ ctrl->def = gspca_dev->sd_desc->ctrls[i].qctrl.default_value;
+ ctrl->val = ctrl->def;
+ ctrl->min = gspca_dev->sd_desc->ctrls[i].qctrl.minimum;
+ ctrl->max = gspca_dev->sd_desc->ctrls[i].qctrl.maximum;
+ }
+}
+
/*
* probe and create a new gspca device
*
@@ -2188,6 +2252,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
ret = sd_desc->config(gspca_dev, id);
if (ret < 0)
goto out;
+ if (gspca_dev->cam.ctrls != NULL)
+ ctrls_init(gspca_dev);
ret = sd_desc->init(gspca_dev);
if (ret < 0)
goto out;
@@ -2243,7 +2309,7 @@ int gspca_dev_probe(struct usb_interface *intf,
/* we don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1) {
- PDEBUG(D_ERR, "%04x:%04x too many config",
+ err("%04x:%04x too many config",
id->idVendor, id->idProduct);
return -ENODEV;
}
@@ -2428,7 +2494,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
{
- info("main v%d.%d.%d registered",
+ info("v%d.%d.%d registered",
(DRIVER_VERSION_NUMBER >> 16) & 0xff,
(DRIVER_VERSION_NUMBER >> 8) & 0xff,
DRIVER_VERSION_NUMBER & 0xff);
@@ -2436,7 +2502,6 @@ static int __init gspca_init(void)
}
static void __exit gspca_exit(void)
{
- info("main deregistered");
}
module_init(gspca_init);
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index b749c36d9f7e..d4d210b56b49 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -52,11 +52,20 @@ struct framerates {
int nrates;
};
+/* control definition */
+struct gspca_ctrl {
+ s16 val; /* current value */
+ s16 def; /* default value */
+ s16 min, max; /* minimum and maximum values */
+};
+
/* device information - set at probe time */
struct cam {
const struct v4l2_pix_format *cam_mode; /* size nmodes */
const struct framerates *mode_framerates; /* must have size nmode,
* just like cam_mode */
+ struct gspca_ctrl *ctrls; /* control table - size nctrls */
+ /* may be NULL */
u32 bulk_size; /* buffer size when image transfer by bulk */
u32 input_flags; /* value for ENUM_INPUT status flags */
u8 nmodes; /* size of cam_mode */
@@ -99,6 +108,7 @@ struct ctrl {
struct v4l2_queryctrl qctrl;
int (*set)(struct gspca_dev *, __s32);
int (*get)(struct gspca_dev *, __s32 *);
+ cam_v_op set_control;
};
/* subdriver description */
@@ -106,7 +116,7 @@ struct sd_desc {
/* information */
const char *name; /* sub-driver name */
/* controls */
- const struct ctrl *ctrls;
+ const struct ctrl *ctrls; /* static control definition */
int nctrls;
/* mandatory operations */
cam_cf_op config; /* called on probe */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 12d9cf4caba2..a35e87bb0388 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -82,7 +82,7 @@ static int jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
usb_sndbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, 2, NULL, 500);
if (retval < 0)
- PDEBUG(D_ERR, "command write [%02x] error %d",
+ err("command write [%02x] error %d",
gspca_dev->usb_buf[0], retval);
return retval;
}
@@ -97,7 +97,7 @@ static int jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
gspca_dev->usb_buf, 1, NULL, 500);
response = gspca_dev->usb_buf[0];
if (retval < 0)
- PDEBUG(D_ERR, "read command [%02x] error %d",
+ err("read command [%02x] error %d",
gspca_dev->usb_buf[0], retval);
return retval;
}
@@ -191,7 +191,7 @@ static void jlj_dostream(struct work_struct *work)
buffer = kmalloc(JEILINJ_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
while (gspca_dev->present && gspca_dev->streaming) {
@@ -354,19 +354,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
new file mode 100644
index 000000000000..d2ce65dcbfdc
--- /dev/null
+++ b/drivers/media/video/gspca/konica.c
@@ -0,0 +1,646 @@
+/*
+ * Driver for USB webcams based on the Konica chipset. This
+ * chipset is used in the Intel YC76 camera.
+ *
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the usbvideo v4l1 konicawc driver which is:
+ *
+ * Copyright (C) 2002 Simon Evans <spse@secret.org.uk>
+ *
+ * The code for making gspca work with a webcam with 2 isoc endpoints was
+ * taken from the benq gspca subdriver which is:
+ *
+ * Copyright (C) 2009 Jean-Francois Moine (http://moinejf.free.fr)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define MODULE_NAME "konica"
+
+#include <linux/input.h>
+#include "gspca.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Konica chipset USB Camera Driver");
+MODULE_LICENSE("GPL");
+
+#define WHITEBAL_REG 0x01
+#define BRIGHTNESS_REG 0x02
+#define SHARPNESS_REG 0x03
+#define CONTRAST_REG 0x04
+#define SATURATION_REG 0x05
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct urb *last_data_urb;
+ u8 snapshot_pressed;
+ u8 brightness;
+ u8 contrast;
+ u8 saturation;
+ u8 whitebal;
+ u8 sharpness;
+};
+
+/* V4L2 controls supported by the driver */
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
+
+static const struct ctrl sd_ctrls[] = {
+#define SD_BRIGHTNESS 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define BRIGHTNESS_DEFAULT 4
+ .default_value = BRIGHTNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
+ },
+#define SD_CONTRAST 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 4,
+#define CONTRAST_DEFAULT 10
+ .default_value = CONTRAST_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
+ },
+#define SD_SATURATION 2
+ {
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define SATURATION_DEFAULT 4
+ .default_value = SATURATION_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsaturation,
+ .get = sd_getsaturation,
+ },
+#define SD_WHITEBAL 3
+ {
+ {
+ .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "White Balance",
+ .minimum = 0,
+ .maximum = 33,
+ .step = 1,
+#define WHITEBAL_DEFAULT 25
+ .default_value = WHITEBAL_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setwhitebal,
+ .get = sd_getwhitebal,
+ },
+#define SD_SHARPNESS 4
+ {
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+#define SHARPNESS_DEFAULT 4
+ .default_value = SHARPNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
+ },
+};
+
+/* .priv is what goes to register 8 for this mode, known working values:
+ 0x00 -> 176x144, cropped
+ 0x01 -> 176x144, cropped
+ 0x02 -> 176x144, cropped
+ 0x03 -> 176x144, cropped
+ 0x04 -> 176x144, binned
+ 0x05 -> 320x240
+ 0x06 -> 320x240
+ 0x07 -> 160x120, cropped
+ 0x08 -> 160x120, cropped
+ 0x09 -> 160x120, binned (note has 136 lines)
+ 0x0a -> 160x120, binned (note has 136 lines)
+ 0x0b -> 160x120, cropped
+*/
+static const struct v4l2_pix_format vga_mode[] = {
+ {160, 120, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 136 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x0a},
+ {176, 144, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x04},
+ {320, 240, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2 + 960,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0x05},
+};
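+/*
+ * Worked example for the sizeimage values above: KONICA420 is a 4:2:0 YUV
+ * layout (hence the * 3 / 2).  The binned 160x120 mode really delivers
+ * 136 lines (see the .priv notes above), so
+ *	160 * 136 * 3 / 2 = 32640 bytes, plus 960 bytes of slack = 33600.
+ * The reason for the extra 960 bytes is not documented here.
+ */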
+
+static void sd_isoc_irq(struct urb *urb);
+
+static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x02,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ NULL,
+ 0,
+ 1000);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x03,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ gspca_dev->usb_buf,
+ 2,
+ 1000);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+static void konica_stream_on(struct gspca_dev *gspca_dev)
+{
+ reg_w(gspca_dev, 1, 0x0b);
+}
+
+static void konica_stream_off(struct gspca_dev *gspca_dev)
+{
+ reg_w(gspca_dev, 0, 0x0b);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->cam.cam_mode = vga_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
+ gspca_dev->cam.no_urb_create = 1;
+ /* The highest alt setting has an isoc packetsize of 0, so we
+ don't want to use it */
+ gspca_dev->nbalt--;
+
+ sd->brightness = BRIGHTNESS_DEFAULT;
+ sd->contrast = CONTRAST_DEFAULT;
+ sd->saturation = SATURATION_DEFAULT;
+ sd->whitebal = WHITEBAL_DEFAULT;
+ sd->sharpness = SHARPNESS_DEFAULT;
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ /* HDG not sure if these 2 reads are needed */
+ reg_r(gspca_dev, 0, 0x10);
+ PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
+ gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
+ reg_r(gspca_dev, 0, 0x10);
+ PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
+ gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
+ reg_w(gspca_dev, 0, 0x0d);
+
+ return 0;
+}
+
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct urb *urb;
+ int i, n, packet_size;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ err("Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+
+ reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
+ reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
+ reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
+ reg_w(gspca_dev, sd->saturation, SATURATION_REG);
+ reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
+
+ n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+ reg_w(gspca_dev, n, 0x08);
+
+ konica_stream_on(gspca_dev);
+
+ if (gspca_dev->usb_err)
+ return gspca_dev->usb_err;
+
+ /* create 4 URBs - 2 on endpoint 0x81 and 2 on 0x82 */
+#if MAX_NURBS < 4
+#error "Not enough URBs in the gspca table"
+#endif
+#define SD_NPKT 32
+ for (n = 0; n < 4; n++) {
+ i = n & 1 ? 0 : 1;
+ packet_size =
+ le16_to_cpu(alt->endpoint[i].desc.wMaxPacketSize);
+ urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL);
+ if (!urb) {
+ err("usb_alloc_urb failed");
+ return -ENOMEM;
+ }
+ gspca_dev->urb[n] = urb;
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
+ packet_size * SD_NPKT,
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ err("usb_buffer_alloc failed");
+ return -ENOMEM;
+ }
+
+ urb->dev = gspca_dev->dev;
+ urb->context = gspca_dev;
+ urb->transfer_buffer_length = packet_size * SD_NPKT;
+ urb->pipe = usb_rcvisocpipe(gspca_dev->dev,
+ n & 1 ? 0x81 : 0x82);
+ urb->transfer_flags = URB_ISO_ASAP
+ | URB_NO_TRANSFER_DMA_MAP;
+ urb->interval = 1;
+ urb->complete = sd_isoc_irq;
+ urb->number_of_packets = SD_NPKT;
+ for (i = 0; i < SD_NPKT; i++) {
+ urb->iso_frame_desc[i].length = packet_size;
+ urb->iso_frame_desc[i].offset = packet_size * i;
+ }
+ }
+
+ return 0;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ konica_stream_off(gspca_dev);
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ /* Don't keep the button in the pressed state "forever" if it was
+ pressed when streaming is stopped */
+ if (sd->snapshot_pressed) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ sd->snapshot_pressed = 0;
+ }
+#endif
+}
+
+/* reception of an URB */
+static void sd_isoc_irq(struct urb *urb)
+{
+ struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct urb *data_urb, *status_urb;
+ u8 *data;
+ int i, st;
+
+ PDEBUG(D_PACK, "sd isoc irq");
+ if (!gspca_dev->streaming)
+ return;
+
+ if (urb->status != 0) {
+ if (urb->status == -ESHUTDOWN)
+ return; /* disconnection */
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ return;
+#endif
+ PDEBUG(D_ERR, "urb status: %d", urb->status);
+ st = usb_submit_urb(urb, GFP_ATOMIC);
+ if (st < 0)
+ err("resubmit urb error %d", st);
+ return;
+ }
+
+ /* if this is a data URB (ep 0x82), wait */
+ if (urb->transfer_buffer_length > 32) {
+ sd->last_data_urb = urb;
+ return;
+ }
+
+ status_urb = urb;
+ data_urb = sd->last_data_urb;
+ sd->last_data_urb = NULL;
+
+ if (!data_urb || data_urb->start_frame != status_urb->start_frame) {
+ PDEBUG(D_ERR|D_PACK, "lost sync on frames");
+ goto resubmit;
+ }
+
+ if (data_urb->number_of_packets != status_urb->number_of_packets) {
+ PDEBUG(D_ERR|D_PACK,
+ "no packets does not match, data: %d, status: %d",
+ data_urb->number_of_packets,
+ status_urb->number_of_packets);
+ goto resubmit;
+ }
+
+ for (i = 0; i < status_urb->number_of_packets; i++) {
+ if (data_urb->iso_frame_desc[i].status ||
+ status_urb->iso_frame_desc[i].status) {
+ PDEBUG(D_ERR|D_PACK,
+ "pkt %d data-status %d, status-status %d", i,
+ data_urb->iso_frame_desc[i].status,
+ status_urb->iso_frame_desc[i].status);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ continue;
+ }
+
+ if (status_urb->iso_frame_desc[i].actual_length != 1) {
+ PDEBUG(D_ERR|D_PACK,
+ "bad status packet length %d",
+ status_urb->iso_frame_desc[i].actual_length);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ continue;
+ }
+
+ st = *((u8 *)status_urb->transfer_buffer
+ + status_urb->iso_frame_desc[i].offset);
+
+ data = (u8 *)data_urb->transfer_buffer
+ + data_urb->iso_frame_desc[i].offset;
+
+ /* st: 0x80-0xff: frame start with frame number (ie 0-7f)
+ * otherwise:
+ * bit 0 0: keep packet
+ * 1: drop packet (padding data)
+ *
+ * bit 6 (0x40) 0 button not clicked
+ * 1 button clicked
+ * button is used to `take a picture' (in software)
+ */
+ if (st & 0x80) {
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ } else {
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ u8 button_state = st & 0x40 ? 1 : 0;
+ if (sd->snapshot_pressed != button_state) {
+ input_report_key(gspca_dev->input_dev,
+ KEY_CAMERA,
+ button_state);
+ input_sync(gspca_dev->input_dev);
+ sd->snapshot_pressed = button_state;
+ }
+#endif
+ if (st & 0x01)
+ continue;
+ }
+ gspca_frame_add(gspca_dev, INTER_PACKET, data,
+ data_urb->iso_frame_desc[i].actual_length);
+ }
+
+resubmit:
+ if (data_urb) {
+ st = usb_submit_urb(data_urb, GFP_ATOMIC);
+ if (st < 0)
+ PDEBUG(D_ERR|D_PACK,
+ "usb_submit_urb(data_urb) ret %d", st);
+ }
+ st = usb_submit_urb(status_urb, GFP_ATOMIC);
+ if (st < 0)
+ err("usb_submit_urb(status_urb) ret %d", st);
+}
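+/*
+ * A compact restatement of the status-byte protocol handled above; this
+ * helper is illustrative only and is not used by the driver (the function
+ * name and the enum are made up for the example).
+ */
+enum konica_pkt { KONICA_PKT_FRAME_START, KONICA_PKT_DATA, KONICA_PKT_PADDING };
+
+static inline enum konica_pkt konica_classify(u8 st, u8 *button_pressed)
+{
+	if (st & 0x80)			/* 0x80..0xff: new frame, nr = st & 0x7f */
+		return KONICA_PKT_FRAME_START;
+	*button_pressed = (st & 0x40) ? 1 : 0;	/* snapshot button state */
+	if (st & 0x01)			/* bit 0 set: padding, discard the data */
+		return KONICA_PKT_PADDING;
+	return KONICA_PKT_DATA;
+}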
+
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->brightness = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
+ konica_stream_on(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->brightness;
+
+ return 0;
+}
+
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->contrast = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
+ konica_stream_on(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->contrast;
+
+ return 0;
+}
+
+static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->saturation = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->saturation, SATURATION_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->saturation;
+
+ return 0;
+}
+
+static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->whitebal = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->whitebal;
+
+ return 0;
+}
+
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->sharpness = val;
+ if (gspca_dev->streaming) {
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
+ konica_stream_on(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->sharpness;
+
+ return 0;
+}
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ .other_input = 1,
+#endif
+};
+
+/* -- module initialisation -- */
+static const __devinitdata struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index b073d66acd04..c872b93a3351 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -406,18 +406,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init mod_m5602_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit mod_m5602_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(mod_m5602_init);
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index c0722fa64606..0d605a52b924 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -109,14 +109,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define GREEN_BALANCE_IDX 4
{
{
- .id = M5602_V4L2_CID_GREEN_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "green balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_GREEN_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_green_balance,
.get = mt9m111_get_green_balance
@@ -124,14 +124,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define BLUE_BALANCE_IDX 5
{
{
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_blue_balance,
.get = mt9m111_get_blue_balance
@@ -139,14 +139,14 @@ static const struct ctrl mt9m111_ctrls[] = {
#define RED_BALANCE_IDX 5
{
{
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0x7ff,
- .step = 0x1,
- .default_value = MT9M111_RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0x7ff,
+ .step = 0x1,
+ .default_value = MT9M111_RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = mt9m111_set_red_balance,
.get = mt9m111_get_red_balance
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index b3de77823091..b1f0c492036a 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -70,7 +70,7 @@
#define MT9M111_COLORPIPE 0x01
#define MT9M111_CAMERA_CONTROL 0x02
-#define MT9M111_RESET (1 << 0)
+#define MT9M111_RESET (1 << 0)
#define MT9M111_RESTART (1 << 1)
#define MT9M111_ANALOG_STANDBY (1 << 2)
#define MT9M111_CHIP_ENABLE (1 << 3)
@@ -97,7 +97,7 @@
#define MT9M111_2D_DEFECT_CORRECTION_ENABLE (1 << 0)
#define INITIAL_MAX_GAIN 64
-#define MT9M111_DEFAULT_GAIN 283
+#define MT9M111_DEFAULT_GAIN 283
#define MT9M111_GREEN_GAIN_DEFAULT 0x20
#define MT9M111_BLUE_GAIN_DEFAULT 0x20
#define MT9M111_RED_GAIN_DEFAULT 0x20
@@ -125,8 +125,7 @@ static const struct m5602_sensor mt9m111 = {
.start = mt9m111_start,
};
-static const unsigned char preinit_mt9m111[][4] =
-{
+static const unsigned char preinit_mt9m111[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -165,8 +164,7 @@ static const unsigned char preinit_mt9m111[][4] =
{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00}
};
-static const unsigned char init_mt9m111[][4] =
-{
+static const unsigned char init_mt9m111[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -257,8 +255,7 @@ static const unsigned char init_mt9m111[][4] =
{SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
};
-static const unsigned char start_mt9m111[][4] =
-{
+static const unsigned char start_mt9m111[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -271,5 +268,4 @@ static const unsigned char start_mt9m111[][4] =
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
{BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c
index 62c1cbf06666..b12f60464b3b 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c
@@ -54,13 +54,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 4
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_white_balance,
.get = ov7660_get_auto_white_balance
@@ -68,13 +68,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_GAIN_CTRL_IDX 5
{
{
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto gain control",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto gain control",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_gain,
.get = ov7660_get_auto_gain
@@ -82,13 +82,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define AUTO_EXPOSURE_IDX 6
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov7660_set_auto_exposure,
.get = ov7660_get_auto_exposure
@@ -96,13 +96,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define HFLIP_IDX 7
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov7660_set_hflip,
.get = ov7660_get_hflip
@@ -110,13 +110,13 @@ static const struct ctrl ov7660_ctrls[] = {
#define VFLIP_IDX 8
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov7660_set_vflip,
.get = ov7660_get_vflip
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
index 4d9dcf29da2e..2efd607987ec 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -80,7 +80,7 @@
#define OV7660_DEFAULT_GAIN 0x0e
#define OV7660_DEFAULT_RED_GAIN 0x80
-#define OV7660_DEFAULT_BLUE_GAIN 0x80
+#define OV7660_DEFAULT_BLUE_GAIN 0x80
#define OV7660_DEFAULT_SATURATION 0x00
#define OV7660_DEFAULT_EXPOSURE 0x20
@@ -105,8 +105,7 @@ static const struct m5602_sensor ov7660 = {
.disconnect = ov7660_disconnect,
};
-static const unsigned char preinit_ov7660[][4] =
-{
+static const unsigned char preinit_ov7660[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -140,8 +139,7 @@ static const unsigned char preinit_ov7660[][4] =
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
};
-static const unsigned char init_ov7660[][4] =
-{
+static const unsigned char init_ov7660[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -259,5 +257,4 @@ static const unsigned char init_ov7660[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index 069ba0044f8b..8ded8b100576 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -121,8 +121,8 @@ static const struct ctrl ov9650_ctrls[] = {
.minimum = 0x00,
.maximum = 0x1ff,
.step = 0x4,
- .default_value = EXPOSURE_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = EXPOSURE_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_exposure,
.get = ov9650_get_exposure
@@ -146,13 +146,13 @@ static const struct ctrl ov9650_ctrls[] = {
{
{
.id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_red_balance,
.get = ov9650_get_red_balance
@@ -161,13 +161,13 @@ static const struct ctrl ov9650_ctrls[] = {
{
{
.id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = ov9650_set_blue_balance,
.get = ov9650_get_blue_balance
@@ -175,13 +175,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define HFLIP_IDX 4
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov9650_set_hflip,
.get = ov9650_get_hflip
@@ -189,13 +189,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define VFLIP_IDX 5
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = ov9650_set_vflip,
.get = ov9650_get_vflip
@@ -203,13 +203,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 6
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_white_balance,
.get = ov9650_get_auto_white_balance
@@ -217,13 +217,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_GAIN_CTRL_IDX 7
{
{
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto gain control",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto gain control",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_gain,
.get = ov9650_get_auto_gain
@@ -231,13 +231,13 @@ static const struct ctrl ov9650_ctrls[] = {
#define AUTO_EXPOSURE_IDX 8
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1
},
.set = ov9650_set_auto_exposure,
.get = ov9650_get_auto_exposure
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index c98c40d69e05..da9a129b739d 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -110,7 +110,7 @@
#define OV9650_VARIOPIXEL (1 << 2)
#define OV9650_SYSTEM_CLK_SEL (1 << 7)
-#define OV9650_SLAM_MODE (1 << 4)
+#define OV9650_SLAM_MODE (1 << 4)
#define OV9650_QVGA_VARIOPIXEL (1 << 7)
@@ -154,8 +154,7 @@ static const struct m5602_sensor ov9650 = {
.disconnect = ov9650_disconnect,
};
-static const unsigned char preinit_ov9650[][3] =
-{
+static const unsigned char preinit_ov9650[][3] = {
/* [INITCAM] */
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
@@ -180,8 +179,7 @@ static const unsigned char preinit_ov9650[][3] =
{SENSOR, OV9650_OFON, 0x40}
};
-static const unsigned char init_ov9650[][3] =
-{
+static const unsigned char init_ov9650[][3] = {
/* [INITCAM] */
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
@@ -297,8 +295,7 @@ static const unsigned char init_ov9650[][3] =
{SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
};
-static const unsigned char res_init_ov9650[][3] =
-{
+static const unsigned char res_init_ov9650[][3] = {
{SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
{BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
@@ -307,5 +304,4 @@ static const unsigned char res_init_ov9650[][3] =
{BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
{BRIDGE, M5602_XB_SIG_INI, 0x01}
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index 925b87d66f40..1febd34c2f05 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -58,14 +58,14 @@ static const struct ctrl po1030_ctrls[] = {
#define GAIN_IDX 0
{
{
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0x4f,
- .step = 0x1,
- .default_value = PO1030_GLOBAL_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "gain",
+ .minimum = 0x00,
+ .maximum = 0x4f,
+ .step = 0x1,
+ .default_value = PO1030_GLOBAL_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_gain,
.get = po1030_get_gain
@@ -73,14 +73,14 @@ static const struct ctrl po1030_ctrls[] = {
#define EXPOSURE_IDX 1
{
{
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0x02ff,
- .step = 0x1,
- .default_value = PO1030_EXPOSURE_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = 0x00,
+ .maximum = 0x02ff,
+ .step = 0x1,
+ .default_value = PO1030_EXPOSURE_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_exposure,
.get = po1030_get_exposure
@@ -88,14 +88,14 @@ static const struct ctrl po1030_ctrls[] = {
#define RED_BALANCE_IDX 2
{
{
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_RED_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "red balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_RED_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_red_balance,
.get = po1030_get_red_balance
@@ -103,14 +103,14 @@ static const struct ctrl po1030_ctrls[] = {
#define BLUE_BALANCE_IDX 3
{
{
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_BLUE_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "blue balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_BLUE_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_blue_balance,
.get = po1030_get_blue_balance
@@ -118,13 +118,13 @@ static const struct ctrl po1030_ctrls[] = {
#define HFLIP_IDX 4
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_hflip,
.get = po1030_get_hflip
@@ -132,13 +132,13 @@ static const struct ctrl po1030_ctrls[] = {
#define VFLIP_IDX 5
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_vflip,
.get = po1030_get_vflip
@@ -146,13 +146,13 @@ static const struct ctrl po1030_ctrls[] = {
#define AUTO_WHITE_BALANCE_IDX 6
{
{
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto white balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_auto_white_balance,
.get = po1030_get_auto_white_balance
@@ -160,13 +160,13 @@ static const struct ctrl po1030_ctrls[] = {
#define AUTO_EXPOSURE_IDX 7
{
{
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "auto exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "auto exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
.set = po1030_set_auto_exposure,
.get = po1030_get_auto_exposure
@@ -174,14 +174,14 @@ static const struct ctrl po1030_ctrls[] = {
#define GREEN_BALANCE_IDX 8
{
{
- .id = M5602_V4L2_CID_GREEN_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "green balance",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = PO1030_GREEN_GAIN_DEFAULT,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .id = M5602_V4L2_CID_GREEN_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "green balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 0x1,
+ .default_value = PO1030_GREEN_GAIN_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = po1030_set_green_balance,
.get = po1030_get_green_balance
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index 1ea380b2bbe7..338359596398 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -139,9 +139,9 @@
#define PO1030_GLOBAL_GAIN_DEFAULT 0x12
#define PO1030_EXPOSURE_DEFAULT 0x0085
-#define PO1030_BLUE_GAIN_DEFAULT 0x36
-#define PO1030_RED_GAIN_DEFAULT 0x36
-#define PO1030_GREEN_GAIN_DEFAULT 0x40
+#define PO1030_BLUE_GAIN_DEFAULT 0x36
+#define PO1030_RED_GAIN_DEFAULT 0x36
+#define PO1030_GREEN_GAIN_DEFAULT 0x40
/*****************************************************************************/
@@ -166,8 +166,7 @@ static const struct m5602_sensor po1030 = {
.disconnect = po1030_disconnect,
};
-static const unsigned char preinit_po1030[][3] =
-{
+static const unsigned char preinit_po1030[][3] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -193,8 +192,7 @@ static const unsigned char preinit_po1030[][3] =
{BRIDGE, M5602_XB_GPIO_DAT, 0x00}
};
-static const unsigned char init_po1030[][3] =
-{
+static const unsigned char init_po1030[][3] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
@@ -271,5 +269,4 @@ static const unsigned char init_po1030[][3] =
{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
{BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index da0a38c78708..d27280be9852 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -143,13 +143,13 @@ static const struct ctrl s5k4aa_ctrls[] = {
#define VFLIP_IDX 0
{
{
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "vertical flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k4aa_set_vflip,
.get = s5k4aa_get_vflip
@@ -157,13 +157,13 @@ static const struct ctrl s5k4aa_ctrls[] = {
#define HFLIP_IDX 1
{
{
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "horizontal flip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0
},
.set = s5k4aa_set_hflip,
.get = s5k4aa_get_hflip
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index 4440da4e7f0f..8cc7a3f6da72 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -83,8 +83,7 @@ static const struct m5602_sensor s5k4aa = {
.disconnect = s5k4aa_disconnect,
};
-static const unsigned char preinit_s5k4aa[][4] =
-{
+static const unsigned char preinit_s5k4aa[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -127,8 +126,7 @@ static const unsigned char preinit_s5k4aa[][4] =
{SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00}
};
-static const unsigned char init_s5k4aa[][4] =
-{
+static const unsigned char init_s5k4aa[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -179,8 +177,7 @@ static const unsigned char init_s5k4aa[][4] =
{SENSOR, 0x37, 0x00, 0x00},
};
-static const unsigned char VGA_s5k4aa[][4] =
-{
+static const unsigned char VGA_s5k4aa[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -235,8 +232,7 @@ static const unsigned char VGA_s5k4aa[][4] =
{SENSOR, 0x02, 0x0e, 0x00},
};
-static const unsigned char SXGA_s5k4aa[][4] =
-{
+static const unsigned char SXGA_s5k4aa[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -284,6 +280,4 @@ static const unsigned char SXGA_s5k4aa[][4] =
{SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
{SENSOR, 0x02, 0x0e, 0x00},
};
-
-
#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 7814b078acde..80a63a236e24 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -35,7 +35,7 @@
#define S5K83A_MAXIMUM_EXPOSURE 0x3c
#define S5K83A_FLIP_MASK 0x10
#define S5K83A_GPIO_LED_MASK 0x10
-#define S5K83A_GPIO_ROTATION_MASK 0x40
+#define S5K83A_GPIO_ROTATION_MASK 0x40
/*****************************************************************************/
@@ -67,8 +67,7 @@ struct s5k83a_priv {
s32 *settings;
};
-static const unsigned char preinit_s5k83a[][4] =
-{
+static const unsigned char preinit_s5k83a[][4] = {
{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
@@ -108,8 +107,7 @@ static const unsigned char preinit_s5k83a[][4] =
/* This could probably be considerably shortened.
I don't have the hardware to experiment with it, patches welcome
*/
-static const unsigned char init_s5k83a[][4] =
-{
+static const unsigned char init_s5k83a[][4] = {
/* The following sequence is useless after a clean boot
but is necessary after resume from suspend */
{BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
@@ -166,8 +164,7 @@ static const unsigned char init_s5k83a[][4] =
{SENSOR, 0x00, 0x06, 0x00},
};
-static const unsigned char start_s5k83a[][4] =
-{
+static const unsigned char start_s5k83a[][4] = {
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
{BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -193,5 +190,4 @@ static const unsigned char start_s5k83a[][4] =
{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
};
-
#endif
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 031f7195ce0d..a81536e78698 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -28,14 +28,23 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ COLORS,
+ GAMMA,
+ SHARPNESS,
+ ILLUM_TOP,
+ ILLUM_BOT,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
- u8 colors;
- u8 gamma;
- u8 sharpness;
+ struct gspca_ctrl ctrls[NCTRLS];
+
u8 quality;
#define QUALITY_MIN 40
#define QUALITY_MAX 70
@@ -45,17 +54,15 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setgamma(struct gspca_dev *gspca_dev);
+static void setsharpness(struct gspca_dev *gspca_dev);
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -63,13 +70,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 30,
.step = 1,
-#define BRIGHTNESS_DEF 15
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 15,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -77,13 +82,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 1,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 200
- .default_value = COLOR_DEF,
+ .default_value = 200,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
- {
+[GAMMA] = {
{
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -91,13 +94,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 3,
.step = 1,
-#define GAMMA_DEF 1
- .default_value = GAMMA_DEF,
+ .default_value = 1,
},
- .set = sd_setgamma,
- .get = sd_getgamma,
+ .set_control = setgamma
},
- {
+[SHARPNESS] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -105,11 +106,35 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2,
.step = 1,
-#define SHARPNESS_DEF 1
- .default_value = SHARPNESS_DEF,
+ .default_value = 1,
+ },
+ .set_control = setsharpness
+ },
+[ILLUM_TOP] = {
+ {
+ .id = V4L2_CID_ILLUMINATORS_1,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Top illuminator",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = V4L2_CTRL_FLAG_UPDATE,
+ },
+ .set = sd_setilluminator1
+ },
+[ILLUM_BOT] = {
+ {
+ .id = V4L2_CID_ILLUMINATORS_2,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Bottom illuminator",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = V4L2_CTRL_FLAG_UPDATE,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
+ .set = sd_setilluminator2
},
};
@@ -138,21 +163,25 @@ static const __u8 mi_data[0x20] = {
};
/* write <len> bytes from gspca_dev->usb_buf */
-static int reg_w(struct gspca_dev *gspca_dev,
+static void reg_w(struct gspca_dev *gspca_dev,
int len)
{
int alen, ret;
+ if (gspca_dev->usb_err < 0)
+ return;
+
ret = usb_bulk_msg(gspca_dev->dev,
usb_sndbulkpipe(gspca_dev->dev, 4),
gspca_dev->usb_buf,
len,
&alen,
500); /* timeout in milliseconds */
- if (ret < 0)
- PDEBUG(D_ERR, "reg write [%02x] error %d",
+ if (ret < 0) {
+ err("reg write [%02x] error %d",
gspca_dev->usb_buf[0], ret);
- return ret;
+ gspca_dev->usb_err = ret;
+ }
}
static void mi_w(struct gspca_dev *gspca_dev,
@@ -167,6 +196,59 @@ static void mi_w(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 4);
}
+static void setbrightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x61;
+ gspca_dev->usb_buf[1] = sd->ctrls[BRIGHTNESS].val;
+ reg_w(gspca_dev, 2);
+}
+
+static void setcolors(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s16 val;
+
+ val = sd->ctrls[COLORS].val;
+ gspca_dev->usb_buf[0] = 0x5f;
+ gspca_dev->usb_buf[1] = val << 3;
+ gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04;
+ reg_w(gspca_dev, 3);
+}
+
+static void setgamma(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x06;
+ gspca_dev->usb_buf[1] = sd->ctrls[GAMMA].val * 0x40;
+ reg_w(gspca_dev, 2);
+}
+
+static void setsharpness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x67;
+ gspca_dev->usb_buf[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ reg_w(gspca_dev, 2);
+}
+
+static void setilluminators(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->usb_buf[0] = 0x22;
+ if (sd->ctrls[ILLUM_TOP].val)
+ gspca_dev->usb_buf[1] = 0x76;
+ else if (sd->ctrls[ILLUM_BOT].val)
+ gspca_dev->usb_buf[1] = 0x7a;
+ else
+ gspca_dev->usb_buf[1] = 0x7e;
+ reg_w(gspca_dev, 2);
+}
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -177,10 +259,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- sd->brightness = BRIGHTNESS_DEF;
- sd->colors = COLOR_DEF;
- sd->gamma = GAMMA_DEF;
- sd->sharpness = SHARPNESS_DEF;
+ cam->ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
gspca_dev->nbalt = 9; /* use the altsetting 08 */
return 0;
@@ -189,13 +268,13 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+ gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
return 0;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int err_code;
u8 *data;
int i;
@@ -208,9 +287,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x01; /* address */
data[1] = 0x01;
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
/*
Initialize the MR97113 chip register
@@ -223,7 +300,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
- data[7] = sd->gamma * 0x40; /* reg 0x06: gamma */
+ data[7] = sd->ctrls[GAMMA].val * 0x40; /* reg 0x06: gamma */
data[8] = 0x01; /* reg 7, V start - was 0x03 */
/* if (h_size == 320 ) */
/* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */
@@ -232,16 +309,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
/*jfm: from win trace*/
data[10] = 0x18;
- err_code = reg_w(gspca_dev, 11);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 11);
data[0] = 0x23; /* address */
data[1] = 0x09; /* reg 35, append frame header */
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
data[0] = 0x3c; /* address */
/* if (gspca_dev->width == 1280) */
@@ -250,9 +323,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* else */
data[1] = 50; /* 50 reg 60, pc-cam frame size
* (unit: 4KB) 200KB */
- err_code = reg_w(gspca_dev, 2);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 2);
/* auto dark-gain */
data[0] = 0x5e; /* address */
@@ -261,37 +332,29 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* reg 0x5f/0x60 (LE) = saturation */
/* h (60): xxxx x100
* l (5f): xxxx x000 */
- data[2] = sd->colors << 3;
- data[3] = ((sd->colors >> 2) & 0xf8) | 0x04;
- data[4] = sd->brightness; /* reg 0x61 = brightness */
+ data[2] = sd->ctrls[COLORS].val << 3;
+ data[3] = ((sd->ctrls[COLORS].val >> 2) & 0xf8) | 0x04;
+ data[4] = sd->ctrls[BRIGHTNESS].val; /* reg 0x61 = brightness */
data[5] = 0x00;
- err_code = reg_w(gspca_dev, 6);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 6);
data[0] = 0x67;
/*jfm: from win trace*/
- data[1] = sd->sharpness * 4 + 3;
+ data[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
data[2] = 0x14;
- err_code = reg_w(gspca_dev, 3);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 3);
data[0] = 0x69;
data[1] = 0x2f;
data[2] = 0x28;
data[3] = 0x42;
- err_code = reg_w(gspca_dev, 4);
- if (err_code < 0)
- return err_code;
+ reg_w(gspca_dev, 4);
data[0] = 0x63;
data[1] = 0x07;
- err_code = reg_w(gspca_dev, 2);
+ reg_w(gspca_dev, 2);
/*jfm: win trace - many writes here to reg 0x64*/
- if (err_code < 0)
- return err_code;
/* initialize the MI sensor */
for (i = 0; i < sizeof mi_data; i++)
@@ -300,18 +363,26 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[0] = 0x00;
 data[1] = 0x4d; /* ISOC transferring enable... */
reg_w(gspca_dev, 2);
- return 0;
+
+ gspca_dev->ctrl_inac = 0; /* activate the illuminator controls */
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
{
- int result;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
+ if (sd->ctrls[ILLUM_TOP].val || sd->ctrls[ILLUM_BOT].val) {
+ sd->ctrls[ILLUM_TOP].val = 0;
+ sd->ctrls[ILLUM_BOT].val = 0;
+ setilluminators(gspca_dev);
+ msleep(20);
+ }
gspca_dev->usb_buf[0] = 1;
gspca_dev->usb_buf[1] = 0;
- result = reg_w(gspca_dev, 2);
- if (result < 0)
- PDEBUG(D_ERR, "Camera Stop failed");
+ reg_w(gspca_dev, 2);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -352,91 +423,28 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x61;
- gspca_dev->usb_buf[1] = val;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->brightness;
- return 0;
+ /* only one illuminator may be on */
+ sd->ctrls[ILLUM_TOP].val = val;
+ if (val)
+ sd->ctrls[ILLUM_BOT].val = 0;
+ setilluminators(gspca_dev);
+ return gspca_dev->usb_err;
}
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- sd->colors = val;
- if (gspca_dev->streaming) {
-
- /* see sd_start */
- gspca_dev->usb_buf[0] = 0x5f;
- gspca_dev->usb_buf[1] = sd->colors << 3;
- gspca_dev->usb_buf[2] = ((sd->colors >> 2) & 0xf8) | 0x04;
- reg_w(gspca_dev, 3);
- }
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gamma = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x06;
- gspca_dev->usb_buf[1] = val * 0x40;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gamma;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming) {
- gspca_dev->usb_buf[0] = 0x67;
- gspca_dev->usb_buf[1] = val * 4 + 3;
- reg_w(gspca_dev, 2);
- }
- return 0;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
+ /* only one illuminator may be on */
+ sd->ctrls[ILLUM_BOT].val = val;
+ if (val)
+ sd->ctrls[ILLUM_TOP].val = 0;
+ setilluminators(gspca_dev);
+ return gspca_dev->usb_err;
}
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
@@ -471,7 +479,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -510,18 +518,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 33744e724eaa..7607a288b51c 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -9,14 +9,14 @@
* is Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu>
*
* Support for the control settings for the CIF cameras is
- * Copyright (C) 2009 Hans de Goede <hdgoede@redhat.com> and
+ * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com> and
* Thomas Kaiser <thomas@kaiser-linux.li>
*
* Support for the control settings for the VGA cameras is
* Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu>
*
* Several previously unsupported cameras are owned and have been tested by
- * Hans de Goede <hdgoede@redhat.com> and
+ * Hans de Goede <hdegoede@redhat.com> and
* Thomas Kaiser <thomas@kaiser-linux.li> and
* Theodore Kilgore <kilgota@auburn.edu> and
* Edmond Rodriguez <erodrig_97@yahoo.com> and
@@ -267,7 +267,7 @@ static int mr_write(struct gspca_dev *gspca_dev, int len)
usb_sndbulkpipe(gspca_dev->dev, 4),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- PDEBUG(D_ERR, "reg write [%02x] error %d",
+ err("reg write [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -281,7 +281,7 @@ static int mr_read(struct gspca_dev *gspca_dev, int len)
usb_rcvbulkpipe(gspca_dev->dev, 3),
gspca_dev->usb_buf, len, NULL, 500);
if (rc < 0)
- PDEBUG(D_ERR, "reg read [%02x] error %d",
+ err("reg read [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
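
Error reports in the hunks above, and throughout the rest of the series, move from PDEBUG(D_ERR, ...), which only produces output when the driver is built with its debug option, to err(), an error-severity log call that does not depend on the gspca debug level, so genuine transfer failures reach the kernel log on production builds too. A sketch of the resulting call-site pattern, mirroring mr_read() above rather than reproducing it literally:

/* Pattern sketch, not a literal excerpt from the patch. */
static int read_reg_sketch(struct gspca_dev *gspca_dev, int len)
{
	int rc;

	rc = usb_bulk_msg(gspca_dev->dev,
			  usb_rcvbulkpipe(gspca_dev->dev, 3),
			  gspca_dev->usb_buf, len, NULL, 500);
	if (rc < 0)
		err("reg read [%02x] error %d",
		    gspca_dev->usb_buf[0], rc);	/* logged regardless of debug level */
	return rc;
}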
@@ -540,7 +540,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 1;
break;
default:
- PDEBUG(D_ERR, "Unknown CIF Sensor id : %02x",
+ err("Unknown CIF Sensor id : %02x",
gspca_dev->usb_buf[1]);
return -ENODEV;
}
@@ -575,10 +575,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type = 2;
} else if ((gspca_dev->usb_buf[0] != 0x03) &&
(gspca_dev->usb_buf[0] != 0x04)) {
- PDEBUG(D_ERR, "Unknown VGA Sensor id Byte 0: %02x",
+ err("Unknown VGA Sensor id Byte 0: %02x",
gspca_dev->usb_buf[0]);
- PDEBUG(D_ERR, "Defaults assumed, may not work");
- PDEBUG(D_ERR, "Please report this");
+ err("Defaults assumed, may not work");
+ err("Please report this");
}
/* Sakar Digital color needs to be adjusted. */
if ((gspca_dev->usb_buf[0] == 0x03) &&
@@ -595,12 +595,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* Nothing to do here. */
break;
default:
- PDEBUG(D_ERR,
- "Unknown VGA Sensor id Byte 1: %02x",
+ err("Unknown VGA Sensor id Byte 1: %02x",
gspca_dev->usb_buf[1]);
- PDEBUG(D_ERR,
- "Defaults assumed, may not work");
- PDEBUG(D_ERR, "Please report this");
+ err("Defaults assumed, may not work");
+ err("Please report this");
}
}
PDEBUG(D_PROBE, "MR97310A VGA camera detected, sensor: %d",
@@ -675,7 +673,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
__u8 *data = gspca_dev->usb_buf;
int err_code;
- const __u8 startup_string[] = {
+ static const __u8 startup_string[] = {
0x00,
0x0d,
0x01,
@@ -721,7 +719,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
return err_code;
if (!sd->sensor_type) {
- const struct sensor_w_data cif_sensor0_init_data[] = {
+ static const struct sensor_w_data cif_sensor0_init_data[] = {
{0x02, 0x00, {0x03, 0x5a, 0xb5, 0x01,
0x0f, 0x14, 0x0f, 0x10}, 8},
{0x0c, 0x00, {0x04, 0x01, 0x01, 0x00, 0x1f}, 5},
@@ -742,7 +740,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, cif_sensor0_init_data,
ARRAY_SIZE(cif_sensor0_init_data));
} else { /* sd->sensor_type = 1 */
- const struct sensor_w_data cif_sensor1_init_data[] = {
+ static const struct sensor_w_data cif_sensor1_init_data[] = {
/* Reg 3,4, 7,8 get set by the controls */
{0x02, 0x00, {0x10}, 1},
{0x05, 0x01, {0x22}, 1}, /* 5/6 also seen as 65h/32h */
@@ -777,8 +775,9 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
__u8 *data = gspca_dev->usb_buf;
int err_code;
- const __u8 startup_string[] = {0x00, 0x0d, 0x01, 0x00, 0x00, 0x2b,
- 0x00, 0x00, 0x00, 0x50, 0xc0};
+ static const __u8 startup_string[] =
+ {0x00, 0x0d, 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00,
+ 0x00, 0x50, 0xc0};
/* What some of these mean is explained in start_cif_cam(), above */
memcpy(data, startup_string, 11);
@@ -830,7 +829,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
return err_code;
if (!sd->sensor_type) {
- const struct sensor_w_data vga_sensor0_init_data[] = {
+ static const struct sensor_w_data vga_sensor0_init_data[] = {
{0x01, 0x00, {0x0c, 0x00, 0x04}, 3},
{0x14, 0x00, {0x01, 0xe4, 0x02, 0x84}, 4},
{0x20, 0x00, {0x00, 0x80, 0x00, 0x08}, 4},
@@ -841,20 +840,20 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, vga_sensor0_init_data,
ARRAY_SIZE(vga_sensor0_init_data));
} else if (sd->sensor_type == 1) {
- const struct sensor_w_data color_adj[] = {
+ static const struct sensor_w_data color_adj[] = {
{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
/* adjusted blue, green, red gain correct
too much blue from the Sakar Digital */
0x05, 0x01, 0x04}, 8}
};
- const struct sensor_w_data color_no_adj[] = {
+ static const struct sensor_w_data color_no_adj[] = {
{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
/* default blue, green, red gain settings */
0x07, 0x00, 0x01}, 8}
};
- const struct sensor_w_data vga_sensor1_init_data[] = {
+ static const struct sensor_w_data vga_sensor1_init_data[] = {
{0x11, 0x04, {0x01}, 1},
{0x0a, 0x00, {0x00, 0x01, 0x00, 0x00, 0x01,
/* These settings may be better for some cameras */
@@ -879,7 +878,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
err_code = sensor_write_regs(gspca_dev, vga_sensor1_init_data,
ARRAY_SIZE(vga_sensor1_init_data));
} else { /* sensor type == 2 */
- const struct sensor_w_data vga_sensor2_init_data[] = {
+ static const struct sensor_w_data vga_sensor2_init_data[] = {
{0x01, 0x00, {0x48}, 1},
{0x02, 0x00, {0x22}, 1},
@@ -976,7 +975,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
u8 val;
u8 sign_reg = 7; /* This reg and the next one used on CIF cams. */
u8 value_reg = 8; /* VGA cams seem to use regs 0x0b and 0x0c */
- const u8 quick_clix_table[] =
+ static const u8 quick_clix_table[] =
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
{ 0, 4, 8, 12, 1, 2, 3, 5, 6, 9, 7, 10, 13, 11, 14, 15};
/*
@@ -1261,18 +1260,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
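
The other recurring change in mr97310a.c is adding static to the const register tables declared inside functions (startup_string, the sensor init arrays, quick_clix_table). Without static, a const array with an initializer is still an automatic variable, so the compiler may rebuild it on the stack on every call; static const places it once in read-only data. A hypothetical illustration of the difference (the table contents and the reg_w_val() helper are placeholders, not from the patch):

/* Hypothetical sketch; values and the helper are made up. */
static void send_init_table(struct gspca_dev *gspca_dev)
{
	static const u8 init_tbl[] = { 0x01, 0x02, 0x03 };	/* built once, lives in .rodata */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(init_tbl); i++)
		reg_w_val(gspca_dev, i, init_tbl[i]);
}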
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 2b2cbdbf03fe..6cf6855aa506 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -57,10 +57,24 @@ static int frame_rate;
* are getting "Failed to read sensor ID..." */
static int i2c_detect_tries = 10;
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ HFLIP,
+ VFLIP,
+ AUTOBRIGHT,
+ FREQ,
+ NCTRL /* number of controls */
+};
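
This enum is the core of the ov519 conversion: the driver drops its individual brightness/contrast/... fields and the fourteen sd_set*/sd_get* wrappers, and instead keeps a gspca_ctrl array indexed by these names. The gspca core caches and returns control values itself and calls a single per-control .set_control hook, so a setter only has to read the cached value and program the sensor. Sketch of the access pattern a setter uses after this change (register name as in the patch, body abbreviated, function name hypothetical):

/* Sketch of the ctrls[] access pattern introduced here. */
static void setbrightness_sketch(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int val = sd->ctrls[BRIGHTNESS].val;	/* current value, cached by the core */

	i2c_w(sd, OV7610_REG_BRT, val);
}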
+
/* ov519 device descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRL];
+
__u8 packet_nr;
char bridge;
@@ -82,13 +96,6 @@ struct sd {
/* Determined by sensor type */
__u8 sif;
- __u8 brightness;
- __u8 contrast;
- __u8 colors;
- __u8 hflip;
- __u8 vflip;
- __u8 autobrightness;
- __u8 freq;
__u8 quality;
#define QUALITY_MIN 50
#define QUALITY_MAX 70
@@ -130,29 +137,16 @@ struct sd {
#include "w996Xcf.c"
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setcolors(struct gspca_dev *gspca_dev);
-static void setautobrightness(struct sd *sd);
-static void setfreq(struct sd *sd);
+static void sethvflip(struct gspca_dev *gspca_dev);
+static void setautobright(struct gspca_dev *gspca_dev);
+static void setfreq(struct gspca_dev *gspca_dev);
+static void setfreq_i(struct sd *sd);
static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -160,14 +154,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 127,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness,
},
-#define CONTRAST_IDX 1
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -175,14 +166,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
+ .default_value = 127,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast,
},
-#define COLOR_IDX 2
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -190,15 +178,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 127
- .default_value = COLOR_DEF,
+ .default_value = 127,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors,
},
/* The flip controls work with ov7670 only */
-#define HFLIP_IDX 3
- {
+[HFLIP] = {
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -206,14 +191,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_sethflip,
- .get = sd_gethflip,
+ .set_control = sethvflip,
},
-#define VFLIP_IDX 4
- {
+[VFLIP] = {
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -221,14 +203,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
+ .set_control = sethvflip,
},
-#define AUTOBRIGHT_IDX 5
- {
+[AUTOBRIGHT] = {
{
.id = V4L2_CID_AUTOBRIGHTNESS,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -236,14 +215,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define AUTOBRIGHT_DEF 1
- .default_value = AUTOBRIGHT_DEF,
+ .default_value = 1,
},
- .set = sd_setautobrightness,
- .get = sd_getautobrightness,
+ .set_control = setautobright,
},
-#define FREQ_IDX 6
- {
+[FREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -251,26 +227,9 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 0
- .default_value = FREQ_DEF,
- },
- .set = sd_setfreq,
- .get = sd_getfreq,
- },
-#define OV7670_FREQ_IDX 7
- {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 3, /* 0: 0, 1: 50Hz, 2:60Hz 3: Auto Hz */
- .step = 1,
-#define OV7670_FREQ_DEF 3
- .default_value = OV7670_FREQ_DEF,
+ .default_value = 0,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setfreq,
},
};
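
With the table indexed by the control enum, per-sensor differences reduce to a few assignments in sd_config() and sd_init() (both visible further down): the array is handed to the core through cam.ctrls, controls a sensor lacks are masked out in ctrl_dis, and defaults or ranges are adjusted by writing the ctrls[] entries before streaming starts. Condensed from the hunks below:

/* Condensed from sd_config()/sd_init() below; per-sensor guards noted in comments. */
gspca_dev->cam.ctrls = sd->ctrls;			/* publish the control array */
if (sd->sensor == SEN_OV7670)
	gspca_dev->ctrl_dis = 1 << COLORS;		/* saturation not supported */
else
	gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);	/* flips are ov7670-only */
if (sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
	gspca_dev->ctrl_dis |= (1 << NCTRL) - 1;	/* these sensors: no controls at all */

sd->ctrls[CONTRAST].def = 200;	/* ov6630: the stock default is too low */
sd->ctrls[FREQ].max = 3;	/* ov7670: extra "auto" banding-filter entry */
sd->ctrls[FREQ].def = 3;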
@@ -456,10 +415,10 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
-#define R51x_SYS_RESET 0x50
+#define R51x_SYS_RESET 0x50
/* Reset type flags */
#define OV511_RESET_OMNICE 0x08
-#define R51x_SYS_INIT 0x53
+#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5F
#define R51x_COMP_LUT_BEGIN 0x80
@@ -644,13 +603,11 @@ struct ov_i2c_regvals {
};
/* Settings for OV2610 camera chip */
-static const struct ov_i2c_regvals norm_2610[] =
-{
+static const struct ov_i2c_regvals norm_2610[] = {
{ 0x12, 0x80 }, /* reset */
};
-static const struct ov_i2c_regvals norm_3620b[] =
-{
+static const struct ov_i2c_regvals norm_3620b[] = {
/*
* From the datasheet: "Note that after writing to register COMH
* (0x12) to change the sensor mode, registers related to the
@@ -1900,7 +1857,7 @@ static int reg_w(struct sd *sd, __u16 index, __u16 value)
sd->gspca_dev.usb_buf, 1, 500);
leave:
if (ret < 0) {
- PDEBUG(D_ERR, "Write reg 0x%04x -> [0x%02x] failed",
+ err("Write reg 0x%04x -> [0x%02x] failed",
value, index);
return ret;
}
@@ -1938,7 +1895,7 @@ static int reg_r(struct sd *sd, __u16 index)
ret = sd->gspca_dev.usb_buf[0];
PDEBUG(D_USBI, "Read reg [0x%02X] -> 0x%04X", index, ret);
} else
- PDEBUG(D_ERR, "Read reg [0x%02x] failed", index);
+ err("Read reg [0x%02x] failed", index);
return ret;
}
@@ -1958,7 +1915,7 @@ static int reg_r8(struct sd *sd,
if (ret >= 0)
ret = sd->gspca_dev.usb_buf[0];
else
- PDEBUG(D_ERR, "Read reg 8 [0x%02x] failed", index);
+ err("Read reg 8 [0x%02x] failed", index);
return ret;
}
@@ -2006,7 +1963,7 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
0, index,
sd->gspca_dev.usb_buf, n, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "Write reg32 [%02x] %08x failed", index, value);
+ err("Write reg32 [%02x] %08x failed", index, value);
return ret;
}
@@ -2203,7 +2160,7 @@ static int ovfx2_i2c_w(struct sd *sd, __u8 reg, __u8 value)
(__u16)value, (__u16)reg, NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "i2c 0x%02x -> [0x%02x] failed", value, reg);
+ err("i2c 0x%02x -> [0x%02x] failed", value, reg);
return ret;
}
@@ -2225,7 +2182,7 @@ static int ovfx2_i2c_r(struct sd *sd, __u8 reg)
ret = sd->gspca_dev.usb_buf[0];
PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, ret);
} else
- PDEBUG(D_ERR, "i2c read [0x%02x] failed", reg);
+ err("i2c read [0x%02x] failed", reg);
return ret;
}
@@ -2481,7 +2438,7 @@ static int ov_hires_configure(struct sd *sd)
int high, low;
if (sd->bridge != BRIDGE_OVFX2) {
- PDEBUG(D_ERR, "error hires sensors only supported with ovfx2");
+ err("error hires sensors only supported with ovfx2");
return -1;
}
@@ -2498,7 +2455,7 @@ static int ov_hires_configure(struct sd *sd)
PDEBUG(D_PROBE, "Sensor is an OV3610");
sd->sensor = SEN_OV3610;
} else {
- PDEBUG(D_ERR, "Error unknown sensor type: 0x%02x%02x",
+ err("Error unknown sensor type: 0x%02x%02x",
high, low);
return -1;
}
@@ -2526,7 +2483,7 @@ static int ov8xx0_configure(struct sd *sd)
if ((rc & 3) == 1) {
sd->sensor = SEN_OV8610;
} else {
- PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
+ err("Unknown image sensor version: %d", rc & 3);
return -1;
}
@@ -2589,9 +2546,8 @@ static int ov7xx0_configure(struct sd *sd)
if (high == 0x76) {
switch (low) {
case 0x30:
- PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635");
- PDEBUG(D_ERR,
- "7630 is not supported by this driver");
+ err("Sensor is an OV7630/OV7635");
+ err("7630 is not supported by this driver");
return -1;
case 0x40:
PDEBUG(D_PROBE, "Sensor is an OV7645");
@@ -2614,7 +2570,7 @@ static int ov7xx0_configure(struct sd *sd)
sd->sensor = SEN_OV7620;
}
} else {
- PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
+ err("Unknown image sensor version: %d", rc & 3);
return -1;
}
@@ -2641,9 +2597,8 @@ static int ov6xx0_configure(struct sd *sd)
switch (rc) {
case 0x00:
sd->sensor = SEN_OV6630;
- PDEBUG(D_ERR,
- "WARNING: Sensor is an OV66308. Your camera may have");
- PDEBUG(D_ERR, "been misdetected in previous driver versions.");
+ warn("WARNING: Sensor is an OV66308. Your camera may have");
+ warn("been misdetected in previous driver versions.");
break;
case 0x01:
sd->sensor = SEN_OV6620;
@@ -2659,12 +2614,11 @@ static int ov6xx0_configure(struct sd *sd)
break;
case 0x90:
sd->sensor = SEN_OV6630;
- PDEBUG(D_ERR,
- "WARNING: Sensor is an OV66307. Your camera may have");
- PDEBUG(D_ERR, "been misdetected in previous driver versions.");
+ warn("WARNING: Sensor is an OV66307. Your camera may have");
+ warn("been misdetected in previous driver versions.");
break;
default:
- PDEBUG(D_ERR, "FATAL: Unknown sensor version: 0x%02x", rc);
+ err("FATAL: Unknown sensor version: 0x%02x", rc);
return -1;
}
@@ -2823,7 +2777,7 @@ static int ov511_configure(struct gspca_dev *gspca_dev)
};
const struct ov_regvals norm_511[] = {
- { R511_DRAM_FLOW_CTL, 0x01 },
+ { R511_DRAM_FLOW_CTL, 0x01 },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
{ R51x_SYS_SNAP, 0x00 },
@@ -2907,7 +2861,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
- { 0x31, 0x0f },
+ { 0x31, 0x0f },
{ 0x5d, 0x03 },
{ 0x24, 0x9f },
{ 0x25, 0x90 },
@@ -2920,7 +2874,7 @@ static int ov518_configure(struct gspca_dev *gspca_dev)
const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
- { 0x31, 0x0f },
+ { 0x31, 0x0f },
{ 0x5d, 0x03 },
{ 0x24, 0x9f },
{ 0x25, 0x90 },
@@ -3082,7 +3036,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
goto error;
}
} else {
- PDEBUG(D_ERR, "Can't determine sensor slave IDs");
+ err("Can't determine sensor slave IDs");
goto error;
}
@@ -3142,36 +3096,23 @@ static int sd_config(struct gspca_dev *gspca_dev,
goto error;
break;
}
- sd->brightness = BRIGHTNESS_DEF;
- if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
- sd->contrast = 200; /* The default is too low for the ov6630 */
+ gspca_dev->cam.ctrls = sd->ctrls;
+ if (sd->sensor == SEN_OV7670)
+ gspca_dev->ctrl_dis = 1 << COLORS;
else
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
- sd->autobrightness = AUTOBRIGHT_DEF;
- if (sd->sensor == SEN_OV7670) {
- sd->freq = OV7670_FREQ_DEF;
- gspca_dev->ctrl_dis = (1 << FREQ_IDX) | (1 << COLOR_IDX);
- } else {
- sd->freq = FREQ_DEF;
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
- (1 << OV7670_FREQ_IDX);
- }
+ gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->quality = QUALITY_DEF;
if (sd->sensor == SEN_OV7640 ||
sd->sensor == SEN_OV7648)
- gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT_IDX) |
- (1 << CONTRAST_IDX);
+ gspca_dev->ctrl_dis |= (1 << AUTOBRIGHT) | (1 << CONTRAST);
if (sd->sensor == SEN_OV7670)
- gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
+ gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT;
/* OV8610 Frequency filter control should work but needs testing */
if (sd->sensor == SEN_OV8610)
- gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
+ gspca_dev->ctrl_dis |= 1 << FREQ;
/* No controls for the OV2610/OV3610 */
if (sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
- gspca_dev->ctrl_dis |= 0xFF;
+ gspca_dev->ctrl_dis |= (1 << NCTRL) - 1;
return 0;
error:
@@ -3206,6 +3147,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
case SEN_OV6630:
case SEN_OV66308AF:
+ sd->ctrls[CONTRAST].def = 200;
+ /* The default is too low for the ov6630 */
if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
return -EIO;
break;
@@ -3228,6 +3171,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -EIO;
break;
case SEN_OV7670:
+ sd->ctrls[FREQ].max = 3; /* auto */
+ sd->ctrls[FREQ].def = 3;
if (write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)))
return -EIO;
break;
@@ -3253,7 +3198,7 @@ static int ov511_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
@@ -3377,7 +3322,7 @@ static int ov518_mode_init_regs(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
@@ -3706,7 +3651,7 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
break;
case SEN_OV7610:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
- i2c_w(sd, 0x35, qvga?0x1e:0x9e);
+ i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
@@ -3798,15 +3743,17 @@ static int mode_init_ov_sensor_regs(struct sd *sd)
return 0;
}
-static void sethvflip(struct sd *sd)
+static void sethvflip(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
if (sd->sensor != SEN_OV7670)
return;
if (sd->gspca_dev.streaming)
ov51x_stop(sd);
i2c_w_mask(sd, OV7670_REG_MVFP,
- OV7670_MVFP_MIRROR * sd->hflip
- | OV7670_MVFP_VFLIP * sd->vflip,
+ OV7670_MVFP_MIRROR * sd->ctrls[HFLIP].val
+ | OV7670_MVFP_VFLIP * sd->ctrls[VFLIP].val,
OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP);
if (sd->gspca_dev.streaming)
ov51x_restart(sd);
@@ -3957,9 +3904,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
setcontrast(gspca_dev);
setbrightness(gspca_dev);
setcolors(gspca_dev);
- sethvflip(sd);
- setautobrightness(sd);
- setfreq(sd);
+ sethvflip(gspca_dev);
+ setautobright(gspca_dev);
+ setfreq_i(sd);
/* Force clear snapshot state in case the snapshot button was
pressed while we weren't streaming */
@@ -4000,7 +3947,7 @@ static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
struct sd *sd = (struct sd *) gspca_dev;
if (sd->snapshot_pressed != state) {
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
input_sync(gspca_dev->input_dev);
#endif
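
The input-reporting guards change from #ifdef CONFIG_INPUT to also accept CONFIG_INPUT_MODULE, so snapshot-button events are still compiled in when the input core is built as a module rather than built in; previously that configuration silently lost the feature. (Later kernels spell the same test IS_ENABLED(CONFIG_INPUT); that helper does not appear to exist yet at this point in the tree.) The guard as used here:

/* Guard used throughout this series for optional input-core support. */
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
	input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
	input_sync(gspca_dev->input_dev);
#endif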
@@ -4214,7 +4161,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->brightness;
+ val = sd->ctrls[BRIGHTNESS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4229,7 +4176,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
case SEN_OV7620:
case SEN_OV7620AE:
/* 7620 doesn't like manual changes when in auto mode */
- if (!sd->autobrightness)
+ if (!sd->ctrls[AUTOBRIGHT].val)
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7670:
@@ -4245,7 +4192,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->contrast;
+ val = sd->ctrls[CONTRAST].val;
switch (sd->sensor) {
case SEN_OV7610:
case SEN_OV6620:
@@ -4287,7 +4234,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int val;
- val = sd->colors;
+ val = sd->ctrls[COLORS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4317,23 +4264,25 @@ static void setcolors(struct gspca_dev *gspca_dev)
}
}
-static void setautobrightness(struct sd *sd)
+static void setautobright(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7648 ||
sd->sensor == SEN_OV7670 ||
sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
return;
- i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
+ i2c_w_mask(sd, 0x2d, sd->ctrls[AUTOBRIGHT].val ? 0x10 : 0x00, 0x10);
}
-static void setfreq(struct sd *sd)
+static void setfreq_i(struct sd *sd)
{
if (sd->sensor == SEN_OV2610 || sd->sensor == SEN_OV3610)
return;
if (sd->sensor == SEN_OV7670) {
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
break;
@@ -4355,7 +4304,7 @@ static void setfreq(struct sd *sd)
break;
}
} else {
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, 0x2d, 0x00, 0x04);
i2c_w_mask(sd, 0x2a, 0x00, 0x80);
@@ -4387,135 +4336,15 @@ static void setfreq(struct sd *sd)
}
}
}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming)
- sethvflip(sd);
- return 0;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- sethvflip(sd);
- return 0;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
-static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autobrightness = val;
- if (gspca_dev->streaming)
- setautobrightness(sd);
- return 0;
-}
-
-static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autobrightness;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+static void setfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sd->freq = val;
- if (gspca_dev->streaming) {
- setfreq(sd);
- /* Ugly but necessary */
- if (sd->bridge == BRIDGE_W9968CF)
- w9968cf_set_crop_window(sd);
- }
- return 0;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ setfreq_i(sd);
- *val = sd->freq;
- return 0;
+ /* Ugly but necessary */
+ if (sd->bridge == BRIDGE_W9968CF)
+ w9968cf_set_crop_window(sd);
}
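
The frequency control is the one case where a plain cached-value setter is not enough: on the W9968CF bridge a banding-filter change also requires redoing the crop window. The split above keeps setfreq_i() as the low-level routine that sd_start() calls, while setfreq() is the .set_control wrapper the core invokes at runtime and is the only path that refreshes the crop window. For reference, this is how the wrapper is wired into the control table earlier in the file (entry condensed from the patch):

/* FREQ entry from sd_ctrls[] above, condensed. */
[FREQ] = {
	{
		.id	 = V4L2_CID_POWER_LINE_FREQUENCY,
		.type	 = V4L2_CTRL_TYPE_MENU,
		.name	 = "Light frequency filter",
		.minimum = 0,
		.maximum = 2,
		.step	 = 1,
		.default_value = 0,
	},
	.set_control = setfreq,		/* the wrapper, not setfreq_i() */
},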
static int sd_querymenu(struct gspca_dev *gspca_dev,
@@ -4601,7 +4430,7 @@ static const struct sd_desc sd_desc = {
.querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -4663,17 +4492,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 96cb3a976581..88ef03f6235b 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -487,7 +487,7 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0)
- PDEBUG(D_ERR, "write failed");
+ err("write failed %d", ret);
}
static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -502,7 +502,7 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0)
- PDEBUG(D_ERR, "read failed");
+ err("read failed %d", ret);
return gspca_dev->usb_buf[0];
}
@@ -564,7 +564,7 @@ static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_write failed");
+ err("sccb_reg_write failed");
}
static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -572,11 +572,11 @@ static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_read failed 1");
+ err("sccb_reg_read failed 1");
ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_reg_read failed 2");
+ err("sccb_reg_read failed 2");
return ov534_reg_read(gspca_dev, OV534_REG_READ);
}
@@ -1327,19 +1327,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index bbe5a030e3b4..e831f0d280ea 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -785,7 +785,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w failed %d", ret);
+ err("reg_w failed %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -810,7 +810,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
PDEBUG(D_USBI, "reg_r [%04x] -> %02x", reg, gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
return gspca_dev->usb_buf[0];
@@ -848,7 +848,7 @@ static void sccb_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
reg_w_i(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_write failed");
+ err("sccb_write failed");
}
static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -856,11 +856,11 @@ static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg)
reg_w(gspca_dev, OV534_REG_SUBADDR, reg);
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_read failed 1");
+ err("sccb_read failed 1");
reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2);
if (!sccb_check_status(gspca_dev))
- PDEBUG(D_ERR, "sccb_read failed 2");
+ err("sccb_read failed 2");
return reg_r(gspca_dev, OV534_REG_READ);
}
@@ -1458,19 +1458,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index a40f8893310d..15e97fa4c337 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -1,7 +1,7 @@
/*
* Pixart PAC207BCA library
*
- * Copyright (C) 2008 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li
* Copyleft (C) 2005 Michel Xhaard mxhaard@magic.fr
*
@@ -28,7 +28,7 @@
#include <linux/input.h>
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Pixart PAC207");
MODULE_LICENSE("GPL");
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
#define PAC207_GAIN_MIN 0
#define PAC207_GAIN_MAX 31
-#define PAC207_GAIN_DEFAULT 9 /* power on default: 9 */
+#define PAC207_GAIN_DEFAULT 9 /* power on default: 9 */
#define PAC207_GAIN_KNEE 31
#define PAC207_AUTOGAIN_DEADZONE 30
@@ -178,8 +178,7 @@ static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
0x00, index,
gspca_dev->usb_buf, length, PAC207_CTRL_TIMEOUT);
if (err < 0)
- PDEBUG(D_ERR,
- "Failed to write registers to index 0x%04X, error %d)",
+ err("Failed to write registers to index 0x%04X, error %d)",
index, err);
return err;
@@ -195,7 +194,7 @@ static int pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, NULL, 0, PAC207_CTRL_TIMEOUT);
if (err)
- PDEBUG(D_ERR, "Failed to write a register (index 0x%04X,"
+ err("Failed to write a register (index 0x%04X,"
" value 0x%02X, error %d)", index, value, err);
return err;
@@ -211,8 +210,7 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
0x00, index,
gspca_dev->usb_buf, 1, PAC207_CTRL_TIMEOUT);
if (res < 0) {
- PDEBUG(D_ERR,
- "Failed to read a register (index 0x%04X, error %d)",
+ err("Failed to read a register (index 0x%04X, error %d)",
index, res);
return res;
}
@@ -496,7 +494,7 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -526,7 +524,7 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -572,17 +570,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index a66df07d7625..55fbea7381b0 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -408,9 +408,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_buf(): "
- "Failed to write registers to index 0x%x, error %i",
- index, ret);
+ err("reg_w_buf failed index 0x%02x, error %d",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -432,9 +431,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w(): "
- "Failed to write register to index 0x%x, value 0x%x, error %i",
- index, value, ret);
+ err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -468,10 +466,9 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_page(): "
- "Failed to write register to index 0x%x, "
- "value 0x%x, error %i",
- index, page[index], ret);
+ err("reg_w_page() failed index 0x%02x, "
+ "value 0x%02x, error %d",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
@@ -900,9 +897,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
struct sd *sd = (struct sd *) gspca_dev;
sd->contrast = val;
- if (gspca_dev->streaming) {
+ if (gspca_dev->streaming)
setbrightcont(gspca_dev);
- }
return gspca_dev->usb_err;
}
@@ -1135,7 +1131,7 @@ static int sd_chip_ident(struct gspca_dev *gspca_dev,
}
#endif
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrput packet length */
@@ -1182,7 +1178,7 @@ static const struct sd_desc sd_desc = {
.set_register = sd_dbg_s_register,
.get_chip_ident = sd_chip_ident,
#endif
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1226,17 +1222,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 1cb7e99e92bd..7657b43b3203 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -276,9 +276,8 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_buf(): "
- "Failed to write registers to index 0x%x, error %i",
- index, ret);
+ err("reg_w_buf() failed index 0x%02x, error %d",
+ index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -300,9 +299,8 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w(): "
- "Failed to write register to index 0x%x, value 0x%x, error %i",
- index, value, ret);
+ err("reg_w() failed index 0x%02x, value 0x%02x, error %d",
+ index, value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -336,10 +334,9 @@ static void reg_w_page(struct gspca_dev *gspca_dev,
0, index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_page(): "
- "Failed to write register to index 0x%x, "
- "value 0x%x, error %i",
- index, page[index], ret);
+ err("reg_w_page() failed index 0x%02x, "
+ "value 0x%02x, error %d",
+ index, page[index], ret);
gspca_dev->usb_err = ret;
break;
}
@@ -675,9 +672,8 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
struct sd *sd = (struct sd *) gspca_dev;
sd->contrast = val;
- if (gspca_dev->streaming) {
+ if (gspca_dev->streaming)
setcontrast(gspca_dev);
- }
return gspca_dev->usb_err;
}
@@ -792,7 +788,7 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -835,7 +831,7 @@ static const struct sd_desc sd_desc = {
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -874,17 +870,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 71d9447a7986..40a06680502d 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -75,7 +75,7 @@ static int sn9c2028_command(struct gspca_dev *gspca_dev, u8 *command)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
2, 0, gspca_dev->usb_buf, 6, 500);
if (rc < 0) {
- PDEBUG(D_ERR, "command write [%02x] error %d",
+ err("command write [%02x] error %d",
gspca_dev->usb_buf[0], rc);
return rc;
}
@@ -93,7 +93,7 @@ static int sn9c2028_read1(struct gspca_dev *gspca_dev)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1, 0, gspca_dev->usb_buf, 1, 500);
if (rc != 1) {
- PDEBUG(D_ERR, "read1 error %d", rc);
+ err("read1 error %d", rc);
return (rc < 0) ? rc : -EIO;
}
PDEBUG(D_USBI, "read1 response %02x", gspca_dev->usb_buf[0]);
@@ -109,7 +109,7 @@ static int sn9c2028_read4(struct gspca_dev *gspca_dev, u8 *reading)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
4, 0, gspca_dev->usb_buf, 4, 500);
if (rc != 4) {
- PDEBUG(D_ERR, "read4 error %d", rc);
+ err("read4 error %d", rc);
return (rc < 0) ? rc : -EIO;
}
memcpy(reading, gspca_dev->usb_buf, 4);
@@ -131,7 +131,7 @@ static int sn9c2028_long_command(struct gspca_dev *gspca_dev, u8 *command)
for (i = 0; i < 256 && status < 2; i++)
status = sn9c2028_read1(gspca_dev);
if (status != 2) {
- PDEBUG(D_ERR, "long command status read error %d", status);
+ err("long command status read error %d", status);
return (status < 0) ? status : -EIO;
}
@@ -638,7 +638,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
err_code = start_vivitar_cam(gspca_dev);
break;
default:
- PDEBUG(D_ERR, "Starting unknown camera, please report this");
+ err("Starting unknown camera, please report this");
return -ENXIO;
}
@@ -738,19 +738,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 9052d5702556..6b155ae3a746 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,9 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifdef CONFIG_INPUT
#include <linux/input.h>
-#endif
#include "gspca.h"
#include "jpeg.h"
@@ -347,8 +345,8 @@ static const struct ctrl sd_ctrls[] = {
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 240,
- .sizeimage = 240 * 120,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0 | MODE_JPEG},
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -357,13 +355,13 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0 | MODE_RAW},
{160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 240,
+ .bytesperline = 160,
.sizeimage = 240 * 120,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 480,
- .sizeimage = 480 * 240 ,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1 | MODE_JPEG},
{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -372,13 +370,13 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1 | MODE_RAW},
{320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 480,
+ .bytesperline = 320,
.sizeimage = 480 * 240 ,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 960,
- .sizeimage = 960 * 480,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2 | MODE_JPEG},
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -387,7 +385,7 @@ static const struct v4l2_pix_format vga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2 | MODE_RAW},
{640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 960,
+ .bytesperline = 640,
.sizeimage = 960 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
@@ -395,8 +393,8 @@ static const struct v4l2_pix_format vga_mode[] = {
static const struct v4l2_pix_format sxga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 240,
- .sizeimage = 240 * 120,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0 | MODE_JPEG},
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -405,13 +403,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0 | MODE_RAW},
{160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 240,
+ .bytesperline = 160,
.sizeimage = 240 * 120,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 480,
- .sizeimage = 480 * 240 ,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1 | MODE_JPEG},
{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -420,13 +418,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1 | MODE_RAW},
{320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 480,
+ .bytesperline = 320,
.sizeimage = 480 * 240 ,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
- .bytesperline = 960,
- .sizeimage = 960 * 480,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2 | MODE_JPEG},
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
@@ -435,13 +433,13 @@ static const struct v4l2_pix_format sxga_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2 | MODE_RAW},
{640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE,
- .bytesperline = 960,
+ .bytesperline = 640,
.sizeimage = 960 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 1280,
- .sizeimage = (1280 * 1024) + 64,
+ .sizeimage = 1280 * 1024,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3 | MODE_RAW | MODE_SXGA},
};
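
The sn9c20x format tables previously advertised bytesperline as 1.5x the width and a raw-sized sizeimage even for the JPEG modes. The patch sets bytesperline to the real width (also for the planar I420 modes, where it describes the Y plane), drops the stray +64 on the 1280x1024 raw mode, and sizes the JPEG buffers as roughly 3/8 of the raw frame (4/8 for the tiny 160x120 mode) plus 590 bytes of header slack. A standalone check of the new numbers (constants copied from the tables above; the 590-byte margin is simply what the patch uses):

/* Standalone check of the JPEG sizeimage values used in the tables above. */
#include <stdio.h>

static unsigned int jpeg_size(unsigned int w, unsigned int h, unsigned int num)
{
	return w * h * num / 8 + 590;	/* num/8 compression estimate + header slack */
}

int main(void)
{
	printf("160x120: %u\n", jpeg_size(160, 120, 4));	/* 10190 */
	printf("320x240: %u\n", jpeg_size(320, 240, 3));	/* 29390 */
	printf("640x480: %u\n", jpeg_size(640, 480, 3));	/* 115790 */
	return 0;
}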
@@ -1272,7 +1270,8 @@ static int soi968_init_sensor(struct gspca_dev *gspca_dev)
}
}
/* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << EXPOSURE_IDX);
+ gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX)
+ | (1 << EXPOSURE_IDX);
sd->hstart = 60;
sd->vstart = 11;
return 0;
@@ -1351,7 +1350,9 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX)
+ | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1395,7 +1396,8 @@ static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 0;
sd->vstart = 2;
return 0;
@@ -1412,7 +1414,8 @@ static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX)
+ | (1 << GAIN_IDX);
sd->hstart = 0;
sd->vstart = 2;
return 0;
@@ -2304,7 +2307,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
do_autoexposure(gspca_dev, avg_lum);
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet */
int len) /* interrupt packet length */
@@ -2386,7 +2389,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
.dq_callback = sd_dqcallback,
@@ -2467,17 +2470,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 204bb3af4559..706f96f92654 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -323,10 +323,9 @@ static const __u8 initOv6650[] = {
0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
};
-static const __u8 ov6650_sensor_init[][8] =
-{
+static const __u8 ov6650_sensor_init[][8] = {
/* Bright, contrast, etc are set through SCBB interface.
- * AVCAP on win2 do not send any data on this controls. */
+ * AVCAP on win2 do not send any data on this controls. */
/* Anyway, some registers appears to alter bright and constrat */
/* Reset sensor */
@@ -544,7 +543,7 @@ static const __u8 initTas5130[] = {
0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
};
static const __u8 tas5130_sensor_init[][8] = {
-/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
+/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
* shutter 0x47 short exposure? */
{0x30, 0x11, 0x00, 0x40, 0x01, 0x00, 0x00, 0x10},
/* shutter 0x01 long exposure */
@@ -861,7 +860,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
i2c[4] |= reg11 - 1;
/* If register 11 didn't change, don't change it */
- if (sd->reg11 == reg11 )
+ if (sd->reg11 == reg11)
i2c[0] = 0xa0;
if (i2c_w(gspca_dev, i2c) == 0)
@@ -1388,7 +1387,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -1419,7 +1418,7 @@ static const struct sd_desc sd_desc = {
.pkt_scan = sd_pkt_scan,
.querymenu = sd_querymenu,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -1479,17 +1478,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 370544361be2..330dadc00106 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -31,24 +31,32 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ BLUE,
+ RED,
+ GAMMA,
+ AUTOGAIN,
+ HFLIP,
+ VFLIP,
+ SHARPNESS,
+ INFRARED,
+ FREQ,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRLS];
+
atomic_t avg_lum;
u32 exposure;
- u16 brightness;
- u8 contrast;
- u8 colors;
- u8 autogain;
- u8 blue;
- u8 red;
- u8 gamma;
- u8 vflip; /* ov7630/ov7648 only */
- u8 sharpness;
- u8 infrared; /* mt9v111 only */
- u8 freq; /* ov76xx only */
u8 quality; /* image quality */
#define QUALITY_MIN 60
#define QUALITY_MAX 95
@@ -75,6 +83,7 @@ enum sensors {
SENSOR_GC0307,
SENSOR_HV7131R,
SENSOR_MI0360,
+ SENSOR_MI0360B,
SENSOR_MO4000,
SENSOR_MT9V111,
SENSOR_OM6802,
@@ -88,48 +97,31 @@ enum sensors {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setredblue(struct gspca_dev *gspca_dev);
+static void setgamma(struct gspca_dev *gspca_dev);
+static void setautogain(struct gspca_dev *gspca_dev);
+static void sethvflip(struct gspca_dev *gspca_dev);
+static void setsharpness(struct gspca_dev *gspca_dev);
+static void setinfrared(struct gspca_dev *gspca_dev);
+static void setfreq(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
-#define BRIGHTNESS_MAX 0xffff
- .maximum = BRIGHTNESS_MAX,
+ .maximum = 0xff,
.step = 1,
-#define BRIGHTNESS_DEF 0x8000
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 0x80,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
-#define CONTRAST_IDX 1
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -138,14 +130,11 @@ static const struct ctrl sd_ctrls[] = {
#define CONTRAST_MAX 127
.maximum = CONTRAST_MAX,
.step = 1,
-#define CONTRAST_DEF 63
- .default_value = CONTRAST_DEF,
+ .default_value = 63,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast
},
-#define COLOR_IDX 2
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -153,14 +142,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 40,
.step = 1,
-#define COLOR_DEF 25
- .default_value = COLOR_DEF,
+#define COLORS_DEF 25
+ .default_value = COLORS_DEF,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
-#define BLUE_BALANCE_IDX 3
- {
+[BLUE] = {
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -168,14 +155,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 24,
.maximum = 40,
.step = 1,
-#define BLUE_BALANCE_DEF 32
- .default_value = BLUE_BALANCE_DEF,
+ .default_value = 32,
},
- .set = sd_setblue_balance,
- .get = sd_getblue_balance,
+ .set_control = setredblue
},
-#define RED_BALANCE_IDX 4
- {
+[RED] = {
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -183,14 +167,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 24,
.maximum = 40,
.step = 1,
-#define RED_BALANCE_DEF 32
- .default_value = RED_BALANCE_DEF,
+ .default_value = 32,
},
- .set = sd_setred_balance,
- .get = sd_getred_balance,
+ .set_control = setredblue
},
-#define GAMMA_IDX 5
- {
+[GAMMA] = {
{
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -201,11 +182,9 @@ static const struct ctrl sd_ctrls[] = {
#define GAMMA_DEF 20
.default_value = GAMMA_DEF,
},
- .set = sd_setgamma,
- .get = sd_getgamma,
+ .set_control = setgamma
},
-#define AUTOGAIN_IDX 6
- {
+[AUTOGAIN] = {
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -213,15 +192,23 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
+ .default_value = 1
+ },
+ .set_control = setautogain
+ },
+[HFLIP] = {
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
+ .set_control = sethvflip
},
-/* ov7630/ov7648 only */
-#define VFLIP_IDX 7
- {
+[VFLIP] = {
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -229,14 +216,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = 0,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
+ .set_control = sethvflip
},
-#define SHARPNESS_IDX 8
- {
+[SHARPNESS] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -244,15 +228,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define SHARPNESS_DEF 90
- .default_value = SHARPNESS_DEF,
+ .default_value = 90,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
+ .set_control = setsharpness
},
/* mt9v111 only */
-#define INFRARED_IDX 9
- {
+[INFRARED] = {
{
.id = V4L2_CID_INFRARED,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -260,15 +241,12 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
-#define INFRARED_DEF 0
- .default_value = INFRARED_DEF,
+ .default_value = 0,
},
- .set = sd_setinfrared,
- .get = sd_getinfrared,
+ .set_control = setinfrared
},
/* ov7630/ov7648/ov7660 only */
-#define FREQ_IDX 10
- {
+[FREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -276,69 +254,85 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
+ .default_value = 1,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setfreq
},
};
/* table of the disabled controls */
static const __u32 ctrl_dis[] = {
-[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_GC0307] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_HV7131R] = (1 << INFRARED_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MI0360] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MO4000] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_MT9V111] = (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_OM6802] = (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_OV7630] = (1 << INFRARED_IDX),
-
-[SENSOR_OV7648] = (1 << INFRARED_IDX),
-
-[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX),
-
-[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-[SENSOR_SOI768] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
-
-[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) |
- (1 << INFRARED_IDX) |
- (1 << VFLIP_IDX) |
- (1 << FREQ_IDX),
+[SENSOR_ADCM1700] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_GC0307] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_HV7131R] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MI0360] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MI0360B] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MO4000] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_MT9V111] = (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_OM6802] = (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_OV7630] = (1 << INFRARED) |
+ (1 << HFLIP),
+
+[SENSOR_OV7648] = (1 << INFRARED) |
+ (1 << HFLIP),
+
+[SENSOR_OV7660] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP),
+
+[SENSOR_PO1030] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_PO2030N] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << FREQ),
+
+[SENSOR_SOI768] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
+
+[SENSOR_SP80708] = (1 << AUTOGAIN) |
+ (1 << INFRARED) |
+ (1 << HFLIP) |
+ (1 << VFLIP) |
+ (1 << FREQ),
};
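
Each entry of ctrl_dis is a bitmask indexed by the control enum: a set bit marks a control that does not apply to the given sensor. Later in the patch, sd_init() copies the mask of the detected sensor into gspca_dev->ctrl_dis and the setters test it before touching the hardware. A minimal, self-contained sketch of that lookup, with hypothetical names that are not the driver's:

/* Hypothetical sketch of a per-sensor "disabled controls" bitmask. */
enum ctrl_idx { BRIGHTNESS_C, AUTOGAIN_C, VFLIP_C, NCTRL_C };
enum sensor_id { SENSOR_A, SENSOR_B, NSENSORS };

static const unsigned int disabled[NSENSORS] = {
	[SENSOR_A] = (1 << AUTOGAIN_C) | (1 << VFLIP_C),	/* no autogain/vflip */
	[SENSOR_B] = 0,						/* everything supported */
};

static int ctrl_supported(enum sensor_id s, enum ctrl_idx c)
{
	return !(disabled[s] & (1 << c));
}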
static const struct v4l2_pix_format cif_mode[] = {
@@ -411,6 +405,17 @@ static const u8 sn_mi0360[0x1c] = {
0x06, 0x00, 0x00, 0x00
};
+static const u8 sn_mi0360b[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x40,
+/* reg18 reg19 reg1a reg1b */
+ 0x06, 0x00, 0x00, 0x00
+};
+
static const u8 sn_mo4000[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x23, 0x60, 0x00, 0x1a, 0x00, 0x20, 0x18,
@@ -527,6 +532,7 @@ static const u8 *sn_tb[] = {
[SENSOR_GC0307] = sn_gc0307,
[SENSOR_HV7131R] = sn_hv7131,
[SENSOR_MI0360] = sn_mi0360,
+[SENSOR_MI0360B] = sn_mi0360b,
[SENSOR_MO4000] = sn_mo4000,
[SENSOR_MT9V111] = sn_mt9v111,
[SENSOR_OM6802] = sn_om6802,
@@ -572,20 +578,23 @@ static const u8 reg84[] = {
0x3e, 0x00, 0xcd, 0x0f, 0xf7, 0x0f, /* VR VG VB */
0x00, 0x00, 0x00 /* YUV offsets */
};
+
+#define DELAY 0xdd
+
static const u8 adcm1700_sensor_init[][8] = {
{0xa0, 0x51, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x04, 0x08, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x0c, 0xe0, 0x2e, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x10, 0x02, 0x02, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x14, 0x0e, 0x0e, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x1c, 0x00, 0x80, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x20, 0x01, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x04, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0xb0, 0x51, 0x04, 0x01, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10},
{0xb0, 0x51, 0x14, 0x01, 0x00, 0x00, 0x00, 0x10},
@@ -629,7 +638,7 @@ static const u8 gc0307_sensor_init[][8] = {
{0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
{0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10},
{0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10},
@@ -747,6 +756,62 @@ static const u8 mi0360_sensor_init[][8] = {
{0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */
{}
};
+static const u8 mi0360b_sensor_init[][8] = {
+ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/
+ {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/
+ {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10},
+ {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10},
+ {0xd1, 0x5d, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10},
+ {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10},
+ {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10},
+ {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10},
+ {0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10},
+ {0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10},
+ {0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10},
+
+ {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10},
+ {0xd1, 0x5d, 0x2b, 0x00, 0x33, 0x00, 0xa0, 0x10},
+ {0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0x33, 0x10},
+ {}
+};
+static const u8 mi0360b_sensor_param1[][8] = {
+ {0xb1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x06, 0x00, 0x53, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10},
+ {0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */
+
+ {0xd1, 0x5d, 0x2b, 0x00, 0xd1, 0x01, 0xc9, 0x10},
+ {0xd1, 0x5d, 0x2d, 0x00, 0xed, 0x00, 0xd1, 0x10},
+ {0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */
+ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */
+ {}
+};
static const u8 mo4000_sensor_init[][8] = {
{0xa1, 0x21, 0x01, 0x02, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10},
@@ -772,7 +837,7 @@ static const u8 mo4000_sensor_init[][8] = {
};
static const u8 mt9v111_sensor_init[][8] = {
{0xb1, 0x5c, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, /* reset? */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xb1, 0x5c, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb1, 0x5c, 0x01, 0x00, 0x01, 0x00, 0x00, 0x10}, /* IFP select */
{0xb1, 0x5c, 0x08, 0x04, 0x80, 0x00, 0x00, 0x10}, /* output fmt ctrl */
@@ -860,10 +925,10 @@ static const u8 om6802_sensor_param1[][8] = {
static const u8 ov7630_sensor_init[][8] = {
{0xa1, 0x21, 0x76, 0x01, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
/* win: i2c_r from 00 to 80 */
{0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
@@ -917,7 +982,7 @@ static const u8 ov7630_sensor_param1[][8] = {
static const u8 ov7648_sensor_init[][8] = {
{0xa1, 0x21, 0x76, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x21, 0x03, 0xa4, 0x30, 0x88, 0x00, 0x10},
{0xb1, 0x21, 0x11, 0x80, 0x08, 0x00, 0x00, 0x10},
@@ -966,7 +1031,7 @@ static const u8 ov7648_sensor_param1[][8] = {
static const u8 ov7660_sensor_init[][8] = {
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10},
/* Outformat = rawRGB */
{0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */
@@ -1062,7 +1127,7 @@ static const u8 ov7660_sensor_param1[][8] = {
static const u8 po1030_sensor_init[][8] = {
/* the sensor registers are described in m5602/m5602_po1030.h */
{0xa1, 0x6e, 0x3f, 0x20, 0x00, 0x00, 0x00, 0x10}, /* sensor reset */
- {0xdd, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
+ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
{0xa1, 0x6e, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x6e, 0x04, 0x02, 0xb1, 0x02, 0x39, 0x10},
@@ -1116,10 +1181,10 @@ static const u8 po1030_sensor_param1[][8] = {
static const u8 po2030n_sensor_init[][8] = {
{0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
{0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
{0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10},
@@ -1168,7 +1233,7 @@ static const u8 po2030n_sensor_init[][8] = {
};
static const u8 po2030n_sensor_param1[][8] = {
{0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
+ {DELAY, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
{0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
{0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
@@ -1182,16 +1247,16 @@ static const u8 po2030n_sensor_param1[][8] = {
{0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10},
/*after start*/
{0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
{0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10},
- {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
{0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10},
{}
};
static const u8 soi768_sensor_init[][8] = {
{0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
- {0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
+ {DELAY, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
{0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10},
@@ -1310,6 +1375,7 @@ static const u8 (*sensor_init[])[8] = {
[SENSOR_GC0307] = gc0307_sensor_init,
[SENSOR_HV7131R] = hv7131r_sensor_init,
[SENSOR_MI0360] = mi0360_sensor_init,
+[SENSOR_MI0360B] = mi0360b_sensor_init,
[SENSOR_MO4000] = mo4000_sensor_init,
[SENSOR_MT9V111] = mt9v111_sensor_init,
[SENSOR_OM6802] = om6802_sensor_init,
@@ -1326,13 +1392,17 @@ static const u8 (*sensor_init[])[8] = {
static void reg_r(struct gspca_dev *gspca_dev,
u16 value, int len)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
err("reg_r: buffer overflow");
return;
}
#endif
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1340,15 +1410,23 @@ static void reg_r(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, len,
500);
PDEBUG(D_USBI, "reg_r [%02x] -> %02x", value, gspca_dev->usb_buf[0]);
+ if (ret < 0) {
+ err("reg_r err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static void reg_w1(struct gspca_dev *gspca_dev,
u16 value,
u8 data)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "reg_w1 [%04x] = %02x", value, data);
gspca_dev->usb_buf[0] = data;
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1356,12 +1434,20 @@ static void reg_w1(struct gspca_dev *gspca_dev,
0,
gspca_dev->usb_buf, 1,
500);
+ if (ret < 0) {
+ err("reg_w1 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
static void reg_w(struct gspca_dev *gspca_dev,
u16 value,
const u8 *buffer,
int len)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "reg_w [%04x] = %02x %02x ..",
value, buffer[0], buffer[1]);
#ifdef GSPCA_DEBUG
@@ -1371,20 +1457,27 @@ static void reg_w(struct gspca_dev *gspca_dev,
}
#endif
memcpy(gspca_dev->usb_buf, buffer, len);
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, 0,
gspca_dev->usb_buf, len,
500);
+ if (ret < 0) {
+ err("reg_w err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* I2C write 1 byte */
static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
switch (sd->sensor) {
case SENSOR_ADCM1700:
@@ -1403,7 +1496,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
gspca_dev->usb_buf[5] = 0;
gspca_dev->usb_buf[6] = 0;
gspca_dev->usb_buf[7] = 0x10;
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1411,16 +1504,24 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
0,
gspca_dev->usb_buf, 8,
500);
+ if (ret < 0) {
+ err("i2c_w1 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* I2C write 8 bytes */
static void i2c_w8(struct gspca_dev *gspca_dev,
const u8 *buffer)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
buffer[2], buffer[3]);
memcpy(gspca_dev->usb_buf, buffer, 8);
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -1428,6 +1529,10 @@ static void i2c_w8(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 8,
500);
msleep(2);
+ if (ret < 0) {
+ err("i2c_w8 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* sensor read 'len' (1..5) bytes in gspca_dev->usb_buf */
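
The USB helpers above all follow the same cumulative error scheme: once a transfer fails, gspca_dev->usb_err latches the error and every later helper returns immediately, so sd_init() and sd_start() only have to report the first failure once at the end. A minimal sketch of the idea, with hypothetical names standing in for the driver types:

/* Hypothetical, self-contained sketch of the "sticky error" pattern. */
#include <stdio.h>

struct dev_state {
	int usb_err;				/* first transfer error, 0 if none */
};

static int fake_transfer(int reg, int val)	/* stand-in for usb_control_msg() */
{
	(void)val;
	return reg == 0x17 ? -5 : 0;		/* pretend register 0x17 fails (-EIO) */
}

static void xfer(struct dev_state *dev, int reg, int val)
{
	int ret;

	if (dev->usb_err < 0)			/* an earlier transfer failed: do nothing */
		return;
	ret = fake_transfer(reg, val);
	if (ret < 0)
		dev->usb_err = ret;		/* remember only the first error */
}

int main(void)
{
	struct dev_state dev = { .usb_err = 0 };

	xfer(&dev, 0x01, 0x60);
	xfer(&dev, 0x17, 0x61);			/* fails and latches usb_err */
	xfer(&dev, 0x01, 0x40);			/* silently skipped */
	printf("first error: %d\n", dev.usb_err);
	return dev.usb_err ? 1 : 0;
}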
@@ -1466,7 +1571,7 @@ static void i2c_w_seq(struct gspca_dev *gspca_dev,
const u8 (*data)[8])
{
while ((*data)[0] != 0) {
- if ((*data)[0] != 0xdd)
+ if ((*data)[0] != DELAY)
i2c_w8(gspca_dev, *data);
else
msleep((*data)[1]);
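
i2c_w_seq() walks a sentinel-terminated table: a row whose first byte equals DELAY (0xdd) is not written to the sensor, its second byte is instead a pause in milliseconds, and an all-zero row ends the sequence. A small, self-contained sketch of the same convention with made-up data:

/* Sketch of the sentinel-terminated command table, using hypothetical rows. */
#include <stdio.h>

#define DELAY 0xdd	/* first byte meaning "sleep, don't write" */

static const unsigned char demo_seq[][8] = {
	{0xa1, 0x21, 0x12, 0x80, 0, 0, 0, 0x10},	/* normal 8-byte I2C write */
	{DELAY, 0x14, 0, 0, 0, 0, 0, 0},		/* 0x14 = 20 ms pause */
	{0xa1, 0x21, 0x12, 0x00, 0, 0, 0, 0x10},
	{}						/* first byte 0 ends the table */
};

int main(void)
{
	const unsigned char (*row)[8] = demo_seq;

	while ((*row)[0] != 0) {
		if ((*row)[0] != DELAY)
			printf("i2c write, reg 0x%02x\n", (*row)[2]);
		else
			printf("sleep %u ms\n", (*row)[1]);
		row++;
	}
	return 0;
}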
@@ -1529,7 +1634,13 @@ static void mi0360_probe(struct gspca_dev *gspca_dev)
if (val != 0xffff)
break;
}
+ if (gspca_dev->usb_err < 0)
+ return;
switch (val) {
+ case 0x8221:
+ PDEBUG(D_PROBE, "Sensor mi0360b");
+ sd->sensor = SENSOR_MI0360B;
+ break;
case 0x823a:
PDEBUG(D_PROBE, "Sensor mt9v111");
sd->sensor = SENSOR_MT9V111;
@@ -1556,6 +1667,8 @@ static void ov7630_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x7628) { /* soi768 */
sd->sensor = SENSOR_SOI768;
/*fixme: only valid for 0c45:613e?*/
@@ -1593,13 +1706,14 @@ static void ov7648_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x1030) { /* po1030 */
PDEBUG(D_PROBE, "Sensor po1030");
sd->sensor = SENSOR_PO1030;
return;
}
-
- PDEBUG(D_PROBE, "Unknown sensor %04x", val);
+ err("Unknown sensor %04x", val);
}
/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
@@ -1631,11 +1745,13 @@ static void po2030n_probe(struct gspca_dev *gspca_dev)
val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if (gspca_dev->usb_err < 0)
+ return;
if (val == 0x2030) {
PDEBUG(D_PROBE, "Sensor po2030n");
/* sd->sensor = SENSOR_PO2030N; */
} else {
- PDEBUG(D_PROBE, "Unknown sensor ID %04x", val);
+ err("Unknown sensor ID %04x", val);
}
}
@@ -1697,6 +1813,12 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x40);
msleep(50);
break;
+ case SENSOR_MI0360B:
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ break;
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1762,8 +1884,7 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x43);
reg_w1(gspca_dev, 0x17, 0x61);
reg_w1(gspca_dev, 0x01, 0x42);
- if (sd->sensor == SENSOR_HV7131R
- && sd->bridge == BRIDGE_SN9C102P)
+ if (sd->sensor == SENSOR_HV7131R)
hv7131r_probe(gspca_dev);
break;
}
@@ -1788,26 +1909,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode);
}
cam->npkt = 24; /* 24 packets per ISOC message */
+ cam->ctrls = sd->ctrls;
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->blue = BLUE_BALANCE_DEF;
- sd->red = RED_BALANCE_DEF;
- sd->gamma = GAMMA_DEF;
- sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
- sd->vflip = VFLIP_DEF;
- switch (sd->sensor) {
- case SENSOR_OM6802:
- sd->sharpness = 0x10;
- break;
- default:
- sd->sharpness = SHARPNESS_DEF;
- break;
- }
- sd->infrared = INFRARED_DEF;
- sd->freq = FREQ_DEF;
sd->quality = QUALITY_DEF;
sd->jpegqual = 80;
@@ -1828,6 +1932,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]);
reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */
regF1 = gspca_dev->usb_buf[0];
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
switch (sd->bridge) {
case BRIDGE_SN9C102P:
@@ -1871,6 +1977,9 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
}
+ if (sd->sensor == SENSOR_OM6802)
+ sd->ctrls[SHARPNESS].def = 0x10;
+
/* Note we do not disable the sensor clock here (power saving mode),
as that also disables the button on the cam. */
reg_w1(gspca_dev, 0xf1, 0x00);
@@ -1881,7 +1990,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
- return 0;
+ return gspca_dev->usb_err;
}
static u32 setexposure(struct gspca_dev *gspca_dev,
@@ -1912,7 +2021,8 @@ static u32 setexposure(struct gspca_dev *gspca_dev,
i2c_w8(gspca_dev, Expodoit);
break;
}
- case SENSOR_MI0360: {
+ case SENSOR_MI0360:
+ case SENSOR_MI0360B: {
u8 expoMi[] = /* exposure 0x0635 -> 4 fp/s 0x10 */
{ 0xb1, 0x5d, 0x09, 0x00, 0x00, 0x00, 0x00, 0x16 };
static const u8 doit[] = /* update sensor */
@@ -1991,16 +2101,18 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
unsigned int expo;
+ int brightness;
u8 k2;
- k2 = ((int) sd->brightness - 0x8000) >> 10;
+ brightness = sd->ctrls[BRIGHTNESS].val;
+ k2 = (brightness - 0x80) >> 2;
switch (sd->sensor) {
case SENSOR_ADCM1700:
if (k2 > 0x1f)
k2 = 0; /* only positive Y offset */
break;
case SENSOR_HV7131R:
- expo = sd->brightness << 4;
+ expo = brightness << 12;
if (expo > 0x002dc6c0)
expo = 0x002dc6c0;
else if (expo < 0x02a0)
@@ -2009,18 +2121,22 @@ static void setbrightness(struct gspca_dev *gspca_dev)
break;
case SENSOR_MI0360:
case SENSOR_MO4000:
- expo = sd->brightness >> 4;
+ expo = brightness << 4;
+ sd->exposure = setexposure(gspca_dev, expo);
+ break;
+ case SENSOR_MI0360B:
+ expo = brightness << 2;
sd->exposure = setexposure(gspca_dev, expo);
break;
case SENSOR_GC0307:
case SENSOR_MT9V111:
- expo = sd->brightness >> 8;
+ expo = brightness;
sd->exposure = setexposure(gspca_dev, expo);
return; /* don't set the Y offset */
case SENSOR_OM6802:
- expo = sd->brightness >> 6;
+ expo = brightness << 2;
sd->exposure = setexposure(gspca_dev, expo);
- k2 = sd->brightness >> 11;
+ k2 = brightness >> 3;
break;
}
@@ -2033,7 +2149,8 @@ static void setcontrast(struct gspca_dev *gspca_dev)
u8 k2;
u8 contrast[6];
- k2 = sd->contrast * 0x30 / (CONTRAST_MAX + 1) + 0x10; /* 10..40 */
+ k2 = sd->ctrls[CONTRAST].val * 0x30 / (CONTRAST_MAX + 1)
+ + 0x10; /* 10..40 */
contrast[0] = (k2 + 1) / 2; /* red */
contrast[1] = 0;
contrast[2] = k2; /* green */
@@ -2046,15 +2163,25 @@ static void setcontrast(struct gspca_dev *gspca_dev)
static void setcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int i, v;
+ int i, v, colors;
+ const s16 *uv;
u8 reg8a[12]; /* U & V gains */
- static const s16 uv[6] = { /* same as reg84 in signed decimal */
+ static const s16 uv_com[6] = { /* same as reg84 in signed decimal */
-24, -38, 64, /* UR UG UB */
62, -51, -9 /* VR VG VB */
};
+ static const s16 uv_mi0360b[6] = {
+ -20, -38, 64, /* UR UG UB */
+ 60, -51, -9 /* VR VG VB */
+ };
+ colors = sd->ctrls[COLORS].val;
+ if (sd->sensor == SENSOR_MI0360B)
+ uv = uv_mi0360b;
+ else
+ uv = uv_com;
for (i = 0; i < 6; i++) {
- v = uv[i] * sd->colors / COLOR_DEF;
+ v = uv[i] * colors / COLORS_DEF;
reg8a[i * 2] = v;
reg8a[i * 2 + 1] = (v >> 8) & 0x0f;
}
@@ -2065,15 +2192,15 @@ static void setredblue(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- reg_w1(gspca_dev, 0x05, sd->red);
+ reg_w1(gspca_dev, 0x05, sd->ctrls[RED].val);
/* reg_w1(gspca_dev, 0x07, 32); */
- reg_w1(gspca_dev, 0x06, sd->blue);
+ reg_w1(gspca_dev, 0x06, sd->ctrls[BLUE].val);
}
static void setgamma(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- int i;
+ int i, val;
u8 gamma[17];
const u8 *gamma_base;
static const u8 delta[17] = {
@@ -2086,6 +2213,7 @@ static void setgamma(struct gspca_dev *gspca_dev)
gamma_base = gamma_spec_0;
break;
case SENSOR_HV7131R:
+ case SENSOR_MI0360B:
case SENSOR_MT9V111:
gamma_base = gamma_spec_1;
break;
@@ -2100,9 +2228,10 @@ static void setgamma(struct gspca_dev *gspca_dev)
break;
}
+ val = sd->ctrls[GAMMA].val;
for (i = 0; i < sizeof gamma; i++)
gamma[i] = gamma_base[i]
- + delta[i] * (sd->gamma - GAMMA_DEF) / 32;
+ + delta[i] * (val - GAMMA_DEF) / 32;
reg_w(gspca_dev, 0x20, gamma, sizeof gamma);
}
@@ -2110,7 +2239,7 @@ static void setautogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
+ if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
return;
switch (sd->sensor) {
case SENSOR_OV7630:
@@ -2121,74 +2250,91 @@ static void setautogain(struct gspca_dev *gspca_dev)
comb = 0xc0;
else
comb = 0xa0;
- if (sd->autogain)
+ if (sd->ctrls[AUTOGAIN].val)
comb |= 0x03;
i2c_w1(&sd->gspca_dev, 0x13, comb);
return;
}
}
- if (sd->autogain)
+ if (sd->ctrls[AUTOGAIN].val)
sd->ag_cnt = AG_CNT_START;
else
sd->ag_cnt = -1;
}
-/* hv7131r/ov7630/ov7648 only */
-static void setvflip(struct sd *sd)
+static void sethvflip(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
u8 comn;
- if (sd->gspca_dev.ctrl_dis & (1 << VFLIP_IDX))
- return;
switch (sd->sensor) {
case SENSOR_HV7131R:
comn = 0x18; /* clkdiv = 1, ablcen = 1 */
- if (sd->vflip)
+ if (sd->ctrls[VFLIP].val)
comn |= 0x01;
- i2c_w1(&sd->gspca_dev, 0x01, comn); /* sctra */
+ i2c_w1(gspca_dev, 0x01, comn); /* sctra */
break;
case SENSOR_OV7630:
comn = 0x02;
- if (!sd->vflip)
+ if (!sd->ctrls[VFLIP].val)
comn |= 0x80;
- i2c_w1(&sd->gspca_dev, 0x75, comn);
+ i2c_w1(gspca_dev, 0x75, comn);
break;
- default:
-/* case SENSOR_OV7648: */
+ case SENSOR_OV7648:
comn = 0x06;
- if (sd->vflip)
+ if (sd->ctrls[VFLIP].val)
+ comn |= 0x80;
+ i2c_w1(gspca_dev, 0x75, comn);
+ break;
+ case SENSOR_PO2030N:
+ /* Reg. 0x1E: Timing Generator Control Register 2 (Tgcontrol2)
+ * (reset value: 0x0A)
+ * bit7: HM: Horizontal Mirror: 0: disable, 1: enable
+ * bit6: VM: Vertical Mirror: 0: disable, 1: enable
+ * bit5: ST: Shutter Selection: 0: electrical, 1: mechanical
+ * bit4: FT: Single Frame Transfer: 0: disable, 1: enable
+ * bit3-0: X
+ */
+ comn = 0x0a;
+ if (sd->ctrls[HFLIP].val)
comn |= 0x80;
- i2c_w1(&sd->gspca_dev, 0x75, comn);
+ if (sd->ctrls[VFLIP].val)
+ comn |= 0x40;
+ i2c_w1(&sd->gspca_dev, 0x1e, comn);
break;
}
}
-static void setsharpness(struct sd *sd)
+static void setsharpness(struct gspca_dev *gspca_dev)
{
- reg_w1(&sd->gspca_dev, 0x99, sd->sharpness);
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
}
-static void setinfrared(struct sd *sd)
+static void setinfrared(struct gspca_dev *gspca_dev)
{
- if (sd->gspca_dev.ctrl_dis & (1 << INFRARED_IDX))
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << INFRARED))
return;
/*fixme: different sequence for StarCam Clip and StarCam 370i */
/* Clip */
- i2c_w1(&sd->gspca_dev, 0x02, /* gpio */
- sd->infrared ? 0x66 : 0x64);
+ i2c_w1(gspca_dev, 0x02, /* gpio */
+ sd->ctrls[INFRARED].val ? 0x66 : 0x64);
}
static void setfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << FREQ_IDX))
+ if (gspca_dev->ctrl_dis & (1 << FREQ))
return;
if (sd->sensor == SENSOR_OV7660) {
u8 com8;
com8 = 0xdf; /* auto gain/wb/expo */
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
i2c_w1(gspca_dev, 0x13, com8 | 0x20);
break;
@@ -2216,7 +2362,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
break;
}
- switch (sd->freq) {
+ switch (sd->ctrls[FREQ].val) {
case 0: /* Banding filter disabled */
break;
case 1: /* 50 hz (filter on and framerate adj) */
@@ -2334,6 +2480,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
break;
case SENSOR_MT9V111:
+ case SENSOR_MI0360B:
reg17 = 0xe0;
break;
case SENSOR_ADCM1700:
@@ -2375,6 +2522,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_GC0307:
case SENSOR_MT9V111:
+ case SENSOR_MI0360B:
reg_w1(gspca_dev, 0x9a, 0x07);
break;
case SENSOR_OV7630:
@@ -2389,7 +2537,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x9a, 0x08);
break;
}
- setsharpness(sd);
+ setsharpness(gspca_dev);
reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
reg_w1(gspca_dev, 0x05, 0x20); /* red */
@@ -2414,6 +2562,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
reg1 = 0x44;
break;
+ case SENSOR_MI0360B:
+ init = mi0360b_sensor_param1;
+ reg1 &= ~0x02; /* don't invert pin S_PWR_DN */
+ reg17 = 0xe2;
+ break;
case SENSOR_MO4000:
if (mode) {
/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */
@@ -2474,8 +2627,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg1 = 0x44;
reg17 = 0xa2;
break;
- default:
-/* case SENSOR_SP80708: */
+ case SENSOR_SP80708:
init = sp80708_sensor_param1;
if (mode) {
/*?? reg1 = 0x04; * 320 clk 48Mhz */
@@ -2526,7 +2678,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
-
/* here change size mode 0 -> VGA; 1 -> CIF */
sd->reg18 = sn9c1xx[0x18] | (mode << 4) | 0x40;
reg_w1(gspca_dev, 0x18, sd->reg18);
@@ -2535,13 +2686,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x17, reg17);
reg_w1(gspca_dev, 0x01, reg1);
- setvflip(sd);
+ sethvflip(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setcolors(gspca_dev);
setautogain(gspca_dev);
setfreq(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -2568,6 +2719,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
data = 0x2b;
break;
case SENSOR_MI0360:
+ case SENSOR_MI0360B:
i2c_w8(gspca_dev, stopmi0360);
data = 0x29;
break;
@@ -2641,6 +2793,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
default:
/* case SENSOR_MO4000: */
/* case SENSOR_MI0360: */
+/* case SENSOR_MI0360B: */
/* case SENSOR_MT9V111: */
expotimes = sd->exposure;
expotimes += (luma_mean - delta) >> 6;
@@ -2663,236 +2816,52 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
int sof, avg_lum;
- sof = len - 64;
- if (sof >= 0 && data[sof] == 0xff && data[sof + 1] == 0xd9) {
+ /* the image ends on a 64-byte block starting with
+ * ff d9 ff ff 00 c4 c4 96
+ * and followed by various information including luminosity */
+ /* this block may be split between two packets */
+ /* a new image always starts in a new packet */
+ switch (gspca_dev->last_packet_type) {
+ case DISCARD_PACKET: /* restart image building */
+ sof = len - 64;
+ if (sof >= 0 && data[sof] == 0xff && data[sof + 1] == 0xd9)
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ return;
+ case LAST_PACKET: /* put the JPEG 422 header */
+ gspca_frame_add(gspca_dev, FIRST_PACKET,
+ sd->jpeg_hdr, JPEG_HDR_SZ);
+ break;
+ }
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+
+ data = gspca_dev->image;
+ if (data == NULL)
+ return;
+ sof = gspca_dev->image_len - 64;
+ if (data[sof] != 0xff
+ || data[sof + 1] != 0xd9)
+ return;
- /* end of frame */
- gspca_frame_add(gspca_dev, LAST_PACKET,
- data, sof + 2);
- if (sd->ag_cnt < 0)
- return;
+ /* end of image found - remove the trailing data */
+ gspca_dev->image_len = sof + 2;
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+ if (sd->ag_cnt < 0)
+ return;
/* w1 w2 w3 */
/* w4 w5 w6 */
/* w7 w8 */
/* w4 */
- avg_lum = ((data[sof + 29] << 8) | data[sof + 30]) >> 6;
+ avg_lum = ((data[sof + 29] << 8) | data[sof + 30]) >> 6;
/* w6 */
- avg_lum += ((data[sof + 33] << 8) | data[sof + 34]) >> 6;
+ avg_lum += ((data[sof + 33] << 8) | data[sof + 34]) >> 6;
/* w2 */
- avg_lum += ((data[sof + 25] << 8) | data[sof + 26]) >> 6;
+ avg_lum += ((data[sof + 25] << 8) | data[sof + 26]) >> 6;
/* w8 */
- avg_lum += ((data[sof + 37] << 8) | data[sof + 38]) >> 6;
+ avg_lum += ((data[sof + 37] << 8) | data[sof + 38]) >> 6;
/* w5 */
- avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
- avg_lum >>= 4;
- atomic_set(&sd->avg_lum, avg_lum);
- return;
- }
- if (gspca_dev->last_packet_type == LAST_PACKET) {
-
- /* put the JPEG 422 header */
- gspca_frame_add(gspca_dev, FIRST_PACKET,
- sd->jpeg_hdr, JPEG_HDR_SZ);
- }
- gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
-}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blue = val;
- if (gspca_dev->streaming)
- setredblue(gspca_dev);
- return 0;
-}
-
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->blue;
- return 0;
-}
-
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->red = val;
- if (gspca_dev->streaming)
- setredblue(gspca_dev);
- return 0;
-}
-
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->red;
- return 0;
-}
-
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gamma = val;
- if (gspca_dev->streaming)
- setgamma(gspca_dev);
- return 0;
-}
-
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gamma;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(sd);
- return 0;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- setvflip(sd);
- return 0;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
-static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->infrared = val;
- if (gspca_dev->streaming)
- setinfrared(sd);
- return 0;
-}
-
-static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->infrared;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->freq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
- return 0;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->freq;
- return 0;
+ avg_lum += ((data[sof + 31] << 8) | data[sof + 32]) >> 4;
+ avg_lum >>= 4;
+ atomic_set(&sd->avg_lum, avg_lum);
}
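
The shifts in the averaging code above amount to a weighted sum of five 16-bit windows taken from the frame trailer: the centre window (w5) counts four times as much as w2, w4, w6 and w8, and the total is scaled down by roughly 1024. A sketch of the equivalent arithmetic, assuming the window values have already been extracted as plain integers:

/* Equivalent weighting of the trailer windows (values assumed pre-extracted). */
int weighted_luma(int w2, int w4, int w5, int w6, int w8)
{
	int avg;

	avg  = w4 >> 6;		/* outer windows: weight 1 */
	avg += w6 >> 6;
	avg += w2 >> 6;
	avg += w8 >> 6;
	avg += w5 >> 4;		/* centre window: weight 4 */
	return avg >> 4;	/* roughly (w2 + w4 + w6 + w8 + 4 * w5) / 1024 */
}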
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
@@ -2944,7 +2913,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return -EINVAL;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -2967,7 +2936,7 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -2977,7 +2946,7 @@ static const struct sd_desc sd_desc = {
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
.querymenu = sd_querymenu,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -3005,6 +2974,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
{USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+ /* or MT9V111 */
/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
/* {USB_DEVICE(0x0c45, 0x60cc), BS(SN9C105, HV7131GP)}, */
@@ -3019,7 +2989,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
#endif
{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/
-/* {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, * / GC0305*/
+ {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* or GC0305 */
/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
{USB_DEVICE(0x0c45, 0x610a), BS(SN9C120, OV7648)}, /*sn9c128*/
{USB_DEVICE(0x0c45, 0x610b), BS(SN9C120, OV7660)}, /*sn9c128*/
@@ -3031,12 +3001,12 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6128), BS(SN9C120, OM6802)}, /*sn9c325?*/
/*bw600.inf:*/
{USB_DEVICE(0x0c45, 0x612a), BS(SN9C120, OV7648)}, /*sn9c325?*/
+ {USB_DEVICE(0x0c45, 0x612b), BS(SN9C110, ADCM1700)},
{USB_DEVICE(0x0c45, 0x612c), BS(SN9C110, MO4000)},
{USB_DEVICE(0x0c45, 0x612e), BS(SN9C110, OV7630)},
/* {USB_DEVICE(0x0c45, 0x612f), BS(SN9C110, ICM105C)}, */
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
{USB_DEVICE(0x0c45, 0x6130), BS(SN9C120, MI0360)},
-#endif
+ /* or MT9V111 / MI0360B */
/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
@@ -3076,17 +3046,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 3f514eb1d99d..e64338664410 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -171,7 +171,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "GET %02x 0000 %04x %02x", req, index,
gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -193,7 +193,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
value, index,
NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -217,7 +217,7 @@ static void reg_wb(struct gspca_dev *gspca_dev,
value, index,
gspca_dev->usb_buf, 1, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -587,18 +587,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index c02beb6c1e93..8e202b9039f1 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -396,7 +396,7 @@ static int reg_w(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -418,8 +418,8 @@ static int reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500); /* timeout */
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_12 err %d", ret);
- return -1;
+ err("reg_r_12 err %d", ret);
+ return ret;
}
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
}
@@ -1093,17 +1093,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index c99333933e32..642839a11e8d 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -1724,7 +1724,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x00, 0x0000, 0x0048},
{0x00, 0x0000, 0x0049},
{0x00, 0x0008, 0x004a},
-/* DSP Registers */
+/* DSP Registers */
{0x01, 0x00a6, 0x0000},
{0x01, 0x0028, 0x0001},
{0x01, 0x0000, 0x0002},
@@ -1788,7 +1788,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x05, 0x0022, 0x0004},
{0x05, 0x0025, 0x0001},
{0x05, 0x0000, 0x0000},
-/* Part 4 */
+/* Part 4 */
{0x05, 0x0026, 0x0001},
{0x05, 0x0001, 0x0000},
{0x05, 0x0027, 0x0001},
@@ -1806,7 +1806,7 @@ static const __u16 spca501c_mysterious_init_data[][3] = {
{0x05, 0x0001, 0x0000},
{0x05, 0x0027, 0x0001},
{0x05, 0x004e, 0x0000},
-/* Part 5 */
+/* Part 5 */
{0x01, 0x0003, 0x003f},
{0x01, 0x0001, 0x0056},
{0x01, 0x000f, 0x0008},
@@ -1852,7 +1852,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x 0x%02x 0x%02x",
req, index, value);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -2189,17 +2189,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index c576eed73abe..bc9dd9034ab4 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -368,10 +368,6 @@ static const u8 spca505b_init_data[][3] = {
{0x08, 0x00, 0x00},
{0x08, 0x00, 0x01},
{0x08, 0x00, 0x02},
- {0x00, 0x01, 0x00},
- {0x00, 0x01, 0x01},
- {0x00, 0x01, 0x34},
- {0x00, 0x01, 0x35},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
@@ -582,7 +578,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
req, index, value, ret);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -689,8 +685,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
return ret;
}
if (ret != 0x0101) {
- PDEBUG(D_ERR|D_CONF,
- "After vector read returns 0x%04x should be 0x0101",
+ err("After vector read returns 0x%04x should be 0x0101",
ret);
}
@@ -821,18 +816,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index edf0fe157501..7307638ac91d 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -92,8 +92,7 @@ static const struct v4l2_pix_format sif_mode[] = {
* Initialization data: this is the first set-up data written to the
* device (before the open data).
*/
-static const u16 spca508_init_data[][2] =
-{
+static const u16 spca508_init_data[][2] = {
{0x0000, 0x870b},
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
@@ -1276,7 +1275,7 @@ static int reg_write(struct usb_device *dev,
PDEBUG(D_USBO, "reg write i:0x%04x = 0x%02x",
index, value);
if (ret < 0)
- PDEBUG(D_ERR|D_USBO, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
return ret;
}
@@ -1298,7 +1297,7 @@ static int reg_read(struct gspca_dev *gspca_dev,
PDEBUG(D_USBI, "reg read i:%04x --> %02x",
index, gspca_dev->usb_buf[0]);
if (ret < 0) {
- PDEBUG(D_ERR|D_USBI, "reg_read err %d", ret);
+ err("reg_read err %d", ret);
return ret;
}
return gspca_dev->usb_buf[0];
@@ -1543,18 +1542,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 7bb2355005dc..ad73f4812c05 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -315,7 +315,7 @@ static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value)
value, index, NULL, 0, 500);
PDEBUG(D_USBO, "reg write: 0x%02x:0x%02x", index, value);
if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
+ err("reg write: error %d", ret);
}
static void write_vector(struct gspca_dev *gspca_dev,
@@ -787,7 +787,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
return;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
if (data[0] & 0x20) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
input_sync(gspca_dev->input_dev);
@@ -1037,7 +1037,7 @@ static const struct sd_desc sd_desc_12a = {
.start = sd_start_12a,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -1051,7 +1051,7 @@ static const struct sd_desc sd_desc_72a = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
};
@@ -1107,17 +1107,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 09b3f93fa4d6..404067745775 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -123,7 +123,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_COMMAND, index, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -135,7 +135,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
SQ905_PING, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed 2 (%d)",
+ err("%s: usb_control_msg failed 2 (%d)",
__func__, ret);
return ret;
}
@@ -158,7 +158,7 @@ static int sq905_ack_frame(struct gspca_dev *gspca_dev)
SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1,
SQ905_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)", __func__, ret);
+ err("%s: usb_control_msg failed (%d)", __func__, ret);
return ret;
}
@@ -186,7 +186,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
if (need_lock)
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)", __func__, ret);
+ err("%s: usb_control_msg failed (%d)", __func__, ret);
return ret;
}
ret = usb_bulk_msg(gspca_dev->dev,
@@ -195,7 +195,7 @@ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
/* successful, it returns 0, otherwise negative */
if (ret < 0 || act_len != size) {
- PDEBUG(D_ERR, "bulk read fail (%d) len %d/%d",
+ err("bulk read fail (%d) len %d/%d",
ret, act_len, size);
return -EIO;
}
@@ -226,7 +226,7 @@ static void sq905_dostream(struct work_struct *work)
buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
@@ -436,19 +436,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 4c70628ca615..c2e88b5303cb 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -95,7 +95,7 @@ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index)
command, index, NULL, 0,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -115,7 +115,7 @@ static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index,
command, index, gspca_dev->usb_buf, size,
SQ905C_CMD_TIMEOUT);
if (ret < 0) {
- PDEBUG(D_ERR, "%s: usb_control_msg failed (%d)",
+ err("%s: usb_control_msg failed (%d)",
__func__, ret);
return ret;
}
@@ -146,7 +146,7 @@ static void sq905c_dostream(struct work_struct *work)
buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- PDEBUG(D_ERR, "Couldn't allocate USB buffer");
+ err("Couldn't allocate USB buffer");
goto quit_stream;
}
@@ -341,19 +341,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index 7ae6522d4edf..3e4b0b94c700 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -468,7 +468,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
value, 0, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r %04x failed %d", value, ret);
+ err("reg_r %04x failed %d", value, ret);
gspca_dev->usb_err = ret;
}
}
@@ -488,7 +488,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
500);
msleep(30);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w %04x %04x failed %d", value, index, ret);
+ err("reg_w %04x %04x failed %d", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -511,7 +511,7 @@ static void reg_wb(struct gspca_dev *gspca_dev, u16 value, u16 index,
1000);
msleep(30);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_wb %04x %04x failed %d", value, index, ret);
+ err("reg_wb %04x %04x failed %d", value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -556,7 +556,7 @@ static void i2c_write(struct sd *sd,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "i2c_write failed %d", ret);
+ err("i2c_write failed %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -612,7 +612,7 @@ static void ucbus_write(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, buf - gspca_dev->usb_buf,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "ucbus_write failed %d", ret);
+ err("ucbus_write failed %d", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -688,7 +688,7 @@ static void cmos_probe(struct gspca_dev *gspca_dev)
break;
}
if (i >= ARRAY_SIZE(probe_order))
- PDEBUG(D_PROBE, "Unknown sensor");
+ err("Unknown sensor");
else
sd->sensor = probe_order[i];
}
@@ -1079,7 +1079,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
gspca_dev->cam.bulk_nurbs = 1;
ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC);
if (ret < 0)
- PDEBUG(D_ERR|D_PACK, "sd_dq_callback() err %d", ret);
+ err("sd_dq_callback() err %d", ret);
/* wait a little time, otherwise the webcam crashes */
msleep(100);
@@ -1185,18 +1185,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 2aedf4b1bfa3..11a192b95ed4 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -27,14 +27,21 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver");
MODULE_LICENSE("GPL");
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ CONTRAST,
+ COLORS,
+ LIGHTFREQ,
+ NCTRLS /* number of controls */
+};
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
- unsigned char lightfreq;
+ struct gspca_ctrl ctrls[NCTRLS];
+
u8 quality;
#define QUALITY_MIN 70
#define QUALITY_MAX 95
@@ -44,17 +51,13 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setcontrast(struct gspca_dev *gspca_dev);
+static void setcolors(struct gspca_dev *gspca_dev);
+static void setlightfreq(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRLS] = {
+[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -62,13 +65,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
+ .default_value = 127,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
+ .set_control = setbrightness
},
- {
+[CONTRAST] = {
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -76,13 +77,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
+ .default_value = 127,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
+ .set_control = setcontrast
},
- {
+[COLORS] = {
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -90,13 +89,11 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 0,
.maximum = 255,
.step = 1,
-#define COLOR_DEF 127
- .default_value = COLOR_DEF,
+ .default_value = 127,
},
- .set = sd_setcolors,
- .get = sd_getcolors,
+ .set_control = setcolors
},
- {
+[LIGHTFREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
.type = V4L2_CTRL_TYPE_MENU,
@@ -104,11 +101,9 @@ static const struct ctrl sd_ctrls[] = {
.minimum = 1,
.maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
.step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
+ .default_value = 1,
},
- .set = sd_setfreq,
- .get = sd_getfreq,
+ .set_control = setlightfreq
},
};
@@ -142,7 +137,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -167,7 +162,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -197,7 +192,7 @@ static void rcv_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- PDEBUG(D_ERR, "rcv_val err %d", ret);
+ err("rcv_val err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -240,7 +235,7 @@ static void snd_val(struct gspca_dev *gspca_dev,
&alen,
500); /* timeout in milliseconds */
if (ret < 0) {
- PDEBUG(D_ERR, "snd_val err %d", ret);
+ err("snd_val err %d", ret);
gspca_dev->usb_err = ret;
} else {
if (ads == 0x003f08) {
@@ -264,7 +259,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
int parval;
parval = 0x06000000 /* whiteness */
- + (sd->brightness << 16);
+ + (sd->ctrls[BRIGHTNESS].val << 16);
set_par(gspca_dev, parval);
}
@@ -274,7 +269,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
int parval;
parval = 0x07000000 /* contrast */
- + (sd->contrast << 16);
+ + (sd->ctrls[CONTRAST].val << 16);
set_par(gspca_dev, parval);
}
@@ -284,15 +279,15 @@ static void setcolors(struct gspca_dev *gspca_dev)
int parval;
parval = 0x08000000 /* saturation */
- + (sd->colors << 16);
+ + (sd->ctrls[COLORS].val << 16);
set_par(gspca_dev, parval);
}
-static void setfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- set_par(gspca_dev, sd->lightfreq == 1
+ set_par(gspca_dev, sd->ctrls[LIGHTFREQ].val == 1
? 0x33640000 /* 50 Hz */
: 0x33780000); /* 60 Hz */
}
@@ -305,10 +300,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->lightfreq = FREQ_DEF;
+ gspca_dev->cam.ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
return 0;
}
@@ -323,7 +315,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ret = reg_r(gspca_dev, 0x0740);
if (gspca_dev->usb_err >= 0) {
if (ret != 0xff) {
- PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret);
+ err("init reg: 0x%02x", ret);
gspca_dev->usb_err = -EIO;
}
}
@@ -357,7 +349,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
gspca_dev->iface,
gspca_dev->alt);
if (ret < 0) {
- PDEBUG(D_ERR|D_STREAM, "set intf %d %d failed",
+ err("set intf %d %d failed",
gspca_dev->iface, gspca_dev->alt);
gspca_dev->usb_err = ret;
goto out;
@@ -378,7 +370,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_par(gspca_dev, 0x0a800000); /* Green ? */
set_par(gspca_dev, 0x0b800000); /* Blue ? */
set_par(gspca_dev, 0x0d030000); /* Gamma ? */
- setfreq(gspca_dev); /* light frequency */
+ setlightfreq(gspca_dev);
/* start the video flow */
set_par(gspca_dev, 0x01000000);
@@ -441,78 +433,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->lightfreq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->lightfreq;
- return 0;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -563,7 +483,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -603,17 +523,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- info("registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- info("deregistered");
}
module_init(sd_mod_init);
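
The stk014.c changes above convert the driver from one set/get callback pair per control, with ad-hoc value fields in struct sd, to the table-driven gspca_ctrl mechanism: the core stores each control's current value in ctrls[n].val and calls a single .set_control hook, so the driver only applies the value to the hardware. A condensed sketch of the pattern, using the names from the hunks above (only the brightness control shown):

/* controls */
enum e_ctrl {
	BRIGHTNESS,
	NCTRLS		/* number of controls */
};

struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct gspca_ctrl ctrls[NCTRLS];
};

static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* the core has already stored the new value in ctrls[].val */
	set_par(gspca_dev, 0x06000000 + (sd->ctrls[BRIGHTNESS].val << 16));
}

static const struct ctrl sd_ctrls[NCTRLS] = {
[BRIGHTNESS] = {
	{
		.id = V4L2_CID_BRIGHTNESS,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Brightness",
		.minimum = 0,
		.maximum = 255,
		.step = 1,
		.default_value = 127,
	},
	.set_control = setbrightness
	},
};

/* in sd_config(): hand the value array to the core so it can handle
 * defaults and get/set queries without per-control driver callbacks */
	gspca_dev->cam.ctrls = sd->ctrls;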
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index e50dd7693f74..b199ad4666bd 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -1,7 +1,7 @@
/*
* STV0680 USB Camera Driver
*
- * Copyright (C) 2009 Hans de Goede <hdgoede@redhat.com>
+ * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 stv680 driver:
*
@@ -31,7 +31,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Hans de Goede <hdgoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("STV0680 USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -79,8 +79,7 @@ static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
val, 0, gspca_dev->usb_buf, size, 500);
if ((ret < 0) && (req != 0x0a))
- PDEBUG(D_ERR,
- "usb_control_msg error %i, request = 0x%x, error = %i",
+ err("usb_control_msg error %i, request = 0x%x, error = %i",
set, req, ret);
return ret;
@@ -237,7 +236,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
- PDEBUG(D_ERR, "Could not get descriptor 0100.");
+ err("Could not get descriptor 0100.");
return stv0680_handle_error(gspca_dev, -EIO);
}
@@ -357,17 +356,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 14f179a19485..086de44a6e57 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -189,7 +189,7 @@ int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value)
0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH,
STV06XX_URB_MSG_TIMEOUT);
if (err < 0) {
- PDEBUG(D_ERR, "I2C: Read error writing address: %d", err);
+ err("I2C: Read error writing address: %d", err);
return err;
}
@@ -428,7 +428,7 @@ frame_data:
}
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -462,7 +462,7 @@ static const struct sd_desc sd_desc = {
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -562,17 +562,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
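
The preprocessor change in stv06xx.c matters because CONFIG_INPUT is only #defined when the input core is built into the kernel; with CONFIG_INPUT=m only CONFIG_INPUT_MODULE is defined, so the old "#ifdef CONFIG_INPUT" silently compiled out the interrupt-packet handling (sd_int_pkt_scan) whenever input support was modular. Checking both symbols keeps it for both configurations:

/* Kconfig symbol FOO:
 *   y -> CONFIG_FOO defined
 *   m -> only CONFIG_FOO_MODULE defined
 *   n -> neither defined
 */
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
	/* interrupt-packet handling built for both y and m */
#endif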
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 053a27e3a400..e0f63c51f40d 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -37,7 +37,7 @@
#define STV_ISOC_ENDPOINT_ADDR 0x81
-#define STV_REG23 0x0423
+#define STV_REG23 0x0423
/* Control registers of the STV0600 ASIC */
#define STV_I2C_PARTNER 0x1420
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index 706e08dc5254..17531b41a073 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -39,8 +39,8 @@ static const struct ctrl hdcs1x00_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_EXPOSURE,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_exposure,
.get = hdcs_get_exposure
@@ -52,8 +52,8 @@ static const struct ctrl hdcs1x00_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_GAIN,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_gain,
.get = hdcs_get_gain
@@ -83,8 +83,8 @@ static const struct ctrl hdcs1020_ctrl[] = {
.minimum = 0x00,
.maximum = 0xffff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_EXPOSURE,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_exposure,
.get = hdcs_get_exposure
@@ -96,8 +96,8 @@ static const struct ctrl hdcs1020_ctrl[] = {
.minimum = 0x00,
.maximum = 0xff,
.step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
+ .default_value = HDCS_DEFAULT_GAIN,
+ .flags = V4L2_CTRL_FLAG_SLIDER
},
.set = hdcs_set_gain,
.get = hdcs_get_gain
@@ -163,7 +163,8 @@ static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
for (i = 0; i < len; i++) {
regs[2 * i] = reg;
regs[2 * i + 1] = vals[i];
- /* All addresses are shifted left one bit as bit 0 toggles r/w */
+ /* All addresses are shifted left one bit
+ * as bit 0 toggles r/w */
reg += 2;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index 37b31c99d956..cf3d0ccc1121 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -37,7 +37,7 @@
#define HDCS_REG_CONTROL(sd) (IS_1020(sd) ? HDCS20_CONTROL : HDCS00_CONTROL)
#define HDCS_1X00_DEF_WIDTH 360
-#define HDCS_1X00_DEF_HEIGHT 296
+#define HDCS_1X00_DEF_HEIGHT 296
#define HDCS_1020_DEF_WIDTH 352
#define HDCS_1020_DEF_HEIGHT 292
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
index c11f06e4ae76..3af53264a364 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -246,7 +246,7 @@ static int st6422_start(struct sd *sd)
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt) {
- PDEBUG(D_ERR, "Couldn't get altsetting");
+ err("Couldn't get altsetting");
return -EIO;
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index 11a0c002f5dc..f8398434c328 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -66,7 +66,7 @@ static const struct ctrl vv6410_ctrl[] = {
.minimum = 0,
.maximum = 1,
.step = 1,
- .default_value = 0
+ .default_value = 0
},
.set = vv6410_set_vflip,
.get = vv6410_get_vflip
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 96c61926d372..b3b5508473bc 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -157,8 +157,8 @@
/* Audio Amplifier Setup Register */
#define VV6410_AT1 0x79
-#define VV6410_HFLIP (1 << 3)
-#define VV6410_VFLIP (1 << 4)
+#define VV6410_HFLIP (1 << 3)
+#define VV6410_VFLIP (1 << 4)
#define VV6410_LOW_POWER_MODE (1 << 0)
#define VV6410_SOFT_RESET (1 << 2)
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 9494f86b9a85..a9cbcd6011d9 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -343,7 +343,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
len ? gspca_dev->usb_buf : NULL, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -368,7 +368,7 @@ static void reg_w_1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_1 err %d", ret);
+ err("reg_w_1 err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -388,7 +388,7 @@ static void reg_w_riv(struct gspca_dev *gspca_dev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_riv err %d", ret);
+ err("reg_w_riv err %d", ret);
gspca_dev->usb_err = ret;
return;
}
@@ -413,7 +413,7 @@ static u8 reg_r_1(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, 1,
500); /* timeout */
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_1 err %d", ret);
+ err("reg_r_1 err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -440,7 +440,7 @@ static u16 reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_12 err %d", ret);
+ err("reg_r_12 err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -463,7 +463,7 @@ static void setup_qtable(struct gspca_dev *gspca_dev,
/* loop over y components */
for (i = 0; i < 64; i++)
- reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
+ reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
/* loop over c components */
for (i = 0; i < 64; i++)
@@ -712,8 +712,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->subtype = id->driver_info;
if (sd->subtype == AiptekMiniPenCam13) {
-/* try to get the firmware as some cam answer 2.0.1.2.2
- * and should be a spca504b then overwrite that setting */
+
+ /* try to get the firmware as some cam answer 2.0.1.2.2
+ * and should be a spca504b then overwrite that setting */
reg_r(gspca_dev, 0x20, 0, 1);
switch (gspca_dev->usb_buf[0]) {
case 1:
@@ -733,7 +734,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* case BRIDGE_SPCA504: */
/* case BRIDGE_SPCA536: */
cam->cam_mode = vga_mode;
- cam->nmodes =ARRAY_SIZE(vga_mode);
+ cam->nmodes = ARRAY_SIZE(vga_mode);
break;
case BRIDGE_SPCA533:
cam->cam_mode = custom_mode;
@@ -1247,17 +1248,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 3b3b983f2b9d..b45f4d0f3997 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -892,7 +892,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OM6802;
break;
default:
- PDEBUG(D_ERR|D_PROBE, "unknown sensor %04x", sensor_id);
+ err("unknown sensor %04x", sensor_id);
return -EINVAL;
}
@@ -1444,17 +1444,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index d9c5bf3449d4..d9e3c6050781 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -421,18 +421,12 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index b16fd47e8ced..38a6efe1a5f9 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -3164,7 +3164,7 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, len,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r err %d", ret);
+ err("reg_r err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3205,7 +3205,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w err %d", ret);
+ err("reg_w err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -3230,7 +3230,7 @@ static u16 read_sensor_register(struct gspca_dev *gspca_dev,
reg_r(gspca_dev, 0xa1, 0xb33f, 1);
if (!(gspca_dev->usb_buf[0] & 0x02)) {
- PDEBUG(D_ERR, "I2c Bus Busy Wait %02x",
+ err("I2c Bus Busy Wait %02x",
gspca_dev->usb_buf[0]);
return 0;
}
@@ -3344,7 +3344,7 @@ static void i2c_write(struct gspca_dev *gspca_dev,
msleep(20);
} while (--retry > 0);
if (retry <= 0)
- PDEBUG(D_ERR, "i2c_write timeout");
+ err("i2c_write timeout");
}
static void put_tab_to_reg(struct gspca_dev *gspca_dev,
@@ -3440,7 +3440,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
switch (sensor) {
case -1:
- PDEBUG(D_PROBE, "Unknown sensor...");
+ err("Unknown sensor...");
return -EINVAL;
case SENSOR_HV7131R:
PDEBUG(D_PROBE, "Find Sensor HV7131R");
@@ -4226,18 +4226,11 @@ static struct usb_driver sd_driver = {
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
- int ret;
-
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/gspca/w996Xcf.c b/drivers/media/video/gspca/w996Xcf.c
index 38a68591ce48..4066ac8c45a0 100644
--- a/drivers/media/video/gspca/w996Xcf.c
+++ b/drivers/media/video/gspca/w996Xcf.c
@@ -67,7 +67,7 @@ static int reg_w(struct sd *sd, __u16 index, __u16 value);
--------------------------------------------------------------------------*/
static int w9968cf_write_fsb(struct sd *sd, u16* data)
{
- struct usb_device* udev = sd->gspca_dev.dev;
+ struct usb_device *udev = sd->gspca_dev.dev;
u16 value;
int ret;
@@ -78,7 +78,7 @@ static int w9968cf_write_fsb(struct sd *sd, u16* data)
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
if (ret < 0) {
- PDEBUG(D_ERR, "Write FSB registers failed (%d)", ret);
+ err("Write FSB registers failed (%d)", ret);
return ret;
}
@@ -104,7 +104,7 @@ static int w9968cf_write_sb(struct sd *sd, u16 value)
udelay(W9968CF_I2C_BUS_DELAY);
if (ret < 0) {
- PDEBUG(D_ERR, "Write SB reg [01] %04x failed", value);
+ err("Write SB reg [01] %04x failed", value);
return ret;
}
@@ -130,7 +130,7 @@ static int w9968cf_read_sb(struct sd *sd)
ret = sd->gspca_dev.usb_buf[0] |
(sd->gspca_dev.usb_buf[1] << 8);
else
- PDEBUG(D_ERR, "Read SB reg [01] failed");
+ err("Read SB reg [01] failed");
udelay(W9968CF_I2C_BUS_DELAY);
@@ -437,7 +437,7 @@ static int w9968cf_set_crop_window(struct sd *sd)
if (sd->sensor == SEN_OV7620) {
/* Sigh, this is dependend on the clock / framerate changes
made by the frequency control, sick. */
- if (sd->freq == 1) {
+ if (sd->ctrls[FREQ].val == 1) {
start_cropx = 277;
start_cropy = 37;
} else {
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
new file mode 100644
index 000000000000..8715577bc2d8
--- /dev/null
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -0,0 +1,3253 @@
+/*
+ * USB IBM C-It Video Camera driver
+ *
+ * Supports Xirlink C-It Video Camera, IBM PC Camera,
+ * IBM NetCamera and Veo Stingray.
+ *
+ * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This driver is based on earlier work of:
+ *
+ * (C) Copyright 1999 Johannes Erdfelt
+ * (C) Copyright 1999 Randy Dunlap
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define MODULE_NAME "xirlink-cit"
+
+#include "gspca.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Xirlink C-IT");
+MODULE_LICENSE("GPL");
+
+/* FIXME we should autodetect this */
+static int ibm_netcam_pro;
+module_param(ibm_netcam_pro, int, 0);
+MODULE_PARM_DESC(ibm_netcam_pro,
+ "Use IBM Netcamera Pro init sequences for Model 3 cams");
+
+/* FIXME this should be handled through the V4L2 input selection API */
+static int rca_input;
+module_param(rca_input, int, 0644);
+MODULE_PARM_DESC(rca_input,
+ "Use rca input instead of ccd sensor on Model 3 cams");
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ u8 model;
+#define CIT_MODEL0 0 /* bcd version 0.01 cams ie the xvp-500 */
+#define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */
+#define CIT_MODEL2 2 /* ibmcam driver */
+#define CIT_MODEL3 3
+#define CIT_MODEL4 4
+#define CIT_IBM_NETCAM_PRO 5
+ u8 input_index;
+ u8 stop_on_control_change;
+ u8 sof_read;
+ u8 sof_len;
+ u8 contrast;
+ u8 brightness;
+ u8 hue;
+ u8 sharpness;
+ u8 lighting;
+ u8 hflip;
+};
+
+/* V4L2 controls supported by the driver */
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
+static void sd_stop0(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[] = {
+#define SD_BRIGHTNESS 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
+#define BRIGHTNESS_DEFAULT 32
+ .default_value = BRIGHTNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
+ },
+#define SD_CONTRAST 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "contrast",
+ .minimum = 0,
+ .maximum = 20,
+ .step = 1,
+#define CONTRAST_DEFAULT 10
+ .default_value = CONTRAST_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
+ },
+#define SD_HUE 2
+ {
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+#define HUE_DEFAULT 63
+ .default_value = HUE_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_sethue,
+ .get = sd_gethue,
+ },
+#define SD_SHARPNESS 3
+ {
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 6,
+ .step = 1,
+#define SHARPNESS_DEFAULT 3
+ .default_value = SHARPNESS_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
+ },
+#define SD_LIGHTING 4
+ {
+ {
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Lighting",
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+#define LIGHTING_DEFAULT 1
+ .default_value = LIGHTING_DEFAULT,
+ .flags = 0,
+ },
+ .set = sd_setlighting,
+ .get = sd_getlighting,
+ },
+#define SD_HFLIP 5
+ {
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define HFLIP_DEFAULT 0
+ .default_value = HFLIP_DEFAULT,
+ },
+ .set = sd_sethflip,
+ .get = sd_gethflip,
+ },
+};
+
+static const struct v4l2_pix_format cif_yuv_mode[] = {
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {352, 288, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+static const struct v4l2_pix_format vga_yuv_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {640, 480, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+static const struct v4l2_pix_format model0_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+static const struct v4l2_pix_format model2_mode[] = {
+ {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144 * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+/*
+ * 01.01.08 - Added for RCA video in support -LO
+ * This struct is used to init the Model3 cam to use the RCA video in port
+ * instead of the CCD sensor.
+ */
+static const u16 rca_initdata[][3] = {
+ {0, 0x0000, 0x010c},
+ {0, 0x0006, 0x012c},
+ {0, 0x0078, 0x012d},
+ {0, 0x0046, 0x012f},
+ {0, 0xd141, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfea8, 0x0124},
+ {1, 0x0000, 0x0116},
+ {0, 0x0064, 0x0116},
+ {1, 0x0000, 0x0115},
+ {0, 0x0003, 0x0115},
+ {0, 0x0008, 0x0123},
+ {0, 0x0000, 0x0117},
+ {0, 0x0000, 0x0112},
+ {0, 0x0080, 0x0100},
+ {0, 0x0000, 0x0100},
+ {1, 0x0000, 0x0116},
+ {0, 0x0060, 0x0116},
+ {0, 0x0002, 0x0112},
+ {0, 0x0000, 0x0123},
+ {0, 0x0001, 0x0117},
+ {0, 0x0040, 0x0108},
+ {0, 0x0019, 0x012c},
+ {0, 0x0040, 0x0116},
+ {0, 0x000a, 0x0115},
+ {0, 0x000b, 0x0115},
+ {0, 0x0078, 0x012d},
+ {0, 0x0046, 0x012f},
+ {0, 0xd141, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfea8, 0x0124},
+ {0, 0x0064, 0x0116},
+ {0, 0x0000, 0x0115},
+ {0, 0x0001, 0x0115},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00aa, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f2, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x000f, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f8, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00fc, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f9, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x003c, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xffff, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0027, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0019, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0021, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0006, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0045, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002a, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x000e, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002b, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00f4, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002c, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0004, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002d, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0014, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002e, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0003, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x002f, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0003, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0014, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0053, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0x0000, 0x0101},
+ {0, 0x00a0, 0x0103},
+ {0, 0x0078, 0x0105},
+ {0, 0x0000, 0x010a},
+ {0, 0x0024, 0x010b},
+ {0, 0x0028, 0x0119},
+ {0, 0x0088, 0x011b},
+ {0, 0x0002, 0x011d},
+ {0, 0x0003, 0x011e},
+ {0, 0x0000, 0x0129},
+ {0, 0x00fc, 0x012b},
+ {0, 0x0008, 0x0102},
+ {0, 0x0000, 0x0104},
+ {0, 0x0008, 0x011a},
+ {0, 0x0028, 0x011c},
+ {0, 0x0021, 0x012a},
+ {0, 0x0000, 0x0118},
+ {0, 0x0000, 0x0132},
+ {0, 0x0000, 0x0109},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0031, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x00dc, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0032, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0020, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0001, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0040, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0037, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0030, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0xfff9, 0x0124},
+ {0, 0x0086, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0038, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0008, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0x0000, 0x0127},
+ {0, 0xfff8, 0x0124},
+ {0, 0xfffd, 0x0124},
+ {0, 0xfffa, 0x0124},
+ {0, 0x0003, 0x0111},
+};
+
+/* TESTME the old ibmcam driver repeats certain commands to Model1 cameras, we
+ do the same for now (testing needed to see if this is really necessary) */
+static const int cit_model1_ntries = 5;
+static const int cit_model1_ntries2 = 2;
+
+static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index)
+{
+ struct usb_device *udev = gspca_dev->dev;
+ int err;
+
+ err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
+ value, index, NULL, 0, 1000);
+ if (err < 0)
+ err("Failed to write a register (index 0x%04X,"
+ " value 0x%02X, error %d)", index, value, err);
+
+ return 0;
+}
+
+static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index)
+{
+ struct usb_device *udev = gspca_dev->dev;
+ __u8 *buf = gspca_dev->usb_buf;
+ int res;
+
+ res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
+ 0x00, index, buf, 8, 1000);
+ if (res < 0) {
+ err("Failed to read a register (index 0x%04X, error %d)",
+ index, res);
+ return res;
+ }
+
+ PDEBUG(D_PROBE,
+ "Register %04x value: %02x %02x %02x %02x %02x %02x %02x %02x",
+ index,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+
+ return 0;
+}
+
+/*
+ * cit_send_FF_04_02()
+ *
+ * This procedure sends magic 3-command prefix to the camera.
+ * The purpose of this prefix is not known.
+ *
+ * History:
+ * 1/2/00 Created.
+ */
+static void cit_send_FF_04_02(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x00FF, 0x0127);
+ cit_write_reg(gspca_dev, 0x0004, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+}
+
+static void cit_send_00_04_06(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x0004, 0x0124);
+ cit_write_reg(gspca_dev, 0x0006, 0x0124);
+}
+
+static void cit_send_x_00(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+}
+
+static void cit_send_x_00_05(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_send_x_00(gspca_dev, x);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+}
+
+static void cit_send_x_00_05_02(struct gspca_dev *gspca_dev, unsigned short x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+}
+
+static void cit_send_x_01_00_05(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+}
+
+static void cit_send_x_00_05_02_01(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+}
+
+static void cit_send_x_00_05_02_08_01(struct gspca_dev *gspca_dev, u16 x)
+{
+ cit_write_reg(gspca_dev, x, 0x0127);
+ cit_write_reg(gspca_dev, 0x0000, 0x0124);
+ cit_write_reg(gspca_dev, 0x0005, 0x0124);
+ cit_write_reg(gspca_dev, 0x0002, 0x0124);
+ cit_write_reg(gspca_dev, 0x0008, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0124);
+}
+
+static void cit_Packet_Format1(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
+{
+ cit_send_x_01_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05(gspca_dev, fkey);
+ cit_send_x_00_05_02_08_01(gspca_dev, val);
+ cit_send_x_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05_02_01(gspca_dev, fkey);
+ cit_send_x_00_05(gspca_dev, 0x0089);
+ cit_send_x_00(gspca_dev, fkey);
+ cit_send_00_04_06(gspca_dev);
+ cit_read_reg(gspca_dev, 0x0126);
+ cit_send_FF_04_02(gspca_dev);
+}
+
+static void cit_PacketFormat2(struct gspca_dev *gspca_dev, u16 fkey, u16 val)
+{
+ cit_send_x_01_00_05(gspca_dev, 0x0088);
+ cit_send_x_00_05(gspca_dev, fkey);
+ cit_send_x_00_05_02(gspca_dev, val);
+}
+
+static void cit_model2_Packet2(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x00ff, 0x012d);
+ cit_write_reg(gspca_dev, 0xfea3, 0x0124);
+}
+
+static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x012e);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xc719, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+
+ cit_model2_Packet2(gspca_dev);
+}
+
+/*
+ * cit_model3_Packet1()
+ *
+ * 00_0078_012d
+ * 00_0097_012f
+ * 00_d141_0124
+ * 00_0096_0127
+ * 00_fea8_0124
+*/
+static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+}
+
+static void cit_model4_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, v1, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, v2, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+}
+
+static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val)
+{
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0026, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, val, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0038, 0x012d);
+ cit_write_reg(gspca_dev, 0x0004, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct cam *cam;
+
+ sd->model = id->driver_info;
+ if (sd->model == CIT_MODEL3 && ibm_netcam_pro)
+ sd->model = CIT_IBM_NETCAM_PRO;
+
+ cam = &gspca_dev->cam;
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cam->cam_mode = model0_mode;
+ cam->nmodes = ARRAY_SIZE(model0_mode);
+ cam->reverse_alts = 1;
+ gspca_dev->ctrl_dis = ~((1 << SD_CONTRAST) | (1 << SD_HFLIP));
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL1:
+ cam->cam_mode = cif_yuv_mode;
+ cam->nmodes = ARRAY_SIZE(cif_yuv_mode);
+ cam->reverse_alts = 1;
+ gspca_dev->ctrl_dis = (1 << SD_HUE) | (1 << SD_HFLIP);
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL2:
+ cam->cam_mode = model2_mode + 1; /* no 160x120 */
+ cam->nmodes = 3;
+ gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
+ (1 << SD_SHARPNESS) |
+ (1 << SD_HFLIP);
+ break;
+ case CIT_MODEL3:
+ cam->cam_mode = vga_yuv_mode;
+ cam->nmodes = ARRAY_SIZE(vga_yuv_mode);
+ gspca_dev->ctrl_dis = (1 << SD_HUE) |
+ (1 << SD_LIGHTING) |
+ (1 << SD_HFLIP);
+ sd->stop_on_control_change = 1;
+ sd->sof_len = 4;
+ break;
+ case CIT_MODEL4:
+ cam->cam_mode = model2_mode;
+ cam->nmodes = ARRAY_SIZE(model2_mode);
+ gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
+ (1 << SD_SHARPNESS) |
+ (1 << SD_LIGHTING) |
+ (1 << SD_HFLIP);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cam->cam_mode = vga_yuv_mode;
+ cam->nmodes = 2; /* no 640 x 480 */
+ cam->input_flags = V4L2_IN_ST_VFLIP;
+ gspca_dev->ctrl_dis = ~(1 << SD_CONTRAST);
+ sd->stop_on_control_change = 1;
+ sd->sof_len = 4;
+ break;
+ }
+
+ sd->brightness = BRIGHTNESS_DEFAULT;
+ sd->contrast = CONTRAST_DEFAULT;
+ sd->hue = HUE_DEFAULT;
+ sd->sharpness = SHARPNESS_DEFAULT;
+ sd->lighting = LIGHTING_DEFAULT;
+ sd->hflip = HFLIP_DEFAULT;
+
+ return 0;
+}
+
+static int cit_init_model0(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
+ cit_write_reg(gspca_dev, 0x0001, 0x0112); /* turn on autogain ? */
+ cit_write_reg(gspca_dev, 0x0000, 0x0400);
+ cit_write_reg(gspca_dev, 0x0001, 0x0400);
+ cit_write_reg(gspca_dev, 0x0000, 0x0420);
+ cit_write_reg(gspca_dev, 0x0001, 0x0420);
+ cit_write_reg(gspca_dev, 0x000d, 0x0409);
+ cit_write_reg(gspca_dev, 0x0002, 0x040a);
+ cit_write_reg(gspca_dev, 0x0018, 0x0405);
+ cit_write_reg(gspca_dev, 0x0008, 0x0435);
+ cit_write_reg(gspca_dev, 0x0026, 0x040b);
+ cit_write_reg(gspca_dev, 0x0007, 0x0437);
+ cit_write_reg(gspca_dev, 0x0015, 0x042f);
+ cit_write_reg(gspca_dev, 0x002b, 0x0439);
+ cit_write_reg(gspca_dev, 0x0026, 0x043a);
+ cit_write_reg(gspca_dev, 0x0008, 0x0438);
+ cit_write_reg(gspca_dev, 0x001e, 0x042b);
+ cit_write_reg(gspca_dev, 0x0041, 0x042c);
+
+ return 0;
+}
+
+static int cit_init_ibm_netcam_pro(struct gspca_dev *gspca_dev)
+{
+ cit_read_reg(gspca_dev, 0x128);
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x0000, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0115);
+ cit_write_reg(gspca_dev, 0x000b, 0x0115);
+
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0079, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xcd41, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_read_reg(gspca_dev, 0x0126);
+
+ cit_model3_Packet1(gspca_dev, 0x0000, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0000, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x000b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x000c, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x000d, 0x003a);
+ cit_model3_Packet1(gspca_dev, 0x000e, 0x0060);
+ cit_model3_Packet1(gspca_dev, 0x000f, 0x0060);
+ cit_model3_Packet1(gspca_dev, 0x0010, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0011, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0012, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0013, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0014, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0015, 0x00fb);
+ cit_model3_Packet1(gspca_dev, 0x0016, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0017, 0x0037);
+ cit_model3_Packet1(gspca_dev, 0x0018, 0x0036);
+ cit_model3_Packet1(gspca_dev, 0x001e, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0020, 0x00c1);
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0034);
+ cit_model3_Packet1(gspca_dev, 0x0022, 0x0034);
+ cit_model3_Packet1(gspca_dev, 0x0025, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0028, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x0029, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x002b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x002c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x002d, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x002e, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x002f, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0030, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0031, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x0032, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x0033, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0037, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x003a, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x003c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x000c);
+ cit_model3_Packet1(gspca_dev, 0x0041, 0x00fb);
+ cit_model3_Packet1(gspca_dev, 0x0042, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0043, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0045, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0047, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0048, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x004a, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004b, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004c, 0x00ff);
+ cit_model3_Packet1(gspca_dev, 0x004f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0050, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0055, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0056, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0057, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0058, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0059, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016);
+ cit_model3_Packet1(gspca_dev, 0x005d, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x005e, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x005f, 0x0050);
+ cit_model3_Packet1(gspca_dev, 0x0060, 0x0044);
+ cit_model3_Packet1(gspca_dev, 0x0061, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x006a, 0x007e);
+ cit_model3_Packet1(gspca_dev, 0x006f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0072, 0x001b);
+ cit_model3_Packet1(gspca_dev, 0x0073, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0074, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0075, 0x001b);
+ cit_model3_Packet1(gspca_dev, 0x0076, 0x002a);
+ cit_model3_Packet1(gspca_dev, 0x0077, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x0078, 0x0050);
+ cit_model3_Packet1(gspca_dev, 0x007b, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007c, 0x0011);
+ cit_model3_Packet1(gspca_dev, 0x007d, 0x0024);
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x0043);
+ cit_model3_Packet1(gspca_dev, 0x007f, 0x005a);
+ cit_model3_Packet1(gspca_dev, 0x0084, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0085, 0x0033);
+ cit_model3_Packet1(gspca_dev, 0x0086, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0087, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x0088, 0x0070);
+ cit_model3_Packet1(gspca_dev, 0x008b, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x008f, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0090, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x0091, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0092, 0x005a);
+ cit_model3_Packet1(gspca_dev, 0x0093, 0x0082);
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b0, 0x0046);
+ cit_model3_Packet1(gspca_dev, 0x00b1, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b2, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00b3, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00b4, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x00b6, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x00b7, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00bb, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00bc, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00bd, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00bf, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c0, 0x00c8);
+ cit_model3_Packet1(gspca_dev, 0x00c1, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00c2, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00c3, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c4, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00cb, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00cc, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00cd, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00ce, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00cf, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x00d0, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d2, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00d3, 0x00bf);
+ cit_model3_Packet1(gspca_dev, 0x00ea, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x00eb, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00ec, 0x00e8);
+ cit_model3_Packet1(gspca_dev, 0x00ed, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00ef, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x00f0, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00f2, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x00f4, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x00f5, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fa, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fb, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00fc, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fd, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00fe, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00ff, 0x0000);
+
+ cit_model3_Packet1(gspca_dev, 0x00be, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x00c8, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c9, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x00ca, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x0053, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0082, 0x000e);
+ cit_model3_Packet1(gspca_dev, 0x0083, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x0034, 0x003c);
+ cit_model3_Packet1(gspca_dev, 0x006e, 0x0055);
+ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0063, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0066, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0067, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x006b, 0x0010);
+ cit_model3_Packet1(gspca_dev, 0x005a, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x005b, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0023, 0x0006);
+ cit_model3_Packet1(gspca_dev, 0x0026, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0036, 0x0069);
+ cit_model3_Packet1(gspca_dev, 0x0038, 0x0064);
+ cit_model3_Packet1(gspca_dev, 0x003d, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x003e, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00b8, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00b9, 0x0014);
+ cit_model3_Packet1(gspca_dev, 0x00e6, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x00e8, 0x0001);
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cit_init_model0(gspca_dev);
+ sd_stop0(gspca_dev);
+ break;
+ case CIT_MODEL1:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ break; /* All is done in sd_start */
+ case CIT_IBM_NETCAM_PRO:
+ cit_init_ibm_netcam_pro(gspca_dev);
+ sd_stop0(gspca_dev);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_brightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_IBM_NETCAM_PRO:
+ /* No (known) brightness control for these */
+ break;
+ case CIT_MODEL1:
+ /* Model 1: Brightness range 0 - 63 */
+ cit_Packet_Format1(gspca_dev, 0x0031, sd->brightness);
+ cit_Packet_Format1(gspca_dev, 0x0032, sd->brightness);
+ cit_Packet_Format1(gspca_dev, 0x0033, sd->brightness);
+ break;
+ case CIT_MODEL2:
+ /* Model 2: Brightness range 0x60 - 0xee */
+ /* Scale 0 - 63 to 0x60 - 0xee */
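+ /* (0xee - 0x60) = 0x8e = 142, and 142 / 63 ~= 2.254, hence * 2254 / 1000 */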
+ i = 0x60 + sd->brightness * 2254 / 1000;
+ cit_model2_Packet1(gspca_dev, 0x001a, i);
+ break;
+ case CIT_MODEL3:
+ /* Model 3: Brightness range 'i' in [0x0C..0x3F] */
+ i = sd->brightness;
+ if (i < 0x0c)
+ i = 0x0c;
+ cit_model3_Packet1(gspca_dev, 0x0036, i);
+ break;
+ case CIT_MODEL4:
+ /* Model 4: Brightness range 'i' in [0x04..0xb4] */
+ /* Scale 0 - 63 to 0x04 - 0xb4 */
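+ /* (0xb4 - 0x04) = 0xb0 = 176, and 176 / 63 ~= 2.794, hence * 2794 / 1000 */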
+ i = 0x04 + sd->brightness * 2794 / 1000;
+ cit_model4_BrightnessPacket(gspca_dev, i);
+ break;
+ }
+
+ return 0;
+}
+
+static int cit_set_contrast(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0: {
+ int i;
+ /* gain 0-15, 0-20 -> 0-15 */
+ i = sd->contrast * 1000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0422);
+ /* gain 0-31, may not be lower than 0x0422, 0-20 -> 0-31 */
+ i = sd->contrast * 2000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0423);
+ /* gain 0-127, may not be lower than 0x0423, 0-20 -> 0-63 */
+ i = sd->contrast * 4000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0424);
+ /* gain 0-127, may not be lower than 0x0424, 0-20 -> 0-127 */
+ i = sd->contrast * 8000 / 1333;
+ cit_write_reg(gspca_dev, i, 0x0425);
+ break;
+ }
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ /* These models do not have this control. */
+ break;
+ case CIT_MODEL1:
+ {
+ /* Scale 0 - 20 to 15 - 0 */
+ int i, new_contrast = (20 - sd->contrast) * 1000 / 1333;
+ for (i = 0; i < cit_model1_ntries; i++) {
+ cit_Packet_Format1(gspca_dev, 0x0014, new_contrast);
+ cit_send_FF_04_02(gspca_dev);
+ }
+ break;
+ }
+ case CIT_MODEL3:
+ { /* Preset hardware values */
+ static const struct {
+ unsigned short cv1;
+ unsigned short cv2;
+ unsigned short cv3;
+ } cv[7] = {
+ { 0x05, 0x05, 0x0f }, /* Minimum */
+ { 0x04, 0x04, 0x16 },
+ { 0x02, 0x03, 0x16 },
+ { 0x02, 0x08, 0x16 },
+ { 0x01, 0x0c, 0x16 },
+ { 0x01, 0x0e, 0x16 },
+ { 0x01, 0x10, 0x16 } /* Maximum */
+ };
+ int i = sd->contrast / 3;
+ cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1);
+ cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2);
+ cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3);
+ break;
+ }
+ case CIT_IBM_NETCAM_PRO:
+ cit_model3_Packet1(gspca_dev, 0x005b, sd->contrast + 1);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_hue(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_IBM_NETCAM_PRO:
+ /* No hue control for these models */
+ break;
+ case CIT_MODEL2:
+ cit_model2_Packet1(gspca_dev, 0x0024, sd->hue);
+ /* cit_model2_Packet1(gspca_dev, 0x0020, sat); */
+ break;
+ case CIT_MODEL3: {
+ /* Model 3: Hue range 'i' in [0x05..0x37] */
+ /* TESTME according to the ibmcam driver this does not work */
+ if (0) {
+ /* Scale 0 - 127 to 0x05 - 0x37 */
+ int i = 0x05 + sd->hue * 1000 / 2540;
+ cit_model3_Packet1(gspca_dev, 0x007e, i);
+ }
+ break;
+ }
+ case CIT_MODEL4:
+ /* HDG: taken from ibmcam, setting the color gains does not
+ * really belong here.
+ *
+ * I am not sure the r/g/b_gain variables exactly control the gain
+ * of those channels. Most likely they subtly change some
+ * very internal image processing settings in the camera.
+ * In any case, here is what they do, and feel free to tweak:
+ *
+ * r_gain: seriously affects red gain
+ * g_gain: seriously affects green gain
+ * b_gain: seriously affects blue gain
+ * hue: changes average color from violet (0) to red (0xFF)
+ */
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 160, 0x0127); /* Green gain */
+ cit_write_reg(gspca_dev, 160, 0x012e); /* Red gain */
+ cit_write_reg(gspca_dev, 160, 0x0130); /* Blue gain */
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, sd->hue, 0x012d); /* Hue */
+ cit_write_reg(gspca_dev, 0xf545, 0x0124);
+ break;
+ }
+ return 0;
+}
+
+static int cit_set_sharpness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ /* These models do not have this control */
+ break;
+ case CIT_MODEL1: {
+ int i;
+ const unsigned short sa[] = {
+ 0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a };
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_PacketFormat2(gspca_dev, 0x0013, sa[sd->sharpness]);
+ break;
+ }
+ case CIT_MODEL3:
+ { /*
+ * "Use a table of magic numbers.
+ * This setting doesn't really change much.
+ * But that's how Windows does it."
+ */
+ static const struct {
+ unsigned short sv1;
+ unsigned short sv2;
+ unsigned short sv3;
+ unsigned short sv4;
+ } sv[7] = {
+ { 0x00, 0x00, 0x05, 0x14 }, /* Smoothest */
+ { 0x01, 0x04, 0x05, 0x14 },
+ { 0x02, 0x04, 0x05, 0x14 },
+ { 0x03, 0x04, 0x05, 0x14 },
+ { 0x03, 0x05, 0x05, 0x14 },
+ { 0x03, 0x06, 0x05, 0x14 },
+ { 0x03, 0x07, 0x05, 0x14 } /* Sharpest */
+ };
+ cit_model3_Packet1(gspca_dev, 0x0060, sv[sd->sharpness].sv1);
+ cit_model3_Packet1(gspca_dev, 0x0061, sv[sd->sharpness].sv2);
+ cit_model3_Packet1(gspca_dev, 0x0062, sv[sd->sharpness].sv3);
+ cit_model3_Packet1(gspca_dev, 0x0063, sv[sd->sharpness].sv4);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * cit_set_lighting()
+ *
+ * Camera model 1:
+ * We have 3 levels of lighting conditions: 0=Bright, 1=Medium, 2=Low.
+ *
+ * Camera model 2:
+ * We have 16 levels of lighting, 0 for bright light and up to 15 for
+ * low light. But values above 5 or so are useless because the camera is
+ * not really capable of producing anything worth viewing in such low light.
+ * This setting may only be altered in certain camera states.
+ *
+ * Low lighting forces slower FPS.
+ *
+ * History:
+ * 1/5/00 Created.
+ * 2/20/00 Added support for Model 2 cameras.
+ */
+static void cit_set_lighting(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ break;
+ case CIT_MODEL1: {
+ int i;
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ break;
+ }
+ }
+}
+
+static void cit_set_hflip(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ if (sd->hflip)
+ cit_write_reg(gspca_dev, 0x0020, 0x0115);
+ else
+ cit_write_reg(gspca_dev, 0x0040, 0x0115);
+ break;
+ case CIT_MODEL1:
+ case CIT_MODEL2:
+ case CIT_MODEL3:
+ case CIT_MODEL4:
+ case CIT_IBM_NETCAM_PRO:
+ break;
+ }
+}
+
+static int cit_restart_stream(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_MODEL3:
+ case CIT_IBM_NETCAM_PRO:
+ cit_write_reg(gspca_dev, 0x0001, 0x0114);
+ /* Fall through */
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
+ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
+ /* This happens repeatedly while streaming with the ibm netcam
+ pro and the ibmcam driver did it for model3 after changing
+ settings, but it does not seem to have any effect. */
+ /* cit_write_reg(gspca_dev, 0x0001, 0x0113); */
+ break;
+ }
+
+ sd->sof_read = 0;
+
+ return 0;
+}
+
+static int cit_get_packet_size(struct gspca_dev *gspca_dev)
+{
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
+ if (!alt) {
+ err("Couldn't get altsetting");
+ return -EIO;
+ }
+
+ return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+}
+
+/* Calculate the clockdiv giving us max fps given the available bandwidth */
+static int cit_get_clock_div(struct gspca_dev *gspca_dev)
+{
+ int clock_div = 7; /* 0=30 1=25 2=20 3=15 4=12 5=7.5 6=6 7=3fps ?? */
+ int fps[8] = { 30, 25, 20, 15, 12, 8, 6, 3 };
+ int packet_size;
+
+ packet_size = cit_get_packet_size(gspca_dev);
+ if (packet_size < 0)
+ return packet_size;
+
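+ /*
+ * The check below assumes roughly 1000 isoc packets per second and
+ * about width * height * 3 / 2 bytes per frame of sensor data; keep
+ * stepping towards a faster frame rate only while that still fits
+ * within the available packet bandwidth.
+ */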
+ while (clock_div > 3 &&
+ 1000 * packet_size >
+ gspca_dev->width * gspca_dev->height *
+ fps[clock_div - 1] * 3 / 2)
+ clock_div--;
+
+ PDEBUG(D_PROBE,
+ "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
+ packet_size, gspca_dev->width, gspca_dev->height, clock_div,
+ fps[clock_div]);
+
+ return clock_div;
+}
+
+static int cit_start_model0(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
+ cit_write_reg(gspca_dev, 0x0003, 0x0438);
+ cit_write_reg(gspca_dev, 0x001e, 0x042b);
+ cit_write_reg(gspca_dev, 0x0041, 0x042c);
+ cit_write_reg(gspca_dev, 0x0008, 0x0436);
+ cit_write_reg(gspca_dev, 0x0024, 0x0403);
+ cit_write_reg(gspca_dev, 0x002c, 0x0404);
+ cit_write_reg(gspca_dev, 0x0002, 0x0426);
+ cit_write_reg(gspca_dev, 0x0014, 0x0427);
+
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0004, 0x010b);
+ cit_write_reg(gspca_dev, 0x0001, 0x010a);
+ cit_write_reg(gspca_dev, 0x0010, 0x0102);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0078, 0x0105);
+ break;
+
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0006, 0x010b);
+ cit_write_reg(gspca_dev, 0x0000, 0x010a);
+ cit_write_reg(gspca_dev, 0x0005, 0x0102);
+ cit_write_reg(gspca_dev, 0x00b0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0090, 0x0105);
+ break;
+
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0008, 0x010b);
+ cit_write_reg(gspca_dev, 0x0004, 0x010a);
+ cit_write_reg(gspca_dev, 0x0005, 0x0102);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103);
+ cit_write_reg(gspca_dev, 0x0010, 0x0104);
+ cit_write_reg(gspca_dev, 0x0078, 0x0105);
+ break;
+ }
+
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+ return 0;
+}
+
+static int cit_start_model1(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i, clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_read_reg(gspca_dev, 0x0128);
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_write_reg(gspca_dev, 0x01, 0x0108);
+
+ cit_write_reg(gspca_dev, 0x03, 0x0112);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x06, 0x0115);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x44, 0x0116);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x40, 0x0116);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x0e, 0x0115);
+ cit_write_reg(gspca_dev, 0x19, 0x012c);
+
+ cit_Packet_Format1(gspca_dev, 0x00, 0x1e);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x0d);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x09);
+ cit_Packet_Format1(gspca_dev, 0x3b, 0x00);
+ cit_Packet_Format1(gspca_dev, 0x28, 0x22);
+ cit_Packet_Format1(gspca_dev, 0x27, 0x00);
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x08);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x2c, 0x00);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x30, 0x14);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x01, 0xe1);
+ cit_PacketFormat2(gspca_dev, 0x02, 0xcd);
+ cit_PacketFormat2(gspca_dev, 0x03, 0xcd);
+ cit_PacketFormat2(gspca_dev, 0x04, 0xfa);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x0a, 0x37);
+ cit_PacketFormat2(gspca_dev, 0x0b, 0xb8);
+ cit_PacketFormat2(gspca_dev, 0x0c, 0xf3);
+ cit_PacketFormat2(gspca_dev, 0x0d, 0xe3);
+ cit_PacketFormat2(gspca_dev, 0x0e, 0x0d);
+ cit_PacketFormat2(gspca_dev, 0x0f, 0xf2);
+ cit_PacketFormat2(gspca_dev, 0x10, 0xd5);
+ cit_PacketFormat2(gspca_dev, 0x11, 0xba);
+ cit_PacketFormat2(gspca_dev, 0x12, 0x53);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ cit_PacketFormat2(gspca_dev, 0x39, 0x02);
+ cit_PacketFormat2(gspca_dev, 0x16, 0x00);
+ cit_PacketFormat2(gspca_dev, 0x17, 0x28);
+ cit_PacketFormat2(gspca_dev, 0x18, 0x7d);
+ cit_PacketFormat2(gspca_dev, 0x19, 0xbe);
+ cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
+ cit_PacketFormat2(gspca_dev, 0x39, 0x00);
+
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x00, 0x18);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x13, 0x18);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x14, 0x06);
+
+ /* TESTME These are handled through controls
+ KEEP until someone can test leaving this out is ok */
+ if (0) {
+ /* This is default brightness */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x31, 0x37);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x32, 0x46);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x33, 0x55);
+ }
+
+ cit_Packet_Format1(gspca_dev, 0x2e, 0x04);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x2d, 0x04);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x29, 0x80);
+ cit_Packet_Format1(gspca_dev, 0x2c, 0x01);
+ cit_Packet_Format1(gspca_dev, 0x30, 0x17);
+ cit_Packet_Format1(gspca_dev, 0x39, 0x08);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x34, 0x00);
+
+ cit_write_reg(gspca_dev, 0x00, 0x0101);
+ cit_write_reg(gspca_dev, 0x00, 0x010a);
+
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_write_reg(gspca_dev, 0x80, 0x0103);
+ cit_write_reg(gspca_dev, 0x60, 0x0105);
+ cit_write_reg(gspca_dev, 0x0c, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x0b, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x00, 0x0129);
+ break;
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0xb0, 0x0103);
+ cit_write_reg(gspca_dev, 0x8f, 0x0105);
+ cit_write_reg(gspca_dev, 0x06, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x0d, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x03, 0x0129);
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0xb0, 0x0103);
+ cit_write_reg(gspca_dev, 0x90, 0x0105);
+ cit_write_reg(gspca_dev, 0x02, 0x010b);
+ cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x05, 0x011d);
+ cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x00, 0x0129);
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0xff, 0x012b);
+
+ /* TESTME These are handled through controls
+ KEEP until someone can test leaving this out is ok */
+ if (0) {
+ /* This is another brightness - don't know why */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x31, 0xc3);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x32, 0xd2);
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x33, 0xe1);
+
+ /* Default contrast */
+ for (i = 0; i < cit_model1_ntries; i++)
+ cit_Packet_Format1(gspca_dev, 0x14, 0x0a);
+
+ /* Default sharpness */
+ for (i = 0; i < cit_model1_ntries2; i++)
+ cit_PacketFormat2(gspca_dev, 0x13, 0x1a);
+
+ /* Default lighting conditions */
+ cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ }
+
+ /* Assorted init */
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x36, 0x0102);
+ cit_write_reg(gspca_dev, 0x1a, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2b, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ case 176: /* 176x144 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x04, 0x0102);
+ cit_write_reg(gspca_dev, 0x02, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2b, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ case 352: /* 352x288 */
+ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
+ cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x08, 0x0102);
+ cit_write_reg(gspca_dev, 0x01, 0x0104);
+ cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */
+ cit_write_reg(gspca_dev, 0x2f, 0x011c);
+ cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+ return 0;
+}
+
+static int cit_start_model2(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int clock_div = 0;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x00bc, 0x012c);
+ cit_write_reg(gspca_dev, 0x0008, 0x012b);
+ cit_write_reg(gspca_dev, 0x0000, 0x0108);
+ cit_write_reg(gspca_dev, 0x0001, 0x0133);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a); /* Unique to this mode */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119); /* Unique to this mode */
+ /* TESTME HDG: this does not seem right
+ (it is 2 for all other resolutions) */
+ sd->sof_len = 10;
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0028, 0x0103); /* Unique to this mode */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ /* case VIDEOSIZE_352x240: */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */
+ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */
+ sd->sof_len = 2;
+ break;
+ }
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */
+
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0050, 0x0111);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ break;
+ case 320: /* 320x240 */
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x0040, 0x0111);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ break;
+ }
+ cit_write_reg(gspca_dev, 0x009b, 0x010f);
+ cit_write_reg(gspca_dev, 0x00bb, 0x010f);
+
+ /*
+ * Hardware settings, may affect CMOS sensor; not user controls!
+ * -------------------------------------------------------------
+ * 0x0004: no effect
+ * 0x0006: hardware effect
+ * 0x0008: no effect
+ * 0x000a: stops video stream, probably important h/w setting
+ * 0x000c: changes color in hardware manner (not user setting)
+ * 0x0012: changes number of colors (does not affect speed)
+ * 0x002a: no effect
+ * 0x002c: hardware setting (related to scan lines)
+ * 0x002e: stops video stream, probably important h/w setting
+ */
+ cit_model2_Packet1(gspca_dev, 0x000a, 0x005c);
+ cit_model2_Packet1(gspca_dev, 0x0004, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x0006, 0x00fb);
+ cit_model2_Packet1(gspca_dev, 0x0008, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x000c, 0x0009);
+ cit_model2_Packet1(gspca_dev, 0x0012, 0x000a);
+ cit_model2_Packet1(gspca_dev, 0x002a, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x002c, 0x0000);
+ cit_model2_Packet1(gspca_dev, 0x002e, 0x0008);
+
+ /*
+ * Function 0x0030 pops up all over the place. Apparently
+ * it is a hardware control register, with every bit assigned to
+ * do something.
+ */
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0000);
+
+ /*
+ * Magic control of CMOS sensor. Only lower values like
+ * 0-3 work, and picture shifts left or right. Don't change.
+ */
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
+ clock_div = 6;
+ break;
+ case 320: /* 320x240 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0005); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Another hardware setting */
+ clock_div = 8;
+ break;
+ /* case VIDEOSIZE_352x240: */
+ /* This mode doesn't work as Windows programs it; changed to work */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); /* Windows sets this to 8 */
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0003); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Windows sets this to 0x0045 */
+ clock_div = 10;
+ break;
+ case 352: /* 352x288 */
+ cit_model2_Packet1(gspca_dev, 0x0014, 0x0003);
+ cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
+ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
+ clock_div = 16;
+ break;
+ }
+
+ /* TESTME These are handled through controls
+ KEEP until someone can test leaving this out is ok */
+ if (0)
+ cit_model2_Packet1(gspca_dev, 0x001a, 0x005a);
+
+ /*
+ * We have our own frame rate setting varying from 0 (slowest) to 6
+ * (fastest). The camera model 2 allows frame rate in range [0..0x1F]
+ * where 0 is also the slowest setting. However for all practical
+ * reasons high settings make no sense because USB is not fast enough
+ * to support high FPS. Be aware that the picture datastream will be
+ * severely disrupted if you ask for frame rate faster than allowed
+ * for the video size - see below:
+ *
+ * Allowable ranges (obtained experimentally on OHCI, K6-3, 450 MHz):
+ * -----------------------------------------------------------------
+ * 176x144: [6..31]
+ * 320x240: [8..31]
+ * 352x240: [10..31]
+ * 352x288: [16..31] I had to raise the lower threshold for stability...
+ *
+ * As usual, slower FPS provides better sensitivity.
+ */
+ cit_model2_Packet1(gspca_dev, 0x001c, clock_div);
+
+ /*
+ * This setting does not visibly affect pictures; left it here
+ * because it was present in Windows USB data stream. This function
+ * does not allow arbitrary values and apparently is a bit mask, to
+ * be activated only at appropriate time. Don't change it randomly!
+ */
+ switch (gspca_dev->width) {
+ case 176: /* 176x144 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
+ break;
+ case 320: /* 320x240 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0044);
+ break;
+ /* case VIDEOSIZE_352x240: */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0046);
+ break;
+ case 352: /* 352x288 */
+ cit_model2_Packet1(gspca_dev, 0x0026, 0x0048);
+ break;
+ }
+
+ /* FIXME this cannot be changed while streaming, so we
+ should report a grabbed flag for this control. */
+ cit_model2_Packet1(gspca_dev, 0x0028, sd->lighting);
+ /* color balance rg2 */
+ cit_model2_Packet1(gspca_dev, 0x001e, 0x002f);
+ /* saturation */
+ cit_model2_Packet1(gspca_dev, 0x0020, 0x0034);
+ /* color balance yb */
+ cit_model2_Packet1(gspca_dev, 0x0022, 0x00a0);
+
+ /* Hardware control command */
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
+
+ return 0;
+}
+
+static int cit_start_model3(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int i, clock_div = 0;
+
+ /* HDG not in ibmcam driver, added to see if it helps with
+ auto-detecting between model3 and ibm netcamera pro */
+ cit_read_reg(gspca_dev, 0x128);
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0112);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ cit_write_reg(gspca_dev, 0x0002, 0x0115);
+ cit_write_reg(gspca_dev, 0x0003, 0x0115);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x000b, 0x0115);
+
+ /* TESTME HDG not in ibmcam driver, added to see if it helps with
+ auto-detecting between model3 and ibm netcamera pro */
+ if (0) {
+ cit_write_reg(gspca_dev, 0x0078, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0079, 0x012d);
+ cit_write_reg(gspca_dev, 0x00ff, 0x0130);
+ cit_write_reg(gspca_dev, 0xcd41, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_read_reg(gspca_dev, 0x0126);
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x000a, 0x0040);
+ cit_model3_Packet1(gspca_dev, 0x000b, 0x00f6);
+ cit_model3_Packet1(gspca_dev, 0x000c, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x000d, 0x0020);
+ cit_model3_Packet1(gspca_dev, 0x000e, 0x0033);
+ cit_model3_Packet1(gspca_dev, 0x000f, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x0010, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0011, 0x0070);
+ cit_model3_Packet1(gspca_dev, 0x0012, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x0013, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0014, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0015, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0016, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0017, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0018, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x001e, 0x00c3);
+ cit_model3_Packet1(gspca_dev, 0x0020, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0028, 0x0010);
+ cit_model3_Packet1(gspca_dev, 0x0029, 0x0054);
+ cit_model3_Packet1(gspca_dev, 0x002a, 0x0013);
+ cit_model3_Packet1(gspca_dev, 0x002b, 0x0007);
+ cit_model3_Packet1(gspca_dev, 0x002d, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x002e, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0031, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0032, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0033, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0034, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0035, 0x0038);
+ cit_model3_Packet1(gspca_dev, 0x003a, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x003c, 0x001e);
+ cit_model3_Packet1(gspca_dev, 0x003f, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0041, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x003f);
+ cit_model3_Packet1(gspca_dev, 0x0047, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0050, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x0052, 0x001a);
+ cit_model3_Packet1(gspca_dev, 0x0053, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x005a, 0x006b);
+ cit_model3_Packet1(gspca_dev, 0x005d, 0x001e);
+ cit_model3_Packet1(gspca_dev, 0x005e, 0x0030);
+ cit_model3_Packet1(gspca_dev, 0x005f, 0x0041);
+ cit_model3_Packet1(gspca_dev, 0x0064, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0065, 0x0015);
+ cit_model3_Packet1(gspca_dev, 0x0068, 0x000f);
+ cit_model3_Packet1(gspca_dev, 0x0079, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007a, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x007c, 0x003f);
+ cit_model3_Packet1(gspca_dev, 0x0082, 0x000f);
+ cit_model3_Packet1(gspca_dev, 0x0085, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0099, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x009b, 0x0023);
+ cit_model3_Packet1(gspca_dev, 0x009c, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x009d, 0x0096);
+ cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
+ cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
+
+ switch (gspca_dev->width) {
+ case 160:
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0024, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x00a9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0016, 0x011b);
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0018, 0x0102);
+ cit_write_reg(gspca_dev, 0x0004, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x0028, 0x011c);
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_write_reg(gspca_dev, 0x0000, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ clock_div = 3;
+ break;
+ case 320:
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0028, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same */
+ cit_write_reg(gspca_dev, 0x0000, 0x011e);
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ /* 4 commands from 160x120 skipped */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0006, 0x011b);
+ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0010, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x003f, 0x011c);
+ cit_write_reg(gspca_dev, 0x001c, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ clock_div = 5;
+ break;
+ case 640:
+ cit_write_reg(gspca_dev, 0x00f0, 0x0105);
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0038, 0x010b); /* Differs everywhere */
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0006, 0x011b); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0004, 0x011d); /* NC */
+ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0016, 0x0104); /* NC */
+ cit_write_reg(gspca_dev, 0x0004, 0x011a); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x003f, 0x011c); /* Same on 320x240, 640x480 */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+ cit_write_reg(gspca_dev, 0x001c, 0x0118); /* Same on 320x240, 640x480 */
+ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, 0x0040, 0x0101);
+ cit_write_reg(gspca_dev, 0x0040, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132); /* Same on 320x240, 640x480 */
+ clock_div = 7;
+ break;
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); /* Hue */
+ cit_model3_Packet1(gspca_dev, 0x0036, 0x0011); /* Brightness */
+ cit_model3_Packet1(gspca_dev, 0x0060, 0x0002); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0061, 0x0004); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0063, 0x0014); /* Sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0067, 0x0001); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x005b, 0x000c); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); /* Contrast */
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x002c, 0x0003); /* Was 1, broke 640x480 */
+ cit_model3_Packet1(gspca_dev, 0x002f, 0x002a);
+ cit_model3_Packet1(gspca_dev, 0x0030, 0x0029);
+ cit_model3_Packet1(gspca_dev, 0x0037, 0x0002);
+ cit_model3_Packet1(gspca_dev, 0x0038, 0x0059);
+ cit_model3_Packet1(gspca_dev, 0x003d, 0x002e);
+ cit_model3_Packet1(gspca_dev, 0x003e, 0x0028);
+ cit_model3_Packet1(gspca_dev, 0x0078, 0x0005);
+ cit_model3_Packet1(gspca_dev, 0x007b, 0x0011);
+ cit_model3_Packet1(gspca_dev, 0x007d, 0x004b);
+ cit_model3_Packet1(gspca_dev, 0x007f, 0x0022);
+ cit_model3_Packet1(gspca_dev, 0x0080, 0x000c);
+ cit_model3_Packet1(gspca_dev, 0x0081, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x0083, 0x00fd);
+ cit_model3_Packet1(gspca_dev, 0x0086, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x0087, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x007e, 0x000e);
+ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */
+ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b);
+
+ /* FIXME we should probably use cit_get_clock_div() here (in
+ combination with isoc negotiation using the programmable isoc size)
+ like with the IBM netcam pro. */
+ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
+
+ switch (gspca_dev->width) {
+ case 160:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x000a);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000a);
+ break;
+ case 320:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000b);
+ break;
+ case 640:
+ cit_model3_Packet1(gspca_dev, 0x001f, 0x0002); /* !Same */
+ cit_model3_Packet1(gspca_dev, 0x0039, 0x003e); /* !Same */
+ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008);
+ cit_model3_Packet1(gspca_dev, 0x0051, 0x000a);
+ break;
+ }
+
+/* if (sd->input_index) { */
+ if (rca_input) {
+ for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) {
+ if (rca_initdata[i][0])
+ cit_read_reg(gspca_dev, rca_initdata[i][2]);
+ else
+ cit_write_reg(gspca_dev, rca_initdata[i][1],
+ rca_initdata[i][2]);
+ }
+ }
+
+ return 0;
+}
+
+static int cit_start_model4(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00bc, 0x012c);
+ cit_write_reg(gspca_dev, 0x0080, 0x012b);
+ cit_write_reg(gspca_dev, 0x0000, 0x0108);
+ cit_write_reg(gspca_dev, 0x0001, 0x0133);
+ cit_write_reg(gspca_dev, 0x009b, 0x010f);
+ cit_write_reg(gspca_dev, 0x00bb, 0x010f);
+ cit_model4_Packet1(gspca_dev, 0x0038, 0x0000);
+ cit_model4_Packet1(gspca_dev, 0x000a, 0x005c);
+
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0004, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00fb, 0x012e);
+ cit_write_reg(gspca_dev, 0x0000, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x000c, 0x0127);
+ cit_write_reg(gspca_dev, 0x0009, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0012, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0008, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x002a, 0x012d);
+ cit_write_reg(gspca_dev, 0x0000, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
+
+ switch (gspca_dev->width) {
+ case 128: /* 128x96 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000a, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00eb, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0031, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x012d);
+ cit_write_reg(gspca_dev, 0x0078, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000b, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00c7, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0025, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0048, 0x0127);
+ cit_write_reg(gspca_dev, 0x0035, 0x012e);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0048, 0x012d);
+ cit_write_reg(gspca_dev, 0x0090, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 176: /* 176x144 */
+ cit_write_reg(gspca_dev, 0x0038, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x00b9, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x002c, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0024, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0007, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0001, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005e, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0049, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00c7, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0028, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x002a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x012d);
+ cit_write_reg(gspca_dev, 0x006d, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0001, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ /* TESTME HDG: this does not seem right
+ (it is 2 for all other resolutions) */
+ sd->sof_len = 10;
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00d0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x0028, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x001e, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x000a, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005a, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0043, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00eb, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0031, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0017, 0x012d);
+ cit_write_reg(gspca_dev, 0x0078, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ case 352: /* 352x288 */
+ cit_write_reg(gspca_dev, 0x0070, 0x0119);
+ cit_write_reg(gspca_dev, 0x00c0, 0x0111);
+ cit_write_reg(gspca_dev, 0x0039, 0x010a);
+ cit_write_reg(gspca_dev, 0x0001, 0x0102);
+ cit_write_reg(gspca_dev, 0x002c, 0x0103);
+ cit_write_reg(gspca_dev, 0x0000, 0x0104);
+ cit_write_reg(gspca_dev, 0x0024, 0x0105);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0016, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0006, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0014, 0x012d);
+ cit_write_reg(gspca_dev, 0x0002, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012e);
+ cit_write_reg(gspca_dev, 0x001a, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a0a, 0x0124);
+ cit_write_reg(gspca_dev, 0x005e, 0x012d);
+ cit_write_reg(gspca_dev, 0x9545, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0127);
+ cit_write_reg(gspca_dev, 0x0018, 0x012e);
+ cit_write_reg(gspca_dev, 0x0049, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012f);
+ cit_write_reg(gspca_dev, 0xd055, 0x0124);
+ cit_write_reg(gspca_dev, 0x001c, 0x0127);
+ cit_write_reg(gspca_dev, 0x00cf, 0x012e);
+ cit_write_reg(gspca_dev, 0xaa28, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x0032, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0x00aa, 0x0130);
+ cit_write_reg(gspca_dev, 0x82a8, 0x0124);
+ cit_write_reg(gspca_dev, 0x0036, 0x012d);
+ cit_write_reg(gspca_dev, 0x0008, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0xfffa, 0x0124);
+ cit_write_reg(gspca_dev, 0x00aa, 0x012d);
+ cit_write_reg(gspca_dev, 0x001e, 0x012f);
+ cit_write_reg(gspca_dev, 0xd141, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x0127);
+ cit_write_reg(gspca_dev, 0x0013, 0x012e);
+ cit_write_reg(gspca_dev, 0x0025, 0x0130);
+ cit_write_reg(gspca_dev, 0x8a28, 0x0124);
+ cit_write_reg(gspca_dev, 0x0010, 0x012d);
+ cit_write_reg(gspca_dev, 0x0048, 0x012f);
+ cit_write_reg(gspca_dev, 0xd145, 0x0124);
+ cit_write_reg(gspca_dev, 0x0000, 0x0127);
+ cit_write_reg(gspca_dev, 0xfea8, 0x0124);
+ sd->sof_len = 2;
+ break;
+ }
+
+ cit_model4_Packet1(gspca_dev, 0x0038, 0x0004);
+
+ return 0;
+}
+
+static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
+{
+ const unsigned short compression = 0; /* 0=none, 7=best frame rate */
+ int i, clock_div;
+
+ clock_div = cit_get_clock_div(gspca_dev);
+ if (clock_div < 0)
+ return clock_div;
+
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0100);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ /* cit_write_reg(gspca_dev, 0x0002, 0x0112); see sd_stop0 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0123);
+ cit_write_reg(gspca_dev, 0x0001, 0x0117);
+ cit_write_reg(gspca_dev, 0x0040, 0x0108);
+ cit_write_reg(gspca_dev, 0x0019, 0x012c);
+ cit_write_reg(gspca_dev, 0x0060, 0x0116);
+ /* cit_write_reg(gspca_dev, 0x000b, 0x0115); see sd_stop0 */
+
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000);
+
+ cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x003a, 0x0102); /* Hstart */
+ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */
+ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */
+ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */
+ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
+ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
+
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ cit_write_reg(gspca_dev, 0x0024, 0x010b);
+ cit_write_reg(gspca_dev, 0x0089, 0x0119);
+ cit_write_reg(gspca_dev, 0x000a, 0x011b);
+ cit_write_reg(gspca_dev, 0x0003, 0x011e);
+ cit_write_reg(gspca_dev, 0x0007, 0x0104);
+ cit_write_reg(gspca_dev, 0x0009, 0x011a);
+ cit_write_reg(gspca_dev, 0x008b, 0x011c);
+ cit_write_reg(gspca_dev, 0x0008, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ break;
+ case 320: /* 320x240 */
+ cit_write_reg(gspca_dev, 0x0028, 0x010b);
+ cit_write_reg(gspca_dev, 0x00d9, 0x0119);
+ cit_write_reg(gspca_dev, 0x0006, 0x011b);
+ cit_write_reg(gspca_dev, 0x0000, 0x011e);
+ cit_write_reg(gspca_dev, 0x000e, 0x0104);
+ cit_write_reg(gspca_dev, 0x0004, 0x011a);
+ cit_write_reg(gspca_dev, 0x003f, 0x011c);
+ cit_write_reg(gspca_dev, 0x000c, 0x0118);
+ cit_write_reg(gspca_dev, 0x0000, 0x0132);
+ break;
+ }
+
+ cit_model3_Packet1(gspca_dev, 0x0019, 0x0031);
+ cit_model3_Packet1(gspca_dev, 0x001a, 0x0003);
+ cit_model3_Packet1(gspca_dev, 0x001b, 0x0038);
+ cit_model3_Packet1(gspca_dev, 0x001c, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0024, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0027, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x002a, 0x0004);
+ cit_model3_Packet1(gspca_dev, 0x0035, 0x000b);
+ cit_model3_Packet1(gspca_dev, 0x003f, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x0044, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x0054, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00c4, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00e7, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00e9, 0x0001);
+ cit_model3_Packet1(gspca_dev, 0x00ee, 0x0000);
+ cit_model3_Packet1(gspca_dev, 0x00f3, 0x00c0);
+
+ cit_write_reg(gspca_dev, compression, 0x0109);
+ cit_write_reg(gspca_dev, clock_div, 0x0111);
+
+/* if (sd->input_index) { */
+ if (rca_input) {
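+ /* Each rca_initdata entry is { read-flag, value, register }: a
+ non-zero first element means read the register instead of writing */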
+ for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) {
+ if (rca_initdata[i][0])
+ cit_read_reg(gspca_dev, rca_initdata[i][2]);
+ else
+ cit_write_reg(gspca_dev, rca_initdata[i][1],
+ rca_initdata[i][2]);
+ }
+ }
+
+ return 0;
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int packet_size;
+
+ packet_size = cit_get_packet_size(gspca_dev);
+ if (packet_size < 0)
+ return packet_size;
+
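+ /* Run the model specific initialization sequence */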
+ switch (sd->model) {
+ case CIT_MODEL0:
+ cit_start_model0(gspca_dev);
+ break;
+ case CIT_MODEL1:
+ cit_start_model1(gspca_dev);
+ break;
+ case CIT_MODEL2:
+ cit_start_model2(gspca_dev);
+ break;
+ case CIT_MODEL3:
+ cit_start_model3(gspca_dev);
+ break;
+ case CIT_MODEL4:
+ cit_start_model4(gspca_dev);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cit_start_ibm_netcam_pro(gspca_dev);
+ break;
+ }
+
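+ /* Apply the current control settings to the freshly started sensor */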
+ cit_set_brightness(gspca_dev);
+ cit_set_contrast(gspca_dev);
+ cit_set_hue(gspca_dev);
+ cit_set_sharpness(gspca_dev);
+ cit_set_lighting(gspca_dev);
+ cit_set_hflip(gspca_dev);
+
+ /* Program max isoc packet size */
+ cit_write_reg(gspca_dev, packet_size >> 8, 0x0106);
+ cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107);
+
+ cit_restart_stream(gspca_dev);
+
+ return 0;
+}
+
+static int sd_isoc_nego(struct gspca_dev *gspca_dev)
+{
+ int ret, packet_size;
+ struct usb_host_interface *alt;
+
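+ /* Negotiate isoc bandwidth by shrinking the requested max packet
+ size by 100 bytes per attempt; give up once it drops below 300 bytes */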
+ alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ packet_size -= 100;
+ if (packet_size < 300)
+ return -EIO;
+ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size);
+
+ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
+ if (ret < 0)
+ err("set alt 1 err %d", ret);
+
+ return ret;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ cit_write_reg(gspca_dev, 0x0000, 0x010c);
+}
+
+static void sd_stop0(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct usb_host_interface *alt;
+
+ /* We cannot use gspca_dev->present here, as it is not yet set
+ when sd_init runs and sd_init calls us */
+ if (!gspca_dev->dev)
+ return;
+
+ alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ /* HDG: the Windows driver does this, but it causes the cam's
+ autogain to restart from a gain of 0, which does not look good
+ when changing resolutions. */
+ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
+ cit_write_reg(gspca_dev, 0x00c0, 0x0100); /* LED Off */
+ break;
+ case CIT_MODEL1:
+ cit_send_FF_04_02(gspca_dev);
+ cit_read_reg(gspca_dev, 0x0100);
+ cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
+
+ cit_write_reg(gspca_dev, 0x0080, 0x0100); /* LED Off */
+ cit_write_reg(gspca_dev, 0x0020, 0x0111);
+ cit_write_reg(gspca_dev, 0x00a0, 0x0111);
+
+ cit_model2_Packet1(gspca_dev, 0x0030, 0x0002);
+
+ cit_write_reg(gspca_dev, 0x0020, 0x0111);
+ cit_write_reg(gspca_dev, 0x0000, 0x0112);
+ break;
+ case CIT_MODEL3:
+ cit_write_reg(gspca_dev, 0x0006, 0x012c);
+ cit_model3_Packet1(gspca_dev, 0x0046, 0x0000);
+ cit_read_reg(gspca_dev, 0x0116);
+ cit_write_reg(gspca_dev, 0x0064, 0x0116);
+ cit_read_reg(gspca_dev, 0x0115);
+ cit_write_reg(gspca_dev, 0x0003, 0x0115);
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0000, 0x0112);
+ cit_write_reg(gspca_dev, 0x0080, 0x0100);
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ cit_model3_Packet1(gspca_dev, 0x0049, 0x00ff);
+ cit_write_reg(gspca_dev, 0x0006, 0x012c);
+ cit_write_reg(gspca_dev, 0x0000, 0x0116);
+ /* HDG: the Windows driver does this, but I cannot get the camera
+ to restart after it without redoing the entire init
+ sequence, which makes switching modes really slow */
+ /* cit_write_reg(gspca_dev, 0x0006, 0x0115); */
+ cit_write_reg(gspca_dev, 0x0008, 0x0123);
+ cit_write_reg(gspca_dev, 0x0000, 0x0117);
+ cit_write_reg(gspca_dev, 0x0003, 0x0133);
+ cit_write_reg(gspca_dev, 0x0000, 0x0111);
+ /* HDG: the Windows driver does this, but I get a green picture
+ when restarting the stream afterwards */
+ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */
+ cit_write_reg(gspca_dev, 0x00c0, 0x0100);
+
+ /* Start isoc bandwidth "negotiation" at max isoc bandwidth on
+ the next stream start */
+ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(1022);
+ break;
+ }
+}
+
+static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u8 byte3 = 0, byte4 = 0;
+ int i;
+
+ switch (sd->model) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ case CIT_MODEL3:
+ case CIT_IBM_NETCAM_PRO:
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ byte3 = 0x02;
+ byte4 = 0x0a;
+ break;
+ case 176: /* 176x144 */
+ byte3 = 0x02;
+ byte4 = 0x0e;
+ break;
+ case 320: /* 320x240 */
+ byte3 = 0x02;
+ byte4 = 0x08;
+ break;
+ case 352: /* 352x288 */
+ byte3 = 0x02;
+ byte4 = 0x00;
+ break;
+ case 640:
+ byte3 = 0x03;
+ byte4 = 0x08;
+ break;
+ }
+
+ /* Models 0 and 1 use 0x00 as byte3 */
+ if (sd->model <= CIT_MODEL1)
+ byte3 = 0x00;
+
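+ /* Scan for the 4 byte SOF signature 00 ff <byte3> <byte4>; the
+ parser state lives in sd->sof_read, so a signature split over
+ two packets is still found */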
+ for (i = 0; i < len; i++) {
+ /* For this model the SOF always starts at offset 0
+ so no need to search the entire frame */
+ if (sd->model == CIT_MODEL0 && sd->sof_read != i)
+ break;
+
+ switch (sd->sof_read) {
+ case 0:
+ if (data[i] == 0x00)
+ sd->sof_read++;
+ break;
+ case 1:
+ if (data[i] == 0xff)
+ sd->sof_read++;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ case 2:
+ if (data[i] == byte3)
+ sd->sof_read++;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ case 3:
+ if (data[i] == byte4) {
+ sd->sof_read = 0;
+ return data + i + (sd->sof_len - 3);
+ }
+ if (byte3 == 0x00 && data[i] == 0xff)
+ sd->sof_read = 2;
+ else if (data[i] == 0x00)
+ sd->sof_read = 1;
+ else
+ sd->sof_read = 0;
+ break;
+ }
+ }
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ /* TESTME: we need to find a longer SOF signature to avoid
+ false positives */
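+ /* For now the signature is just the two bytes 00 ff, tracked
+ across packets through sd->sof_read */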
+ for (i = 0; i < len; i++) {
+ switch (sd->sof_read) {
+ case 0:
+ if (data[i] == 0x00)
+ sd->sof_read++;
+ break;
+ case 1:
+ sd->sof_read = 0;
+ if (data[i] == 0xff) {
+ if (i >= 4)
+ PDEBUG(D_FRAM,
+ "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+ i - 1,
+ data[i - 4],
+ data[i - 3],
+ data[i],
+ data[i + 1],
+ data[i + 2]);
+ else
+ PDEBUG(D_FRAM,
+ "header found at offset: %d: 00 %02x %02x %02x\n",
+ i - 1,
+ data[i],
+ data[i + 1],
+ data[i + 2]);
+ return data + i + (sd->sof_len - 1);
+ }
+ break;
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, int len)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ unsigned char *sof;
+
+ sof = cit_find_sof(gspca_dev, data, len);
+ if (sof) {
+ int n;
+
+ /* finish decoding current frame */
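+ /* sof points just past the SOF marker, so drop the sof_len
+ marker bytes from the tail of the finished frame */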
+ n = sof - data;
+ if (n > sd->sof_len)
+ n -= sd->sof_len;
+ else
+ n = 0;
+ gspca_frame_add(gspca_dev, LAST_PACKET,
+ data, n);
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ len -= sof - data;
+ data = sof;
+ }
+
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+}
+
+static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->brightness = val;
+ if (gspca_dev->streaming) {
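+ /* Some models need the stream stopped while a control register
+ is updated; cit_restart_stream() starts it again afterwards */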
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_brightness(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->brightness;
+
+ return 0;
+}
+
+static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->contrast = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_contrast(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+
+ return 0;
+}
+
+static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->contrast;
+
+ return 0;
+}
+
+static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->hue = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_hue(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->hue;
+
+ return 0;
+}
+
+static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->sharpness = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_sharpness(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->sharpness;
+
+ return 0;
+}
+
+static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->lighting = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_lighting(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->lighting;
+
+ return 0;
+}
+
+static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->hflip = val;
+ if (gspca_dev->streaming) {
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ cit_set_hflip(gspca_dev);
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ }
+ return 0;
+}
+
+static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->hflip;
+
+ return 0;
+}
+
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .stop0 = sd_stop0,
+ .pkt_scan = sd_pkt_scan,
+};
+
+static const struct sd_desc sd_desc_isoc_nego = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .isoc_nego = sd_isoc_nego,
+ .stopN = sd_stopN,
+ .stop0 = sd_stop0,
+ .pkt_scan = sd_pkt_scan,
+};
+
+/* -- module initialisation -- */
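+/* Models 0-3 all report USB ID 0545:8080; the bcdDevice range in
+ USB_DEVICE_VER() tells them apart */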
+static const __devinitdata struct usb_device_id device_table[] = {
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
+ { USB_DEVICE_VER(0x0545, 0x8080, 0x0301, 0x0301), .driver_info = CIT_MODEL3 },
+ { USB_DEVICE_VER(0x0545, 0x8002, 0x030a, 0x030a), .driver_info = CIT_MODEL4 },
+ { USB_DEVICE_VER(0x0545, 0x800c, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
+ { USB_DEVICE_VER(0x0545, 0x800d, 0x030a, 0x030a), .driver_info = CIT_MODEL4 },
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ const struct sd_desc *desc = &sd_desc;
+
+ switch (id->driver_info) {
+ case CIT_MODEL0:
+ case CIT_MODEL1:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
+ return -ENODEV;
+ break;
+ case CIT_MODEL2:
+ case CIT_MODEL4:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+ break;
+ case CIT_MODEL3:
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+ /* FIXME this likely applies to all model3 cams and probably
+ to other models too. */
+ if (ibm_netcam_pro)
+ desc = &sd_desc_isoc_nego;
+ break;
+ }
+
+ return gspca_dev_probe2(intf, id, desc, sizeof(struct sd), THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 0666038a51b0..c7e1970ca284 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -21,9 +21,7 @@
#define MODULE_NAME "zc3xx"
-#ifdef CONFIG_INPUT
#include <linux/input.h>
-#endif
#include "gspca.h"
#include "jpeg.h"
@@ -2953,7 +2951,7 @@ static const struct usb_action mc501cb_Initial[] = {
{}
};
-static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */
+static const struct usb_action mc501cb_InitialScale[] = { /* 320x240 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc */
{0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc */
{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc */
@@ -3731,7 +3729,6 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xaa, 0x0d, 0x0000},
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retrieving */
@@ -3785,7 +3782,6 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */
{0xa0, 0x05, ZC3XX_R185_WINYWIDTH},
{0xa0, 0x14, ZC3XX_R186_WINYCENTER},
{0xa0, 0x00, ZC3XX_R180_AUTOCORRECTENABLE},
-
/* Auto exposure and white balance */
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH},
{0xa0, 0x03, ZC3XX_R191_EXPOSURELIMITMID},
@@ -3849,7 +3845,6 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */
{0xaa, 0x0d, 0x0000},
{0xaa, 0x0e, 0x0002},
{0xaa, 0x14, 0x0081},
-
/* Other registers */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
/* Frame retrieving */
@@ -5698,7 +5693,7 @@ static u8 reg_r_i(struct gspca_dev *gspca_dev,
index, gspca_dev->usb_buf, 1,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_r_i err %d", ret);
+ err("reg_r_i err %d", ret);
gspca_dev->usb_err = ret;
return 0;
}
@@ -5730,7 +5725,7 @@ static void reg_w_i(struct gspca_dev *gspca_dev,
value, index, NULL, 0,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_w_i err %d", ret);
+ err("reg_w_i err %d", ret);
gspca_dev->usb_err = ret;
}
}
@@ -6309,8 +6304,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
if (chipset_revision_sensor[i].revision == retword) {
sd->chip_revision = retword;
send_unknown(gspca_dev, SENSOR_PB0330);
- return chipset_revision_sensor[i]
- .internal_sensor_id;
+ return chipset_revision_sensor[i].internal_sensor_id;
}
}
@@ -6503,8 +6497,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "Sensor Tas5130 (VF0250)");
break;
default:
- PDEBUG(D_PROBE,
- "Unknown sensor - set to TAS5130C");
+ warn("Unknown sensor - set to TAS5130C");
sd->sensor = SENSOR_TAS5130C;
}
break;
@@ -6610,7 +6603,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->sensor = SENSOR_OV7620; /* same sensor (?) */
break;
default:
- PDEBUG(D_ERR|D_PROBE, "Unknown sensor %04x", sensor);
+ err("Unknown sensor %04x", sensor);
return -EINVAL;
}
}
@@ -6790,7 +6783,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* fall thru */
case SENSOR_PAS202B:
case SENSOR_PO2030:
-/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); * (from win traces) */
+/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */
reg_r(gspca_dev, 0x0180);
break;
case SENSOR_OV7620:
@@ -6798,7 +6791,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x15, 0x01ae);
i2c_read(gspca_dev, 0x13); /*fixme: returns 0xa3 */
i2c_write(gspca_dev, 0x13, 0xa3, 0x00);
- /*fixme: returned value to send? */
+ /*fixme: returned value to send? */
reg_w(gspca_dev, 0x40, 0x0117);
reg_r(gspca_dev, 0x0180);
break;
@@ -6841,7 +6834,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* remove the webcam's header:
* ff d8 ff fe 00 0e 00 00 ss ss 00 01 ww ww hh hh pp pp
* - 'ss ss' is the frame sequence number (BE)
- * - 'ww ww' and 'hh hh' are the window dimensions (BE)
+ * - 'ww ww' and 'hh hh' are the window dimensions (BE)
* - 'pp pp' is the packet sequence number (BE)
*/
data += 18;
@@ -7007,7 +7000,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
return 0;
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
int len) /* interrupt packet length */
@@ -7035,7 +7028,7 @@ static const struct sd_desc sd_desc = {
.querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
};
@@ -7120,18 +7113,12 @@ static struct usb_driver sd_driver = {
static int __init sd_mod_init(void)
{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
+ return usb_register(&sd_driver);
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
diff --git a/drivers/media/video/hdpvr/hdpvr-control.c b/drivers/media/video/hdpvr/hdpvr-control.c
index 5a6b78b8d25d..068df4ba3f51 100644
--- a/drivers/media/video/hdpvr/hdpvr-control.c
+++ b/drivers/media/video/hdpvr/hdpvr-control.c
@@ -29,8 +29,6 @@ int hdpvr_config_call(struct hdpvr_device *dev, uint value, u8 valbuf)
int ret;
char request_type = 0x38, snd_request = 0x01;
- msleep(10);
-
mutex_lock(&dev->usbc_mutex);
dev->usbc_buf[0] = valbuf;
ret = usb_control_msg(dev->udev,
@@ -170,8 +168,7 @@ int hdpvr_set_audio(struct hdpvr_device *dev, u8 input,
if (ret == 2)
ret = 0;
} else
- ret = hdpvr_config_call(dev, CTRL_AUDIO_INPUT_VALUE,
- dev->options.audio_input+1);
+ ret = hdpvr_config_call(dev, CTRL_AUDIO_INPUT_VALUE, input);
error:
return ret;
}
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 0cae5b82e1a2..b70d6afc9fec 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -60,6 +60,7 @@ static struct usb_device_id hdpvr_table[] = {
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) },
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) },
{ USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID3) },
+ { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID4) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, hdpvr_table);
@@ -152,19 +153,26 @@ static int device_authorization(struct hdpvr_device *dev)
ret, print_buf);
}
#endif
- if (dev->usbc_buf[1] == HDPVR_FIRMWARE_VERSION) {
+
+ v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
+ dev->usbc_buf[1], &dev->usbc_buf[2]);
+
+ switch (dev->usbc_buf[1]) {
+ case HDPVR_FIRMWARE_VERSION:
dev->flags &= ~HDPVR_FLAG_AC3_CAP;
- } else if (dev->usbc_buf[1] == HDPVR_FIRMWARE_VERSION_AC3) {
- dev->flags |= HDPVR_FLAG_AC3_CAP;
- } else if (dev->usbc_buf[1] > HDPVR_FIRMWARE_VERSION_AC3) {
- v4l2_info(&dev->v4l2_dev, "untested firmware version 0x%x, "
- "the driver might not work\n", dev->usbc_buf[1]);
+ break;
+ case HDPVR_FIRMWARE_VERSION_AC3:
+ case HDPVR_FIRMWARE_VERSION_0X12:
+ case HDPVR_FIRMWARE_VERSION_0X15:
dev->flags |= HDPVR_FLAG_AC3_CAP;
- } else {
- v4l2_err(&dev->v4l2_dev, "unknown firmware version 0x%x\n",
- dev->usbc_buf[1]);
- ret = -EINVAL;
- goto unlock;
+ break;
+ default:
+ v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might"
+ " not work.\n");
+ if (dev->usbc_buf[1] >= HDPVR_FIRMWARE_VERSION_AC3)
+ dev->flags |= HDPVR_FLAG_AC3_CAP;
+ else
+ dev->flags &= ~HDPVR_FLAG_AC3_CAP;
}
response = dev->usbc_buf+38;
@@ -319,8 +327,12 @@ static int hdpvr_probe(struct usb_interface *interface,
if (default_video_input < HDPVR_VIDEO_INPUTS)
dev->options.video_input = default_video_input;
- if (default_audio_input < HDPVR_AUDIO_INPUTS)
+ if (default_audio_input < HDPVR_AUDIO_INPUTS) {
dev->options.audio_input = default_audio_input;
+ if (default_audio_input == HDPVR_SPDIF)
+ dev->options.audio_codec =
+ V4L2_MPEG_AUDIO_ENCODING_AC3;
+ }
dev->udev = usb_get_dev(interface_to_usbdev(interface));
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 463b81bef6e2..409de11096d4 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -127,7 +127,6 @@ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
sizeof(i2c_adap->name));
i2c_adap->algo = &hdpvr_algo;
- i2c_adap->class = I2C_CLASS_TV_ANALOG;
i2c_adap->owner = THIS_MODULE;
i2c_adap->dev.parent = &dev->udev->dev;
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 4863a21b1f24..d38fe1043e47 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -26,7 +26,7 @@
#include <media/v4l2-ioctl.h>
#include "hdpvr.h"
-#define BULK_URB_TIMEOUT 1250 /* 1.25 seconds */
+#define BULK_URB_TIMEOUT 90 /* 0.09 seconds */
#define print_buffer_status() { \
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, \
@@ -157,6 +157,7 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
mem, dev->bulk_in_size,
hdpvr_read_bulk_callback, buf);
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
buf->status = BUFSTAT_AVAILABLE;
list_add_tail(&buf->buff_list, &dev->free_buff_list);
}
@@ -337,8 +338,6 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
dev->bulk_in_endpointAddr),
buf, dev->bulk_in_size, &actual_length,
BULK_URB_TIMEOUT)) {
- /* wait */
- msleep(5);
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
"%2d: got %d bytes\n", c, actual_length);
}
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index b0f046df3cd8..5efc963f9164 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -30,14 +30,17 @@
#define HD_PVR_PRODUCT_ID 0x4900
#define HD_PVR_PRODUCT_ID1 0x4901
#define HD_PVR_PRODUCT_ID2 0x4902
+#define HD_PVR_PRODUCT_ID4 0x4903
#define HD_PVR_PRODUCT_ID3 0x4982
#define UNSET (-1U)
#define NUM_BUFFERS 64
-#define HDPVR_FIRMWARE_VERSION 0x8
-#define HDPVR_FIRMWARE_VERSION_AC3 0xd
+#define HDPVR_FIRMWARE_VERSION 0x08
+#define HDPVR_FIRMWARE_VERSION_AC3 0x0d
+#define HDPVR_FIRMWARE_VERSION_0X12 0x12
+#define HDPVR_FIRMWARE_VERSION_0X15 0x15
/* #define HDPVR_DEBUG */
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index ad2c232baa6d..7ae96367b3ab 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -367,7 +367,6 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
hexium->i2c_adapter = (struct i2c_adapter) {
- .class = I2C_CLASS_TV_ANALOG,
.name = "hexium gemini",
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 938a1f8f880a..b72d0f0b8310 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -230,7 +230,6 @@ static int hexium_probe(struct saa7146_dev *dev)
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
hexium->i2c_adapter = (struct i2c_adapter) {
- .class = I2C_CLASS_TV_ANALOG,
.name = "hexium orion",
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c
new file mode 100644
index 000000000000..380e459f899d
--- /dev/null
+++ b/drivers/media/video/imx074.c
@@ -0,0 +1,508 @@
+/*
+ * Driver for IMX074 CMOS Image Sensor from Sony
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Partially inspired by the IMX074 driver from the Android / MSM tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-chip-ident.h>
+
+/* IMX074 registers */
+
+#define MODE_SELECT 0x0100
+#define IMAGE_ORIENTATION 0x0101
+#define GROUPED_PARAMETER_HOLD 0x0104
+
+/* Integration Time */
+#define COARSE_INTEGRATION_TIME_HI 0x0202
+#define COARSE_INTEGRATION_TIME_LO 0x0203
+/* Gain */
+#define ANALOGUE_GAIN_CODE_GLOBAL_HI 0x0204
+#define ANALOGUE_GAIN_CODE_GLOBAL_LO 0x0205
+
+/* PLL registers */
+#define PRE_PLL_CLK_DIV 0x0305
+#define PLL_MULTIPLIER 0x0307
+#define PLSTATIM 0x302b
+#define VNDMY_ABLMGSHLMT 0x300a
+#define Y_OPBADDR_START_DI 0x3014
+/* mode setting */
+#define FRAME_LENGTH_LINES_HI 0x0340
+#define FRAME_LENGTH_LINES_LO 0x0341
+#define LINE_LENGTH_PCK_HI 0x0342
+#define LINE_LENGTH_PCK_LO 0x0343
+#define YADDR_START 0x0347
+#define YADDR_END 0x034b
+#define X_OUTPUT_SIZE_MSB 0x034c
+#define X_OUTPUT_SIZE_LSB 0x034d
+#define Y_OUTPUT_SIZE_MSB 0x034e
+#define Y_OUTPUT_SIZE_LSB 0x034f
+#define X_EVEN_INC 0x0381
+#define X_ODD_INC 0x0383
+#define Y_EVEN_INC 0x0385
+#define Y_ODD_INC 0x0387
+
+#define HMODEADD 0x3001
+#define VMODEADD 0x3016
+#define VAPPLINE_START 0x3069
+#define VAPPLINE_END 0x306b
+#define SHUTTER 0x3086
+#define HADDAVE 0x30e8
+#define LANESEL 0x3301
+
+/* IMX074 supported geometry */
+#define IMX074_WIDTH 1052
+#define IMX074_HEIGHT 780
+
+/* IMX074 has only one fixed colorspace per pixelcode */
+struct imx074_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+struct imx074 {
+ struct v4l2_subdev subdev;
+ const struct imx074_datafmt *fmt;
+};
+
+static const struct imx074_datafmt imx074_colour_fmts[] = {
+ {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+};
+
+static struct imx074 *to_imx074(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct imx074, subdev);
+}
+
+/* Find a data format by a pixel code in an array */
+static const struct imx074_datafmt *imx074_find_datafmt(enum v4l2_mbus_pixelcode code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx074_colour_fmts); i++)
+ if (imx074_colour_fmts[i].code == code)
+ return imx074_colour_fmts + i;
+
+ return NULL;
+}
+
+static int reg_write(struct i2c_client *client, const u16 addr, const u8 data)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ unsigned char tx[3];
+ int ret;
+
+ msg.addr = client->addr;
+ msg.buf = tx;
+ msg.len = 3;
+ msg.flags = 0;
+
+ tx[0] = addr >> 8;
+ tx[1] = addr & 0xff;
+ tx[2] = data;
+
+ ret = i2c_transfer(adap, &msg, 1);
+
+ mdelay(2);
+
+ return ret == 1 ? 0 : -EIO;
+}
+
+static int reg_read(struct i2c_client *client, const u16 addr)
+{
+ u8 buf[2] = {addr >> 8, addr & 0xff};
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 2,
+ .buf = buf,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ dev_warn(&client->dev, "Reading register %x from %x failed\n",
+ addr, client->addr);
+ return ret;
+ }
+
+ return buf[0] & 0xff; /* no sign-extension */
+}
+
+static int imx074_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ const struct imx074_datafmt *fmt = imx074_find_datafmt(mf->code);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ if (!fmt) {
+ mf->code = imx074_colour_fmts[0].code;
+ mf->colorspace = imx074_colour_fmts[0].colorspace;
+ }
+
+ mf->width = IMX074_WIDTH;
+ mf->height = IMX074_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx074_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx074 *priv = to_imx074(client);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ /* MIPI CSI could have changed the format, double-check */
+ if (!imx074_find_datafmt(mf->code))
+ return -EINVAL;
+
+ imx074_try_fmt(sd, mf);
+
+ priv->fmt = imx074_find_datafmt(mf->code);
+
+ return 0;
+}
+
+static int imx074_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx074 *priv = to_imx074(client);
+
+ const struct imx074_datafmt *fmt = priv->fmt;
+
+ mf->code = fmt->code;
+ mf->colorspace = fmt->colorspace;
+ mf->width = IMX074_WIDTH;
+ mf->height = IMX074_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx074_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rect->top = 0;
+ rect->left = 0;
+ rect->width = IMX074_WIDTH;
+ rect->height = IMX074_HEIGHT;
+
+ return 0;
+}
+
+static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = IMX074_WIDTH;
+ a->bounds.height = IMX074_HEIGHT;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int imx074_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(imx074_colour_fmts))
+ return -EINVAL;
+
+ *code = imx074_colour_fmts[index].code;
+ return 0;
+}
+
+static int imx074_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* MODE_SELECT: stream or standby */
+ return reg_write(client, MODE_SELECT, !!enable);
+}
+
+static int imx074_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = V4L2_IDENT_IMX074;
+ id->revision = 0;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
+ .s_stream = imx074_s_stream,
+ .s_mbus_fmt = imx074_s_fmt,
+ .g_mbus_fmt = imx074_g_fmt,
+ .try_mbus_fmt = imx074_try_fmt,
+ .enum_mbus_fmt = imx074_enum_fmt,
+ .g_crop = imx074_g_crop,
+ .cropcap = imx074_cropcap,
+};
+
+static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
+ .g_chip_ident = imx074_g_chip_ident,
+};
+
+static struct v4l2_subdev_ops imx074_subdev_ops = {
+ .core = &imx074_subdev_core_ops,
+ .video = &imx074_subdev_video_ops,
+};
+
+/*
+ * We have to provide soc-camera operations, but we don't have anything to say
+ * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
+ */
+static unsigned long imx074_query_bus_param(struct soc_camera_device *icd)
+{
+ return 0;
+}
+
+static int imx074_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return -1;
+}
+
+static struct soc_camera_ops imx074_ops = {
+ .query_bus_param = imx074_query_bus_param,
+ .set_bus_param = imx074_set_bus_param,
+};
+
+static int imx074_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ int ret;
+ u16 id;
+
+ /* Read sensor Model ID */
+ ret = reg_read(client, 0);
+ if (ret < 0)
+ return ret;
+
+ id = ret << 8;
+
+ ret = reg_read(client, 1);
+ if (ret < 0)
+ return ret;
+
+ id |= ret;
+
+ dev_info(&client->dev, "Chip ID 0x%04x detected\n", id);
+
+ if (id != 0x74)
+ return -ENODEV;
+
+ /* PLL setting: EXTCLK = 24 MHz, multiplied 22.5 times (45 / 2) */
+ reg_write(client, PLL_MULTIPLIER, 0x2D);
+ reg_write(client, PRE_PLL_CLK_DIV, 0x02);
+ reg_write(client, PLSTATIM, 0x4B);
+
+ /* 2-lane mode */
+ reg_write(client, 0x3024, 0x00);
+
+ reg_write(client, IMAGE_ORIENTATION, 0x00);
+
+ /* select RAW mode:
+ * 0x08+0x08 = top 8 bits
+ * 0x0a+0x08 = compressed 8-bits
+ * 0x0a+0x0a = 10 bits
+ */
+ reg_write(client, 0x0112, 0x08);
+ reg_write(client, 0x0113, 0x08);
+
+ /* Base setting for High frame mode */
+ reg_write(client, VNDMY_ABLMGSHLMT, 0x80);
+ reg_write(client, Y_OPBADDR_START_DI, 0x08);
+ reg_write(client, 0x3015, 0x37);
+ reg_write(client, 0x301C, 0x01);
+ reg_write(client, 0x302C, 0x05);
+ reg_write(client, 0x3031, 0x26);
+ reg_write(client, 0x3041, 0x60);
+ reg_write(client, 0x3051, 0x24);
+ reg_write(client, 0x3053, 0x34);
+ reg_write(client, 0x3057, 0xC0);
+ reg_write(client, 0x305C, 0x09);
+ reg_write(client, 0x305D, 0x07);
+ reg_write(client, 0x3060, 0x30);
+ reg_write(client, 0x3065, 0x00);
+ reg_write(client, 0x30AA, 0x08);
+ reg_write(client, 0x30AB, 0x1C);
+ reg_write(client, 0x30B0, 0x32);
+ reg_write(client, 0x30B2, 0x83);
+ reg_write(client, 0x30D3, 0x04);
+ reg_write(client, 0x3106, 0x78);
+ reg_write(client, 0x310C, 0x82);
+ reg_write(client, 0x3304, 0x05);
+ reg_write(client, 0x3305, 0x04);
+ reg_write(client, 0x3306, 0x11);
+ reg_write(client, 0x3307, 0x02);
+ reg_write(client, 0x3308, 0x0C);
+ reg_write(client, 0x3309, 0x06);
+ reg_write(client, 0x330A, 0x08);
+ reg_write(client, 0x330B, 0x04);
+ reg_write(client, 0x330C, 0x08);
+ reg_write(client, 0x330D, 0x06);
+ reg_write(client, 0x330E, 0x01);
+ reg_write(client, 0x3381, 0x00);
+
+ /* V : 1/2V-addition (1,3), H : 1/2H-averaging (1,3) -> Full HD */
+ /* 1608 = 1560 + 48 (black lines) */
+ reg_write(client, FRAME_LENGTH_LINES_HI, 0x06);
+ reg_write(client, FRAME_LENGTH_LINES_LO, 0x48);
+ reg_write(client, YADDR_START, 0x00);
+ reg_write(client, YADDR_END, 0x2F);
+ /* 0x838 == 2104 */
+ reg_write(client, X_OUTPUT_SIZE_MSB, 0x08);
+ reg_write(client, X_OUTPUT_SIZE_LSB, 0x38);
+ /* 0x618 == 1560 */
+ reg_write(client, Y_OUTPUT_SIZE_MSB, 0x06);
+ reg_write(client, Y_OUTPUT_SIZE_LSB, 0x18);
+ reg_write(client, X_EVEN_INC, 0x01);
+ reg_write(client, X_ODD_INC, 0x03);
+ reg_write(client, Y_EVEN_INC, 0x01);
+ reg_write(client, Y_ODD_INC, 0x03);
+ reg_write(client, HMODEADD, 0x00);
+ reg_write(client, VMODEADD, 0x16);
+ reg_write(client, VAPPLINE_START, 0x24);
+ reg_write(client, VAPPLINE_END, 0x53);
+ reg_write(client, SHUTTER, 0x00);
+ reg_write(client, HADDAVE, 0x80);
+
+ reg_write(client, LANESEL, 0x00);
+
+ reg_write(client, GROUPED_PARAMETER_HOLD, 0x00); /* off */
+
+ return 0;
+}
+
+static int imx074_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct imx074 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "IMX074: missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "IMX074: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
+ return -EIO;
+ }
+
+ priv = kzalloc(sizeof(struct imx074), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &imx074_subdev_ops);
+
+ icd->ops = &imx074_ops;
+ priv->fmt = &imx074_colour_fmts[0];
+
+ ret = imx074_video_probe(icd, client);
+ if (ret < 0) {
+ icd->ops = NULL;
+ i2c_set_clientdata(client, NULL);
+ kfree(priv);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx074_remove(struct i2c_client *client)
+{
+ struct imx074 *priv = to_imx074(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ icd->ops = NULL;
+ if (icl->free_bus)
+ icl->free_bus(icl);
+ i2c_set_clientdata(client, NULL);
+ client->driver = NULL;
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id imx074_id[] = {
+ { "imx074", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, imx074_id);
+
+static struct i2c_driver imx074_i2c_driver = {
+ .driver = {
+ .name = "imx074",
+ },
+ .probe = imx074_probe,
+ .remove = imx074_remove,
+ .id_table = imx074_id,
+};
+
+static int __init imx074_mod_init(void)
+{
+ return i2c_add_driver(&imx074_i2c_driver);
+}
+
+static void __exit imx074_mod_exit(void)
+{
+ i2c_del_driver(&imx074_i2c_driver);
+}
+
+module_init(imx074_mod_init);
+module_exit(imx074_mod_exit);
+
+MODULE_DESCRIPTION("Sony IMX074 Camera driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
index 3d6940163b12..e5ed4db32e7b 100644
--- a/drivers/media/video/indycam.c
+++ b/drivers/media/video/indycam.c
@@ -24,7 +24,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "indycam.h"
@@ -378,9 +377,25 @@ static const struct i2c_device_id indycam_id[] = {
};
MODULE_DEVICE_TABLE(i2c, indycam_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "indycam",
- .probe = indycam_probe,
- .remove = indycam_remove,
- .id_table = indycam_id,
+static struct i2c_driver indycam_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "indycam",
+ },
+ .probe = indycam_probe,
+ .remove = indycam_remove,
+ .id_table = indycam_id,
};
+
+static __init int init_indycam(void)
+{
+ return i2c_add_driver(&indycam_driver);
+}
+
+static __exit void exit_indycam(void)
+{
+ i2c_del_driver(&indycam_driver);
+}
+
+module_init(init_indycam);
+module_exit(exit_indycam);
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 27ae8bbfb477..5a000c65ae98 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -146,26 +146,6 @@ static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
-{
- unsigned char b;
-
- /* poll IR chip */
- if (1 != i2c_master_recv(ir->c, &b, 1)) {
- dprintk(1,"read error\n");
- return -EIO;
- }
-
- /* ignore 0xaa */
- if (b==0xaa)
- return 0;
- dprintk(2,"key %02x\n", b);
-
- *ir_key = b;
- *ir_raw = b;
- return 1;
-}
-
static int get_key_fusionhdtv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char buf[4];
@@ -279,15 +259,9 @@ static void ir_key_poll(struct IR_i2c *ir)
static void ir_work(struct work_struct *work)
{
struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work);
- int polling_interval = 100;
-
- /* MSI TV@nywhere Plus requires more frequent polling
- otherwise it will miss some keypresses */
- if (ir->c->adapter->id == I2C_HW_SAA7134 && ir->c->addr == 0x30)
- polling_interval = 50;
ir_key_poll(ir);
- schedule_delayed_work(&ir->work, msecs_to_jiffies(polling_interval));
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
}
/* ----------------------------------------------------------------------- */
@@ -312,6 +286,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->c = client;
ir->input = input_dev;
+ ir->polling_interval = DEFAULT_POLLING_INTERVAL;
i2c_set_clientdata(client, ir);
switch(addr) {
@@ -321,12 +296,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
- case 0x4b:
- name = "PV951";
- ir->get_key = get_key_pv951;
- ir_type = IR_TYPE_OTHER;
- ir_codes = RC_MAP_PV951;
- break;
case 0x18:
case 0x1f:
case 0x1a:
@@ -351,27 +320,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
- case 0x0b:
- case 0x47:
- case 0x71:
- if (adap->id == I2C_HW_B_CX2388x ||
- adap->id == I2C_HW_B_CX2341X) {
- /* Handled by cx88-input */
- name = adap->id == I2C_HW_B_CX2341X ? "CX2341x remote"
- : "CX2388x remote";
- ir_type = IR_TYPE_RC5;
- ir->get_key = get_key_haup_xvr;
- if (hauppauge == 1) {
- ir_codes = RC_MAP_HAUPPAUGE_NEW;
- } else {
- ir_codes = RC_MAP_RC5_TV;
- }
- } else {
- /* Handled by saa7134-input */
- name = "SAA713x remote";
- ir_type = IR_TYPE_OTHER;
- }
- break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
@@ -390,6 +338,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (init_data->type)
ir_type = init_data->type;
+ if (init_data->polling_interval)
+ ir->polling_interval = init_data->polling_interval;
+
switch (init_data->internal_get_key_func) {
case IR_KBD_GET_KEY_CUSTOM:
/* The bridge driver provided us its own function */
@@ -398,9 +349,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case IR_KBD_GET_KEY_PIXELVIEW:
ir->get_key = get_key_pixelview;
break;
- case IR_KBD_GET_KEY_PV951:
- ir->get_key = get_key_pv951;
- break;
case IR_KBD_GET_KEY_HAUP:
ir->get_key = get_key_haup;
break;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 75803141481e..04bacdbd10bb 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -811,15 +811,23 @@ static inline int ivtv_raw_vbi(const struct ivtv *itv)
/* Call the specified callback for all subdevs matching hw (if 0, then
match them all). Ignore any errors. */
#define ivtv_call_hw(itv, hw, o, f, args...) \
- __v4l2_device_call_subdevs(&(itv)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+ do { \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_p(&(itv)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+ } while (0)
#define ivtv_call_all(itv, o, f, args...) ivtv_call_hw(itv, 0, o, f , ##args)
/* Call the specified callback for all subdevs matching hw (if 0, then
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. */
-#define ivtv_call_hw_err(itv, hw, o, f, args...) \
- __v4l2_device_call_subdevs_until_err(&(itv)->v4l2_dev, !(hw) || (sd->grp_id & (hw)), o, f , ##args)
+#define ivtv_call_hw_err(itv, hw, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(&(itv)->v4l2_dev, __sd, \
+ !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
+})
#define ivtv_call_all_err(itv, o, f, args...) ivtv_call_hw_err(itv, 0, o, f , ##args)
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index a74fa099c565..9e8039ac909e 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -121,31 +121,6 @@ static const u8 hw_addrs[] = {
};
/* This array should match the IVTV_HW_ defines */
-static const char *hw_modules[] = {
- "cx25840",
- "saa7115",
- "saa7127",
- "msp3400",
- "tuner",
- "wm8775",
- "cs53l32a",
- NULL,
- "saa7115",
- "upd64031a",
- "upd64083",
- "saa717x",
- "wm8739",
- "vp27smpx",
- "m52790",
- NULL,
- NULL, /* IVTV_HW_I2C_IR_RX_AVER */
- NULL, /* IVTV_HW_I2C_IR_RX_HAUP_EXT */
- NULL, /* IVTV_HW_I2C_IR_RX_HAUP_INT */
- NULL, /* IVTV_HW_Z8F0811_IR_TX_HAUP */
- NULL, /* IVTV_HW_Z8F0811_IR_RX_HAUP */
-};
-
-/* This array should match the IVTV_HW_ defines */
static const char * const hw_devicenames[] = {
"cx25840",
"saa7115",
@@ -257,7 +232,6 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
{
struct v4l2_subdev *sd;
struct i2c_adapter *adap = &itv->i2c_adap;
- const char *mod = hw_modules[idx];
const char *type = hw_devicenames[idx];
u32 hw = 1 << idx;
@@ -266,17 +240,17 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
if (hw == IVTV_HW_TUNER) {
/* special tuner handling */
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
+ adap, NULL, type,
0, itv->card_i2c->radio);
if (sd)
sd->grp_id = 1 << idx;
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
+ adap, NULL, type,
0, itv->card_i2c->demod);
if (sd)
sd->grp_id = 1 << idx;
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type,
+ adap, NULL, type,
0, itv->card_i2c->tv);
if (sd)
sd->grp_id = 1 << idx;
@@ -293,16 +267,17 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
/* It's an I2C device other than an analog tuner or IR chip */
if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type, 0, I2C_ADDRS(hw_addrs[idx]));
+ adap, NULL, type, 0, I2C_ADDRS(hw_addrs[idx]));
} else if (hw == IVTV_HW_CX25840) {
struct cx25840_platform_data pdata;
pdata.pvr150_workaround = itv->pvr150_workaround;
sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
- adap, mod, type, 0, &pdata, hw_addrs[idx], NULL);
+ adap, NULL, type, 0, &pdata, hw_addrs[idx],
+ NULL);
} else {
sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
- adap, mod, type, hw_addrs[idx], NULL);
+ adap, NULL, type, hw_addrs[idx], NULL);
}
if (sd)
sd->grp_id = 1 << idx;
@@ -706,8 +681,7 @@ int init_ivtv_i2c(struct ivtv *itv)
/* Sanity checks for the I2C hardware arrays. They must be the
* same size.
*/
- if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs) ||
- ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_modules)) {
+ if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs)) {
IVTV_ERR("Mismatched I2C hardware arrays\n");
return -ENODEV;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 4eed9123683e..b686da5e4326 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -37,7 +37,6 @@
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>
-#include <linux/i2c-id.h>
u16 ivtv_service2vbi(int type)
{
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c
index 94734828053b..afa91182b448 100644
--- a/drivers/media/video/ks0127.c
+++ b/drivers/media/video/ks0127.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "ks0127.h"
MODULE_DESCRIPTION("KS0127 video decoder driver");
@@ -712,9 +711,25 @@ static const struct i2c_device_id ks0127_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ks0127_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "ks0127",
- .probe = ks0127_probe,
- .remove = ks0127_remove,
- .id_table = ks0127_id,
+static struct i2c_driver ks0127_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ks0127",
+ },
+ .probe = ks0127_probe,
+ .remove = ks0127_remove,
+ .id_table = ks0127_id,
};
+
+static __init int init_ks0127(void)
+{
+ return i2c_add_driver(&ks0127_driver);
+}
+
+static __exit void exit_ks0127(void)
+{
+ i2c_del_driver(&ks0127_driver);
+}
+
+module_init(init_ks0127);
+module_exit(exit_ks0127);
diff --git a/drivers/media/video/m52790.c b/drivers/media/video/m52790.c
index 4491d018eba6..5e1c9a81984c 100644
--- a/drivers/media/video/m52790.c
+++ b/drivers/media/video/m52790.c
@@ -26,12 +26,10 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/m52790.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("i2c device driver for m52790 A/V switch");
MODULE_AUTHOR("Hans Verkuil");
@@ -205,9 +203,25 @@ static const struct i2c_device_id m52790_id[] = {
};
MODULE_DEVICE_TABLE(i2c, m52790_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "m52790",
- .probe = m52790_probe,
- .remove = m52790_remove,
- .id_table = m52790_id,
+static struct i2c_driver m52790_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "m52790",
+ },
+ .probe = m52790_probe,
+ .remove = m52790_remove,
+ .id_table = m52790_id,
};
+
+static __init int init_m52790(void)
+{
+ return i2c_add_driver(&m52790_driver);
+}
+
+static __exit void exit_m52790(void)
+{
+ i2c_del_driver(&m52790_driver);
+}
+
+module_init(init_m52790);
+module_exit(exit_m52790);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index a7210d981388..3b19f5b25a72 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -848,7 +848,7 @@ static void queue_init(void *priv, struct videobuf_queue *vq,
videobuf_queue_vmalloc_init(vq, &m2mtest_qops, ctx->dev->v4l2_dev.dev,
&ctx->dev->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct m2mtest_buffer), priv);
+ sizeof(struct m2mtest_buffer), priv, NULL);
}
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 0e412131da7c..b1763ac93ab3 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -56,7 +56,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/msp3400.h>
#include <media/tvaudio.h>
#include "msp3400-driver.h"
@@ -382,7 +381,12 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
void msp_update_volume(struct msp_state *state)
{
- v4l2_ctrl_s_ctrl(state->volume, v4l2_ctrl_g_ctrl(state->volume));
+ /* Force an update of the volume/mute cluster */
+ v4l2_ctrl_lock(state->volume);
+ state->volume->val = state->volume->cur.val;
+ state->muted->val = state->muted->cur.val;
+ msp_s_ctrl(state->volume);
+ v4l2_ctrl_unlock(state->volume);
}
/* --- v4l2 ioctls --- */
@@ -843,15 +847,31 @@ static const struct i2c_device_id msp_id[] = {
};
MODULE_DEVICE_TABLE(i2c, msp_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "msp3400",
- .probe = msp_probe,
- .remove = msp_remove,
- .suspend = msp_suspend,
- .resume = msp_resume,
- .id_table = msp_id,
+static struct i2c_driver msp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msp3400",
+ },
+ .probe = msp_probe,
+ .remove = msp_remove,
+ .suspend = msp_suspend,
+ .resume = msp_resume,
+ .id_table = msp_id,
};
+static __init int init_msp(void)
+{
+ return i2c_add_driver(&msp_driver);
+}
+
+static __exit void exit_msp(void)
+{
+ i2c_del_driver(&msp_driver);
+}
+
+module_init(init_msp);
+module_exit(exit_msp);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 79f096ddcf5d..fcb4cd941853 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -157,7 +157,7 @@ static int mt9m001_init(struct i2c_client *client)
static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Switch to master "normal" mode or stop sensor readout */
if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0)
@@ -206,7 +206,7 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_rect rect = a->c;
struct soc_camera_device *icd = client->dev.platform_data;
@@ -271,7 +271,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
a->c = mt9m001->rect;
@@ -297,7 +297,7 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m001_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
mf->width = mt9m001->rect.width;
@@ -312,7 +312,7 @@ static int mt9m001_g_fmt(struct v4l2_subdev *sd,
static int mt9m001_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_crop a = {
.c = {
@@ -340,7 +340,7 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
static int mt9m001_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
const struct mt9m001_datafmt *fmt;
@@ -367,7 +367,7 @@ static int mt9m001_try_fmt(struct v4l2_subdev *sd,
static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -386,7 +386,7 @@ static int mt9m001_g_chip_ident(struct v4l2_subdev *sd,
static int mt9m001_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -406,7 +406,7 @@ static int mt9m001_g_register(struct v4l2_subdev *sd,
static int mt9m001_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -468,7 +468,7 @@ static struct soc_camera_ops mt9m001_ops = {
static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
int data;
@@ -494,7 +494,7 @@ static int mt9m001_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct soc_camera_device *icd = client->dev.platform_data;
const struct v4l2_queryctrl *qctrl;
@@ -683,7 +683,7 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
*lines = mt9m001->y_skip_top;
@@ -704,7 +704,7 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
if (index >= mt9m001->num_fmts)
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index c71af4e0e517..525a16e73285 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -100,14 +100,14 @@
#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9)
#define MT9M111_OUTFMT_RGB (1 << 8)
-#define MT9M111_OUTFMT_RGB565 (0x0 << 6)
-#define MT9M111_OUTFMT_RGB555 (0x1 << 6)
-#define MT9M111_OUTFMT_RGB444x (0x2 << 6)
-#define MT9M111_OUTFMT_RGBx444 (0x3 << 6)
-#define MT9M111_OUTFMT_TST_RAMP_OFF (0x0 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_COL (0x1 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_ROW (0x2 << 4)
-#define MT9M111_OUTFMT_TST_RAMP_FRAME (0x3 << 4)
+#define MT9M111_OUTFMT_RGB565 (0 << 6)
+#define MT9M111_OUTFMT_RGB555 (1 << 6)
+#define MT9M111_OUTFMT_RGB444x (2 << 6)
+#define MT9M111_OUTFMT_RGBx444 (3 << 6)
+#define MT9M111_OUTFMT_TST_RAMP_OFF (0 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_COL (1 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_ROW (2 << 4)
+#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4)
#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3)
#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2)
#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1)
@@ -124,7 +124,7 @@
#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
#define MT9M111_MIN_DARK_ROWS 8
-#define MT9M111_MIN_DARK_COLS 24
+#define MT9M111_MIN_DARK_COLS 26
#define MT9M111_MAX_HEIGHT 1024
#define MT9M111_MAX_WIDTH 1280
@@ -440,7 +440,7 @@ static int mt9m111_make_rect(struct i2c_client *client,
static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
int ret;
@@ -458,7 +458,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
a->c = mt9m111->rect;
@@ -486,7 +486,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m111_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
mf->width = mt9m111->rect.width;
@@ -549,7 +549,7 @@ static int mt9m111_set_pixfmt(struct i2c_client *client,
static int mt9m111_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct mt9m111_datafmt *fmt;
struct mt9m111 *mt9m111 = to_mt9m111(client);
struct v4l2_rect rect = {
@@ -584,7 +584,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
static int mt9m111_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
const struct mt9m111_datafmt *fmt;
bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
@@ -624,7 +624,7 @@ static int mt9m111_try_fmt(struct v4l2_subdev *sd,
static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -643,7 +643,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
static int mt9m111_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int val;
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
@@ -664,7 +664,7 @@ static int mt9m111_g_register(struct v4l2_subdev *sd,
static int mt9m111_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
return -EINVAL;
@@ -812,7 +812,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
int data;
@@ -855,7 +855,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = to_mt9m111(client);
const struct v4l2_queryctrl *qctrl;
int ret;
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index a9a28b214235..9bd44a816ea1 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -163,7 +163,7 @@ static int mt9t031_disable(struct i2c_client *client)
static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
if (enable)
@@ -393,7 +393,7 @@ static int mt9t031_set_params(struct i2c_client *client,
static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
rect.width = ALIGN(rect.width, 2);
@@ -410,7 +410,7 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
a->c = mt9t031->rect;
@@ -436,7 +436,7 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9t031_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
mf->width = mt9t031->rect.width / mt9t031->xskip;
@@ -451,7 +451,7 @@ static int mt9t031_g_fmt(struct v4l2_subdev *sd,
static int mt9t031_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
u16 xskip, yskip;
struct v4l2_rect rect = mt9t031->rect;
@@ -490,7 +490,7 @@ static int mt9t031_try_fmt(struct v4l2_subdev *sd,
static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -509,7 +509,7 @@ static int mt9t031_g_chip_ident(struct v4l2_subdev *sd,
static int mt9t031_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -528,7 +528,7 @@ static int mt9t031_g_register(struct v4l2_subdev *sd,
static int mt9t031_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -545,7 +545,7 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
int data;
@@ -577,7 +577,7 @@ static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
const struct v4l2_queryctrl *qctrl;
int data;
@@ -703,7 +703,7 @@ static int mt9t031_runtime_resume(struct device *dev)
struct soc_camera_device *icd = container_of(vdev->parent,
struct soc_camera_device, dev);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
int ret;
@@ -780,7 +780,7 @@ static int mt9t031_video_probe(struct i2c_client *client)
static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
*lines = mt9t031->y_skip_top;
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 8ec47e42d4d0..bffa9ee10968 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -804,7 +804,7 @@ static struct soc_camera_ops mt9t112_ops = {
static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
id->ident = priv->model;
@@ -817,7 +817,7 @@ static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
static int mt9t112_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
reg->size = 2;
@@ -831,7 +831,7 @@ static int mt9t112_g_register(struct v4l2_subdev *sd,
static int mt9t112_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
mt9t112_reg_write(ret, client, reg->reg, reg->val);
@@ -858,7 +858,7 @@ static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
************************************************************************/
static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
int ret = 0;
@@ -968,7 +968,7 @@ static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_rect *rect = &a->c;
return mt9t112_set_params(client, rect->width, rect->height,
@@ -978,7 +978,7 @@ static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9t112_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
if (!priv->format) {
@@ -1000,7 +1000,7 @@ static int mt9t112_g_fmt(struct v4l2_subdev *sd,
static int mt9t112_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* TODO: set colorspace */
return mt9t112_set_params(client, mf->width, mf->height, mf->code);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index f5e778d5ca9f..209ff97261a9 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -11,9 +11,8 @@
#include <linux/delay.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
-#include "mt9v011.h"
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-chip-ident.h>
+#include "mt9v011.h"
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
@@ -624,9 +623,25 @@ static const struct i2c_device_id mt9v011_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "mt9v011",
- .probe = mt9v011_probe,
- .remove = mt9v011_remove,
- .id_table = mt9v011_id,
+static struct i2c_driver mt9v011_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mt9v011",
+ },
+ .probe = mt9v011_probe,
+ .remove = mt9v011_remove,
+ .id_table = mt9v011_id,
};
+
+static __init int init_mt9v011(void)
+{
+ return i2c_add_driver(&mt9v011_driver);
+}
+
+static __exit void exit_mt9v011(void)
+{
+ i2c_del_driver(&mt9v011_driver);
+}
+
+module_init(init_mt9v011);
+module_exit(exit_mt9v011);
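
The hunk above drops the v4l2-i2c-drv wrapper and registers a plain struct i2c_driver with explicit init/exit functions. As an aside, on later kernels that provide module_i2c_driver() (not available to this patch) the same registration boilerplate reduces to a single line:

module_i2c_driver(mt9v011_driver);

which generates an equivalent init/exit pair calling i2c_add_driver() and i2c_del_driver().
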
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index b48473c7896b..b96171cc79f9 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -184,7 +184,7 @@ static int mt9v022_init(struct i2c_client *client)
static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (enable)
@@ -273,7 +273,7 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
struct v4l2_rect rect = a->c;
int ret;
@@ -334,7 +334,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
a->c = mt9v022->rect;
@@ -360,7 +360,7 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9v022_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
mf->width = mt9v022->rect.width;
@@ -375,7 +375,7 @@ static int mt9v022_g_fmt(struct v4l2_subdev *sd,
static int mt9v022_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
struct v4l2_crop a = {
.c = {
@@ -422,7 +422,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
static int mt9v022_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
const struct mt9v022_datafmt *fmt;
int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
@@ -448,7 +448,7 @@ static int mt9v022_try_fmt(struct v4l2_subdev *sd,
static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
@@ -467,7 +467,7 @@ static int mt9v022_g_chip_ident(struct v4l2_subdev *sd,
static int mt9v022_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -487,7 +487,7 @@ static int mt9v022_g_register(struct v4l2_subdev *sd,
static int mt9v022_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
return -EINVAL;
@@ -565,7 +565,7 @@ static struct soc_camera_ops mt9v022_ops = {
static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct v4l2_queryctrl *qctrl;
unsigned long range;
int data;
@@ -622,7 +622,7 @@ static int mt9v022_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int data;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct v4l2_queryctrl *qctrl;
qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
@@ -817,7 +817,7 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
*lines = mt9v022->y_skip_top;
@@ -838,7 +838,7 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
if (index >= mt9v022->num_fmts)
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 5c17f9ec3d7c..5e486a88ad7c 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -161,7 +161,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
@@ -385,7 +385,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
&pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx1_buffer), icd);
+ sizeof(struct mx1_buffer), icd, NULL);
}
static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
@@ -638,7 +638,7 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
return 0;
}
-static int mx1_camera_reqbufs(struct soc_camera_file *icf,
+static int mx1_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -650,7 +650,7 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
* it hadn't triggered
*/
for (i = 0; i < p->count; i++) {
- struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i],
+ struct mx1_buffer *buf = container_of(icd->vb_vidq.bufs[i],
struct mx1_buffer, vb);
buf->inwork = 0;
INIT_LIST_HEAD(&buf->vb.queue);
@@ -661,10 +661,10 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int mx1_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct mx1_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next, struct mx1_buffer,
+ buf = list_entry(icd->vb_vidq.stream.next, struct mx1_buffer,
vb.stream);
poll_wait(file, &buf->vb.done, pt);
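
The mx1 hunks above show the two videobuf API changes that recur in every soc_camera host converted by this series: videobuf_waiton() now takes the queue as its first argument, and videobuf_queue_dma_contig_init() gained a trailing ext_lock argument (passed as NULL here so the drivers keep their own locking). A minimal sketch of the updated calls, using hypothetical my_* names:

#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/videobuf-dma-contig.h>

static struct videobuf_queue_ops my_videobuf_ops;

static void my_init_videobuf(struct videobuf_queue *q, struct device *dev,
                             spinlock_t *lock, void *priv)
{
        /* the trailing NULL is the new optional ext_lock mutex */
        videobuf_queue_dma_contig_init(q, &my_videobuf_ops, dev, lock,
                                       V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                       V4L2_FIELD_NONE,
                                       sizeof(struct videobuf_buffer),
                                       priv, NULL);
}

static void my_free_buffer(struct videobuf_queue *vq,
                           struct videobuf_buffer *vb)
{
        /* the queue is now passed explicitly to videobuf_waiton() */
        videobuf_waiton(vq, vb, 0, 0);
        videobuf_dma_contig_free(vq, vb);
        vb->state = VIDEOBUF_NEEDS_INIT;
}
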
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index b6ea67221d1d..4a27862da30d 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -461,9 +461,9 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
/*
* This waits until this buffer is out of danger, i.e., until it is no
- * longer in STATE_QUEUED or STATE_ACTIVE
+ * longer in state VIDEOBUF_QUEUED or VIDEOBUF_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
dev_dbg(&icd->dev, "%s freed\n", __func__);
@@ -640,15 +640,27 @@ static void mx2_videobuf_release(struct videobuf_queue *vq,
* Terminate only queued but inactive buffers. Active buffers are
* released when they become inactive after videobuf_waiton().
*
- * FIXME: implement forced termination of active buffers, so that the
- * user won't get stuck in an uninterruptible state. This requires a
- * specific handling for each of the three DMA types that this driver
- * supports.
+ * FIXME: implement forced termination of active buffers for mx27 and
+ * mx27 eMMA, so that the user won't get stuck in an uninterruptible
+ * state. This requires specific handling for each of these DMA
+ * types.
*/
spin_lock_irqsave(&pcdev->lock, flags);
if (vb->state == VIDEOBUF_QUEUED) {
list_del(&vb->queue);
vb->state = VIDEOBUF_ERROR;
+ } else if (cpu_is_mx25() && vb->state == VIDEOBUF_ACTIVE) {
+ if (pcdev->fb1_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB1);
+ pcdev->fb1_active = NULL;
+ } else if (pcdev->fb2_active == buf) {
+ pcdev->csicr1 &= ~CSICR1_FB2_DMA_INTEN;
+ writel(0, pcdev->base_csi + CSIDMASA_FB2);
+ pcdev->fb2_active = NULL;
+ }
+ writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
+ vb->state = VIDEOBUF_ERROR;
}
spin_unlock_irqrestore(&pcdev->lock, flags);
@@ -670,7 +682,7 @@ static void mx2_camera_init_videobuf(struct videobuf_queue *q,
videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_NONE, sizeof(struct mx2_buffer), icd);
+ V4L2_FIELD_NONE, sizeof(struct mx2_buffer), icd, NULL);
}
#define MX2_BUS_FLAGS (SOCAM_DATAWIDTH_8 | \
@@ -716,8 +728,11 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
/*
* We only use the EMMA engine to get rid of the broken
* DMA Engine. No color space consversion at the moment.
- * We adjust incoming and outgoing pixelformat to rgb16
- * and adjust the bytesperline accordingly.
+ * We set the incoming and outgoing pixelformat to a
+ * 16 bit wide format and adjust the bytesperline
+ * accordingly. With this configuration the input data
+ * will not be changed by the eMMA and can be any
+ * 16 bit pixelformat.
*/
writel(PRP_CNTL_CH1EN |
PRP_CNTL_CSIEN |
@@ -903,10 +918,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* eMMA can only do RGB565 */
- if (mx27_camera_emma(pcdev) && pix->pixelformat != V4L2_PIX_FMT_RGB565)
- return -EINVAL;
-
mf.width = pix->width;
mf.height = pix->height;
mf.field = pix->field;
@@ -950,10 +961,6 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
/* FIXME: implement MX27 limits */
- /* eMMA can only do RGB565 */
- if (mx27_camera_emma(pcdev) && pixfmt != V4L2_PIX_FMT_RGB565)
- return -EINVAL;
-
/* limit to MX25 hardware capabilities */
if (cpu_is_mx25()) {
if (xlate->host_fmt->bits_per_sample <= 8)
@@ -1426,6 +1433,9 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
if (err)
goto exit_free_emma;
+ dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
+ clk_get_rate(pcdev->clk_csi));
+
return 0;
exit_free_emma:
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index a9be14c23912..29c5fc348133 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -185,7 +185,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx3_camera_buffer *buf
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
if (txd) {
ichan = to_idmac_chan(txd->chan);
async_tx_ack(txd);
@@ -441,7 +441,8 @@ static void mx3_camera_init_videobuf(struct videobuf_queue *q,
&mx3_cam->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct mx3_camera_buffer), icd);
+ sizeof(struct mx3_camera_buffer), icd,
+ NULL);
}
/* First part of ipu_csi_init_interface() */
@@ -976,7 +977,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int mx3_camera_reqbufs(struct soc_camera_file *icf,
+static int mx3_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
return 0;
@@ -984,9 +985,9 @@ static int mx3_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int mx3_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
- return videobuf_poll_stream(file, &icf->vb_vidq, pt);
+ return videobuf_poll_stream(file, &icd->vb_vidq, pt);
}
static int mx3_camera_querycap(struct soc_camera_host *ici,
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index b1dbcf1d2bcb..94ba698d0ad4 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -32,7 +32,6 @@
#include "tea6415c.h"
#include "tea6420.h"
-#define I2C_SAA5246A 0x11
#define I2C_SAA7111A 0x24
#define I2C_TDA9840 0x42
#define I2C_TEA6415C 0x43
@@ -186,21 +185,17 @@ static int mxb_probe(struct saa7146_dev *dev)
}
mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "saa7115", "saa7111", I2C_SAA7111A, NULL);
+ NULL, "saa7111", I2C_SAA7111A, NULL);
mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6420", "tea6420", I2C_TEA6420_1, NULL);
+ NULL, "tea6420", I2C_TEA6420_1, NULL);
mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6420", "tea6420", I2C_TEA6420_2, NULL);
+ NULL, "tea6420", I2C_TEA6420_2, NULL);
mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tea6415c", "tea6415c", I2C_TEA6415C, NULL);
+ NULL, "tea6415c", I2C_TEA6415C, NULL);
mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tda9840", "tda9840", I2C_TDA9840, NULL);
+ NULL, "tda9840", I2C_TDA9840, NULL);
mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "tuner", "tuner", I2C_TUNER, NULL);
- if (v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
- "saa5246a", "saa5246a", I2C_SAA5246A, NULL)) {
- printk(KERN_INFO "mxb: found teletext decoder\n");
- }
+ NULL, "tuner", I2C_TUNER, NULL);
/* check if all devices are present */
if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ed51b1552e1..15f8793e325b 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1341,7 +1341,7 @@ static int omap_vout_open(struct file *file)
videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
&vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vout);
+ sizeof(struct videobuf_buffer), vout, NULL);
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
return 0;
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
new file mode 100644
index 000000000000..7c30e62b50db
--- /dev/null
+++ b/drivers/media/video/omap1_camera.c
@@ -0,0 +1,1702 @@
+/*
+ * V4L2 SoC Camera driver for OMAP1 Camera Interface
+ *
+ * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on V4L2 Driver for i.MXL/i.MXL camera (CSI) host
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * Based on PXA SoC camera driver
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware specific bits initially based on former work by Matt Callow
+ * drivers/media/video/omap/omap1510cam.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <media/omap1_camera.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
+
+#include <plat/dma.h>
+
+
+#define DRIVER_NAME "omap1-camera"
+#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+
+
+/*
+ * ---------------------------------------------------------------------------
+ * OMAP1 Camera Interface registers
+ * ---------------------------------------------------------------------------
+ */
+
+#define REG_CTRLCLOCK 0x00
+#define REG_IT_STATUS 0x04
+#define REG_MODE 0x08
+#define REG_STATUS 0x0C
+#define REG_CAMDATA 0x10
+#define REG_GPIO 0x14
+#define REG_PEAK_COUNTER 0x18
+
+/* CTRLCLOCK bit shifts */
+#define LCLK_EN BIT(7)
+#define DPLL_EN BIT(6)
+#define MCLK_EN BIT(5)
+#define CAMEXCLK_EN BIT(4)
+#define POLCLK BIT(3)
+#define FOSCMOD_SHIFT 0
+#define FOSCMOD_MASK (0x7 << FOSCMOD_SHIFT)
+#define FOSCMOD_12MHz 0x0
+#define FOSCMOD_6MHz 0x2
+#define FOSCMOD_9_6MHz 0x4
+#define FOSCMOD_24MHz 0x5
+#define FOSCMOD_8MHz 0x6
+
+/* IT_STATUS bit shifts */
+#define DATA_TRANSFER BIT(5)
+#define FIFO_FULL BIT(4)
+#define H_DOWN BIT(3)
+#define H_UP BIT(2)
+#define V_DOWN BIT(1)
+#define V_UP BIT(0)
+
+/* MODE bit shifts */
+#define RAZ_FIFO BIT(18)
+#define EN_FIFO_FULL BIT(17)
+#define EN_NIRQ BIT(16)
+#define THRESHOLD_SHIFT 9
+#define THRESHOLD_MASK (0x7f << THRESHOLD_SHIFT)
+#define DMA BIT(8)
+#define EN_H_DOWN BIT(7)
+#define EN_H_UP BIT(6)
+#define EN_V_DOWN BIT(5)
+#define EN_V_UP BIT(4)
+#define ORDERCAMD BIT(3)
+
+#define IRQ_MASK (EN_V_UP | EN_V_DOWN | EN_H_UP | EN_H_DOWN | \
+ EN_NIRQ | EN_FIFO_FULL)
+
+/* STATUS bit shifts */
+#define HSTATUS BIT(1)
+#define VSTATUS BIT(0)
+
+/* GPIO bit shifts */
+#define CAM_RST BIT(0)
+
+/* end of OMAP1 Camera Interface registers */
+
+
+#define SOCAM_BUS_FLAGS (SOCAM_MASTER | \
+ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | \
+ SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING | \
+ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8)
+
+
+#define FIFO_SIZE ((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
+#define FIFO_SHIFT __fls(FIFO_SIZE)
+
+#define DMA_BURST_SHIFT (1 + OMAP_DMA_DATA_BURST_4)
+#define DMA_BURST_SIZE (1 << DMA_BURST_SHIFT)
+
+#define DMA_ELEMENT_SHIFT OMAP_DMA_DATA_TYPE_S32
+#define DMA_ELEMENT_SIZE (1 << DMA_ELEMENT_SHIFT)
+
+#define DMA_FRAME_SHIFT_CONTIG (FIFO_SHIFT - 1)
+#define DMA_FRAME_SHIFT_SG DMA_BURST_SHIFT
+
+#define DMA_FRAME_SHIFT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? \
+ DMA_FRAME_SHIFT_CONTIG : \
+ DMA_FRAME_SHIFT_SG)
+#define DMA_FRAME_SIZE(x) (1 << DMA_FRAME_SHIFT(x))
+#define DMA_SYNC OMAP_DMA_SYNC_FRAME
+#define THRESHOLD_LEVEL DMA_FRAME_SIZE
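/*
 * Illustrative note (not part of the driver): assuming the plat-omap
 * values OMAP_DMA_DATA_TYPE_S32 == 2 and OMAP_DMA_DATA_BURST_4 == 1,
 * the macros above work out to:
 *
 *   FIFO_SIZE        = 0x7f + 1 = 128, so FIFO_SHIFT = 7
 *   DMA_ELEMENT_SIZE = 1 << 2 = 4 bytes
 *
 *   CONTIG mode: DMA_FRAME_SHIFT = 7 - 1 = 6, one DMA frame is
 *                64 elements * 4 bytes = 256 bytes, FIFO threshold 64
 *   SG mode:     DMA_FRAME_SHIFT = 1 + 1 = 2, one DMA frame is
 *                4 elements * 4 bytes = 16 bytes, FIFO threshold 4
 *
 * These byte counts are what is_dma_aligned() and dma_align() below
 * check: bytesperline must be a multiple of DMA_ELEMENT_SIZE and the
 * total frame size a multiple of the per-mode DMA frame size in bytes.
 */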
+
+
+#define MAX_VIDEO_MEM 4 /* arbitrary video memory limit in MB */
+
+
+/*
+ * Structures
+ */
+
+/* buffer for one video frame */
+struct omap1_cam_buf {
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ int inwork;
+ struct scatterlist *sgbuf;
+ int sgcount;
+ int bytes_left;
+ enum videobuf_state result;
+};
+
+struct omap1_cam_dev {
+ struct soc_camera_host soc_host;
+ struct soc_camera_device *icd;
+ struct clk *clk;
+
+ unsigned int irq;
+ void __iomem *base;
+
+ int dma_ch;
+
+ struct omap1_cam_platform_data *pdata;
+ struct resource *res;
+ unsigned long pflags;
+ unsigned long camexclk;
+
+ struct list_head capture;
+
+ /* lock used to protect videobuf */
+ spinlock_t lock;
+
+ /* Pointers to DMA buffers */
+ struct omap1_cam_buf *active;
+ struct omap1_cam_buf *ready;
+
+ enum omap1_cam_vb_mode vb_mode;
+ int (*mmap_mapper)(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma);
+
+ u32 reg_cache[0];
+};
+
+
+static void cam_write(struct omap1_cam_dev *pcdev, u16 reg, u32 val)
+{
+ pcdev->reg_cache[reg / sizeof(u32)] = val;
+ __raw_writel(val, pcdev->base + reg);
+}
+
+static u32 cam_read(struct omap1_cam_dev *pcdev, u16 reg, bool from_cache)
+{
+ return !from_cache ? __raw_readl(pcdev->base + reg) :
+ pcdev->reg_cache[reg / sizeof(u32)];
+}
+
+#define CAM_READ(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, false)
+#define CAM_WRITE(pcdev, reg, val) \
+ cam_write(pcdev, REG_##reg, val)
+#define CAM_READ_CACHE(pcdev, reg) \
+ cam_read(pcdev, REG_##reg, true)
+
+/*
+ * Videobuf operations
+ */
+static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ *size = bytes_per_line * icd->user_height;
+
+ if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
+ *count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
+
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
+
+ dev_dbg(icd->dev.parent,
+ "%s: count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ struct videobuf_buffer *vb = &buf->vb;
+
+ BUG_ON(in_interrupt());
+
+ videobuf_waiton(vq, vb, 0, 0);
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ videobuf_dma_contig_free(vq, vb);
+ } else {
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->dev.parent;
+ struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+ videobuf_dma_unmap(dev, dma);
+ videobuf_dma_free(dma);
+ }
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int omap1_videobuf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ WARN_ON(!list_empty(&vb->queue));
+
+ BUG_ON(NULL == icd->current_fmt);
+
+ buf->inwork = 1;
+
+ if (buf->code != icd->current_fmt->code || vb->field != field ||
+ vb->width != icd->user_width ||
+ vb->height != icd->user_height) {
+ buf->code = icd->current_fmt->code;
+ vb->width = icd->user_width;
+ vb->height = icd->user_height;
+ vb->field = field;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ vb->size = bytes_per_line * vb->height;
+
+ if (vb->baddr && vb->bsize < vb->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ buf->inwork = 0;
+
+ return 0;
+fail:
+ free_buffer(vq, buf, pcdev->vb_mode);
+out:
+ buf->inwork = 0;
+ return ret;
+}
+
+static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ dma_addr_t dma_addr;
+ unsigned int block_size;
+
+ if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ dma_addr = videobuf_to_dma_contig(&buf->vb);
+ block_size = buf->vb.size;
+ } else {
+ if (WARN_ON(!buf->sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ dma_addr = sg_dma_address(buf->sgbuf);
+ if (WARN_ON(!dma_addr)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ block_size = sg_dma_len(buf->sgbuf);
+ if (WARN_ON(!block_size)) {
+ buf->sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ return;
+ }
+ if (unlikely(buf->bytes_left < block_size))
+ block_size = buf->bytes_left;
+ if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1))) {
+ dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE);
+ block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
+ DMA_ELEMENT_SIZE - 1);
+ }
+ buf->bytes_left -= block_size;
+ buf->sgcount++;
+ }
+
+ omap_set_dma_dest_params(dma_ch,
+ OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
+ omap_set_dma_transfer_params(dma_ch,
+ OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
+ block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
+ DMA_SYNC, 0, 0);
+}
+
+static struct omap1_cam_buf *prepare_next_vb(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf;
+
+ /*
+ * If there is already a buffer pointed to by pcdev->ready,
+ * (re)use it, otherwise try to fetch and configure a new one.
+ */
+ buf = pcdev->ready;
+ if (!buf) {
+ if (list_empty(&pcdev->capture))
+ return buf;
+ buf = list_entry(pcdev->capture.next,
+ struct omap1_cam_buf, vb.queue);
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ pcdev->ready = buf;
+ list_del_init(&buf->vb.queue);
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we can safely enter next buffer parameters
+ * into the DMA programming register set after the DMA
+ * has already been activated on the previous buffer
+ */
+ set_dma_dest_params(pcdev->dma_ch, buf, pcdev->vb_mode);
+ } else {
+ /*
+ * In SG mode, the above is not safe since there are probably
+ * a bunch of sgbufs from previous sglist still pending.
+ * Instead, mark the sglist fresh for the upcoming
+ * try_next_sgbuf().
+ */
+ buf->sgbuf = NULL;
+ }
+
+ return buf;
+}
+
+static struct scatterlist *try_next_sgbuf(int dma_ch, struct omap1_cam_buf *buf)
+{
+ struct scatterlist *sgbuf;
+
+ if (likely(buf->sgbuf)) {
+ /* current sglist is active */
+ if (unlikely(!buf->bytes_left)) {
+ /* indicate sglist complete */
+ sgbuf = NULL;
+ } else {
+ /* process next sgbuf */
+ sgbuf = sg_next(buf->sgbuf);
+ if (WARN_ON(!sgbuf)) {
+ buf->result = VIDEOBUF_ERROR;
+ } else if (WARN_ON(!sg_dma_len(sgbuf))) {
+ sgbuf = NULL;
+ buf->result = VIDEOBUF_ERROR;
+ }
+ }
+ buf->sgbuf = sgbuf;
+ } else {
+ /* sglist is fresh, initialize it before using */
+ struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+
+ sgbuf = dma->sglist;
+ if (!(WARN_ON(!sgbuf))) {
+ buf->sgbuf = sgbuf;
+ buf->sgcount = 0;
+ buf->bytes_left = buf->vb.size;
+ buf->result = VIDEOBUF_DONE;
+ }
+ }
+ if (sgbuf)
+ /*
+ * Put our next sgbuf parameters (address, size)
+ * into the DMA programming register set.
+ */
+ set_dma_dest_params(dma_ch, buf, OMAP1_CAM_DMA_SG);
+
+ return sgbuf;
+}
+
+static void start_capture(struct omap1_cam_dev *pcdev)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;
+
+ if (WARN_ON(!buf))
+ return;
+
+ /*
+ * Enable start of frame interrupt, which we will use for activating
+ * our end of frame watchdog when capture actually starts.
+ */
+ mode |= EN_V_UP;
+
+ if (unlikely(ctrlclock & LCLK_EN))
+ /* stop pixel clock before FIFO reset */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ /* reset FIFO */
+ CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);
+
+ omap_start_dma(pcdev->dma_ch);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, it's a good moment for fetching next sgbuf
+ * from the current sglist and, if available, already putting
+ * its parameters into the DMA programming register set.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ /* (re)enable pixel clock */
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
+ /* release FIFO reset */
+ CAM_WRITE(pcdev, MODE, mode);
+}
+
+static void suspend_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+ omap_stop_dma(pcdev->dma_ch);
+}
+
+static void disable_capture(struct omap1_cam_dev *pcdev)
+{
+ u32 mode = CAM_READ_CACHE(pcdev, MODE);
+
+ CAM_WRITE(pcdev, MODE, mode & ~(IRQ_MASK | DMA));
+}
+
+static void omap1_videobuf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct soc_camera_device *icd = vq->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct omap1_cam_buf *buf;
+ u32 mode;
+
+ list_add_tail(&vb->queue, &pcdev->capture);
+ vb->state = VIDEOBUF_QUEUED;
+
+ if (pcdev->active) {
+ /*
+ * Capture in progress, so don't touch pcdev->ready even if
+ * empty. Since the transfer of the DMA programming register set
+ * content to the DMA working register set is done automatically
+ * by the DMA hardware, this can pretty well happen while we
+ * are keeping the lock here. Leave fetching it from the queue
+ * to be done when the next DMA interrupt occurs instead.
+ */
+ return;
+ }
+
+ WARN_ON(pcdev->ready);
+
+ buf = prepare_next_vb(pcdev);
+ if (WARN_ON(!buf))
+ return;
+
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ dev_dbg(icd->dev.parent,
+ "%s: capture not active, setup FIFO, start DMA\n", __func__);
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
+ mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
+ CAM_WRITE(pcdev, MODE, mode | EN_FIFO_FULL | DMA);
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, the above prepare_next_vb() didn't actually
+ * put anything into the DMA programming register set,
+ * so we have to do it now, before activating DMA.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ start_capture(pcdev);
+}
+
+static void omap1_videobuf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct omap1_cam_buf *buf =
+ container_of(vb, struct omap1_cam_buf, vb);
+ struct soc_camera_device *icd = vq->priv_data;
+ struct device *dev = icd->dev.parent;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ switch (vb->state) {
+ case VIDEOBUF_DONE:
+ dev_dbg(dev, "%s (done)\n", __func__);
+ break;
+ case VIDEOBUF_ACTIVE:
+ dev_dbg(dev, "%s (active)\n", __func__);
+ break;
+ case VIDEOBUF_QUEUED:
+ dev_dbg(dev, "%s (queued)\n", __func__);
+ break;
+ case VIDEOBUF_PREPARED:
+ dev_dbg(dev, "%s (prepared)\n", __func__);
+ break;
+ default:
+ dev_dbg(dev, "%s (unknown %d)\n", __func__, vb->state);
+ break;
+ }
+
+ free_buffer(vq, buf, pcdev->vb_mode);
+}
+
+static void videobuf_done(struct omap1_cam_dev *pcdev,
+ enum videobuf_state result)
+{
+ struct omap1_cam_buf *buf = pcdev->active;
+ struct videobuf_buffer *vb;
+ struct device *dev = pcdev->icd->dev.parent;
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (result == VIDEOBUF_ERROR)
+ suspend_capture(pcdev);
+
+ vb = &buf->vb;
+ if (waitqueue_active(&vb->done)) {
+ if (!pcdev->ready && result != VIDEOBUF_ERROR) {
+ /*
+ * No next buffer has been entered into the DMA
+ * programming register set on time (could be done only
+ * while the previous DMA interrupt was processed, not
+ * later), so the last DMA block, be it a whole buffer
+ * if in CONTIG or its last sgbuf if in SG mode, is
+ * about to be reused by the just autoreinitialized DMA
+ * engine, and overwritten with next frame data. The best we
+ * can do is stop the capture as soon as possible,
+ * hopefully before the next frame start.
+ */
+ suspend_capture(pcdev);
+ }
+ vb->state = result;
+ do_gettimeofday(&vb->ts);
+ if (result != VIDEOBUF_ERROR)
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ /* shift in next buffer */
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+
+ if (!buf) {
+ /*
+ * No next buffer was ready on time (see above), so
+ * indicate error condition to force capture restart or
+ * stop, depending on next buffer already queued or not.
+ */
+ result = VIDEOBUF_ERROR;
+ prepare_next_vb(pcdev);
+
+ buf = pcdev->ready;
+ pcdev->active = buf;
+ pcdev->ready = NULL;
+ }
+ } else if (pcdev->ready) {
+ /*
+ * In both CONTIG and SG mode, the DMA engine has possibly
+ * been already autoreinitialized with the preprogrammed
+ * pcdev->ready buffer. We can either accept this fact
+ * and just swap the buffers, or provoke an error condition
+ * and restart capture. The former seems less intrusive.
+ */
+ dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n",
+ __func__);
+ pcdev->active = pcdev->ready;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
+ /*
+ * In SG mode, we have to make sure that the buffer we
+ * are putting back into the pcdev->ready is marked
+ * fresh.
+ */
+ buf->sgbuf = NULL;
+ }
+ pcdev->ready = buf;
+
+ buf = pcdev->active;
+ } else {
+ /*
+ * No next buffer has been entered into
+ * the DMA programming register set on time.
+ */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the DMA engine has already been
+ * reinitialized with the current buffer. Best we can do
+ * is not touching it.
+ */
+ dev_dbg(dev,
+ "%s: nobody waiting on videobuf, reuse it\n",
+ __func__);
+ } else {
+ /*
+ * In SG mode, the DMA engine has just been
+ * autoreinitialized with the last sgbuf from the
+ * current list. Restart capture in order to transfer
+ * next frame start into the first sgbuf, not the last
+ * one.
+ */
+ if (result != VIDEOBUF_ERROR) {
+ suspend_capture(pcdev);
+ result = VIDEOBUF_ERROR;
+ }
+ }
+ }
+
+ if (!buf) {
+ dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__);
+ disable_capture(pcdev);
+ return;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the current buffer parameters had already
+ * been entered into the DMA programming register set while the
+ * buffer was fetched with prepare_next_vb(); they may also have
+ * been transferred into the runtime set and already be active if
+ * the DMA is still running.
+ */
+ } else {
+ /* In SG mode, extra steps are required */
+ if (result == VIDEOBUF_ERROR)
+ /* make sure we (re)use sglist from start on error */
+ buf->sgbuf = NULL;
+
+ /*
+ * In any case, enter the next sgbuf parameters into the DMA
+ * programming register set. They will be used either during
+ * nearest DMA autoreinitialization or, in case of an error,
+ * on DMA startup below.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ }
+
+ if (result == VIDEOBUF_ERROR) {
+ dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n",
+ __func__);
+ start_capture(pcdev);
+ /*
+ * In SG mode, the above also resulted in the next sgbuf
+ * parameters being entered into the DMA programming register
+ * set, making them ready for next DMA autoreinitialization.
+ */
+ }
+
+ /*
+ * Finally, try fetching next buffer.
+ * In CONTIG mode, it will also enter it into the DMA programming
+ * register set, making it ready for next DMA autoreinitialization.
+ */
+ prepare_next_vb(pcdev);
+}
+
+static void dma_isr(int channel, unsigned short status, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct omap1_cam_buf *buf = pcdev->active;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, assume we have just managed to collect the
+ * whole frame, hopefully before our end of frame watchdog is
+ * triggered. Then, all we have to do is disabling the watchdog
+ * for this frame, and calling videobuf_done() with success
+ * indicated.
+ */
+ CAM_WRITE(pcdev, MODE,
+ CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN);
+ videobuf_done(pcdev, VIDEOBUF_DONE);
+ } else {
+ /*
+ * In SG mode, we have to process every sgbuf from the current
+ * sglist, one after another.
+ */
+ if (buf->sgbuf) {
+ /*
+ * Current sglist not completed yet, try fetching next
+ * sgbuf, hopefully putting it into the DMA programming
+ * register set, making it ready for next DMA
+ * autoreinitialization.
+ */
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ if (buf->sgbuf)
+ goto out;
+
+ /*
+ * No more sgbufs left in the current sglist. This
+ * doesn't mean that the whole videobuffer is already
+ * complete, but only that the last sgbuf from the
+ * current sglist is about to be filled. It will be
+ * ready on next DMA interrupt, signalled with the
+ * buf->sgbuf set back to NULL.
+ */
+ if (buf->result != VIDEOBUF_ERROR) {
+ /*
+ * Video frame collected without errors so far,
+ * we can prepare for collecting a next one
+ * as soon as DMA gets autoreinitialized
+ * after the current (last) sgbuf is completed.
+ */
+ buf = prepare_next_vb(pcdev);
+ if (!buf)
+ goto out;
+
+ try_next_sgbuf(pcdev->dma_ch, buf);
+ goto out;
+ }
+ }
+ /* end of videobuf */
+ videobuf_done(pcdev, buf->result);
+ }
+
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+}
+
+static irqreturn_t cam_isr(int irq, void *data)
+{
+ struct omap1_cam_dev *pcdev = data;
+ struct device *dev = pcdev->icd->dev.parent;
+ struct omap1_cam_buf *buf = pcdev->active;
+ u32 it_status;
+ unsigned long flags;
+
+ it_status = CAM_READ(pcdev, IT_STATUS);
+ if (!it_status)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+
+ if (WARN_ON(!buf)) {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == "
+ "%#x\n", __func__, it_status);
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+ goto out;
+ }
+
+ if (unlikely(it_status & FIFO_FULL)) {
+ dev_warn(dev, "%s: FIFO overflow\n", __func__);
+
+ } else if (it_status & V_DOWN) {
+ /* end of video frame watchdog */
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, the watchdog is disabled with
+ * successful DMA end of block interrupt, and reenabled
+ * on next frame start. If we get here, there is nothing
+ * to check, we must be out of sync.
+ */
+ } else {
+ if (buf->sgcount == 2) {
+ /*
+ * If exactly 2 sgbufs from the next sglist have
+ * been programmed into the DMA engine (the
+ * first one already transferred into the DMA
+ * runtime register set, the second one still
+ * in the programming set), then we are in sync.
+ */
+ goto out;
+ }
+ }
+ dev_notice(dev, "%s: unexpected end of video frame\n",
+ __func__);
+
+ } else if (it_status & V_UP) {
+ u32 mode;
+
+ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
+ /*
+ * In CONTIG mode, we need this interrupt every frame
+ * in order to reenable our end of frame watchdog.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE);
+ } else {
+ /*
+ * In SG mode, the below enabled end of frame watchdog
+ * is kept on permanently, so we can turn this one shot
+ * setup off.
+ */
+ mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_UP;
+ }
+
+ if (!(mode & EN_V_DOWN)) {
+ /* (re)enable end of frame watchdog interrupt */
+ mode |= EN_V_DOWN;
+ }
+ CAM_WRITE(pcdev, MODE, mode);
+ goto out;
+
+ } else {
+ dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n",
+ __func__, it_status);
+ goto out;
+ }
+
+ videobuf_done(pcdev, VIDEOBUF_ERROR);
+out:
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct videobuf_queue_ops omap1_videobuf_ops = {
+ .buf_setup = omap1_videobuf_setup,
+ .buf_prepare = omap1_videobuf_prepare,
+ .buf_queue = omap1_videobuf_queue,
+ .buf_release = omap1_videobuf_release,
+};
+
+
+/*
+ * SOC Camera host operations
+ */
+
+static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
+{
+ /* apply/release camera sensor reset if requested by platform data */
+ if (pcdev->pflags & OMAP1_CAMERA_RST_HIGH)
+ CAM_WRITE(pcdev, GPIO, reset);
+ else if (pcdev->pflags & OMAP1_CAMERA_RST_LOW)
+ CAM_WRITE(pcdev, GPIO, !reset);
+}
+
+/*
+ * The following two functions absolutely depend on the fact that
+ * there can be only one camera on the OMAP1 camera sensor interface
+ */
+static int omap1_cam_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ if (pcdev->icd)
+ return -EBUSY;
+
+ clk_enable(pcdev->clk);
+
+ /* setup sensor clock */
+ ctrlclock = CAM_READ(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(CAMEXCLK_EN | MCLK_EN | DPLL_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock &= ~FOSCMOD_MASK;
+ switch (pcdev->camexclk) {
+ case 6000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_6MHz;
+ break;
+ case 8000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_8MHz | DPLL_EN;
+ break;
+ case 9600000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_9_6MHz | DPLL_EN;
+ break;
+ case 12000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_12MHz;
+ break;
+ case 24000000:
+ ctrlclock |= CAMEXCLK_EN | FOSCMOD_24MHz | DPLL_EN;
+ default:
+ break;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~DPLL_EN);
+
+ /* enable internal clock */
+ ctrlclock |= MCLK_EN;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ sensor_reset(pcdev, false);
+
+ pcdev->icd = icd;
+
+ dev_dbg(icd->dev.parent, "OMAP1 Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+
+static void omap1_cam_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ u32 ctrlclock;
+
+ BUG_ON(icd != pcdev->icd);
+
+ suspend_capture(pcdev);
+ disable_capture(pcdev);
+
+ sensor_reset(pcdev, true);
+
+ /* disable and release system clocks */
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ ctrlclock &= ~(MCLK_EN | DPLL_EN | CAMEXCLK_EN);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ ctrlclock = (ctrlclock & ~FOSCMOD_MASK) | FOSCMOD_12MHz;
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | MCLK_EN);
+
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~MCLK_EN);
+
+ clk_disable(pcdev->clk);
+
+ pcdev->icd = NULL;
+
+ dev_dbg(icd->dev.parent,
+ "OMAP1 Camera driver detached from camera %d\n", icd->devnum);
+}
+
+/* Duplicate standard formats based on host capability of byte swapping */
+static const struct soc_mbus_pixelfmt omap1_cam_formats[] = {
+ [V4L2_MBUS_FMT_UYVY8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_VYUY8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_YUYV8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_YVYU8_2X8] = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE] = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE] = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB565_2X8_BE] = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+ [V4L2_MBUS_FMT_RGB565_2X8_LE] = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+};
+
+static int omap1_cam_get_formats(struct soc_camera_device *icd,
+ unsigned int idx, struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->dev.parent;
+ int formats = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "%s: invalid format code #%d: %d\n", __func__,
+ idx, code);
+ return 0;
+ }
+
+ /* Check support for the requested bits-per-sample */
+ if (fmt->bits_per_sample != 8)
+ return 0;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &omap1_cam_formats[code];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "%s: providing format %s "
+ "as byte swapped code #%d\n", __func__,
+ omap1_cam_formats[code].name, code);
+ }
+ default:
+ if (xlate)
+ dev_dbg(dev, "%s: providing format %s "
+ "in pass-through mode\n", __func__,
+ fmt->name);
+ }
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static bool is_dma_aligned(s32 bytes_per_line, unsigned int height,
+ enum omap1_cam_vb_mode vb_mode)
+{
+ int size = bytes_per_line * height;
+
+ return IS_ALIGNED(bytes_per_line, DMA_ELEMENT_SIZE) &&
+ IS_ALIGNED(size, DMA_FRAME_SIZE(vb_mode) * DMA_ELEMENT_SIZE);
+}
+
+static int dma_align(int *width, int *height,
+ const struct soc_mbus_pixelfmt *fmt,
+ enum omap1_cam_vb_mode vb_mode, bool enlarge)
+{
+ s32 bytes_per_line = soc_mbus_bytes_per_line(*width, fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ if (!is_dma_aligned(bytes_per_line, *height, vb_mode)) {
+ unsigned int pxalign = __fls(bytes_per_line / *width);
+ unsigned int salign = DMA_FRAME_SHIFT(vb_mode) +
+ DMA_ELEMENT_SHIFT - pxalign;
+ unsigned int incr = enlarge << salign;
+
+ v4l_bound_align_image(width, 1, *width + incr, 0,
+ height, 1, *height + incr, 0, salign);
+ return 0;
+ }
+ return 1;
+}
+
+#define subdev_call_with_sense(pcdev, dev, icd, sd, function, args...) \
+({ \
+ struct soc_camera_sense sense = { \
+ .master_clock = pcdev->camexclk, \
+ .pixel_clock_max = 0, \
+ }; \
+ int __ret; \
+ \
+ if (pcdev->pdata) \
+ sense.pixel_clock_max = pcdev->pdata->lclk_khz_max * 1000; \
+ icd->sense = &sense; \
+ __ret = v4l2_subdev_call(sd, video, function, ##args); \
+ icd->sense = NULL; \
+ \
+ if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { \
+ if (sense.pixel_clock > sense.pixel_clock_max) { \
+ dev_err(dev, "%s: pixel clock %lu " \
+ "set by the camera too high!\n", \
+ __func__, sense.pixel_clock); \
+ __ret = -EINVAL; \
+ } \
+ } \
+ __ret; \
+})
+
+static int set_mbus_format(struct omap1_cam_dev *pcdev, struct device *dev,
+ struct soc_camera_device *icd, struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf,
+ const struct soc_camera_format_xlate *xlate)
+{
+ s32 bytes_per_line;
+ int ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_mbus_fmt, mf);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: s_mbus_fmt failed\n", __func__);
+ return ret;
+ }
+
+ if (mf->code != xlate->code) {
+ dev_err(dev, "%s: unexpected pixel code change\n", __func__);
+ return -EINVAL;
+ }
+
+ bytes_per_line = soc_mbus_bytes_per_line(mf->width, xlate->host_fmt);
+ if (bytes_per_line < 0) {
+ dev_err(dev, "%s: soc_mbus_bytes_per_line() failed\n",
+ __func__);
+ return bytes_per_line;
+ }
+
+ if (!is_dma_aligned(bytes_per_line, mf->height, pcdev->vb_mode)) {
+ dev_err(dev, "%s: resulting geometry %ux%u not DMA aligned\n",
+ __func__, mf->width, mf->height);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int omap1_cam_set_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *crop)
+{
+ struct v4l2_rect *rect = &crop->c;
+ const struct soc_camera_format_xlate *xlate = icd->current_fmt;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct device *dev = icd->dev.parent;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_crop, crop);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0) {
+ dev_warn(dev, "%s: failed to fetch current format\n", __func__);
+ return ret;
+ }
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, mf.width, mf.height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ if (!ret) {
+ /* sensor returned geometry not DMA aligned, trying to fix */
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+ }
+
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+
+ return 0;
+}
+
+static int omap1_cam_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct device *dev = icd->dev.parent;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(dev, "%s: format %#x not found\n", __func__,
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to align %ux%u %s with DMA\n",
+ __func__, pix->width, pix->height,
+ xlate->host_fmt->name);
+ return ret;
+ }
+
+ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate);
+ if (ret < 0) {
+ dev_err(dev, "%s: failed to set format\n", __func__);
+ return ret;
+ }
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ return 0;
+}
+
+static int omap1_cam_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+ /* TODO: limit to mx1 hardware capabilities */
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->dev.parent, "Format %#x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ /* limit to sensor capabilities */
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ return 0;
+}
+
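+/* videobuf DMA mode; false = dma-contig, true = dma-sg (see the sg_mode module parameter) */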
+static bool sg_mode;
+
+/*
+ * Local mmap_mapper wrapper,
+ * used for detecting videobuf-dma-contig buffer allocation failures
+ * and switching to videobuf-dma-sg automatically for future attempts.
+ */
+static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct soc_camera_device *icd = q->priv_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ int ret;
+
+ ret = pcdev->mmap_mapper(q, buf, vma);
+
+ if (ret == -ENOMEM)
+ sg_mode = true;
+
+ return ret;
+}
+
+static void omap1_cam_init_videobuf(struct videobuf_queue *q,
+ struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+
+ if (!sg_mode)
+ videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
+ icd->dev.parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd);
+ else
+ videobuf_queue_sg_init(q, &omap1_videobuf_ops,
+ icd->dev.parent, &pcdev->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct omap1_cam_buf), icd);
+
+ /* use videobuf mode (auto)selected with the module parameter */
+ pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
+
+ /*
+ * Ensure we substitute the videobuf-dma-contig version of the
+ * mmap_mapper() callback with our own wrapper, used for switching
+ * automatically to videobuf-dma-sg on buffer allocation failure.
+ */
+ if (!sg_mode && q->int_ops->mmap_mapper != omap1_cam_mmap_mapper) {
+ pcdev->mmap_mapper = q->int_ops->mmap_mapper;
+ q->int_ops->mmap_mapper = omap1_cam_mmap_mapper;
+ }
+}
+
+static int omap1_cam_reqbufs(struct soc_camera_file *icf,
+ struct v4l2_requestbuffers *p)
+{
+ int i;
+
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
+ * check whether .prepare is ever called on a linked buffer, or whether
+ * a DMA IRQ can occur for an in-work or unlinked buffer. So far it
+ * has not triggered.
+ */
+ for (i = 0; i < p->count; i++) {
+ struct omap1_cam_buf *buf = container_of(icf->vb_vidq.bufs[i],
+ struct omap1_cam_buf, vb);
+ buf->inwork = 0;
+ INIT_LIST_HEAD(&buf->vb.queue);
+ }
+
+ return 0;
+}
+
+static int omap1_cam_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ /* cap->driver is filled in by the calling soc_camera core */
+ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
+ cap->version = VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
+ __u32 pixfmt)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct omap1_cam_dev *pcdev = ici->priv;
+ struct device *dev = icd->dev.parent;
+ const struct soc_camera_format_xlate *xlate;
+ const struct soc_mbus_pixelfmt *fmt;
+ unsigned long camera_flags, common_flags;
+ u32 ctrlclock, mode;
+ int ret;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+
+ common_flags = soc_camera_bus_param_compatible(camera_flags,
+ SOCAM_BUS_FLAGS);
+ if (!common_flags)
+ return -EINVAL;
+
+ /* Make choices, possibly based on platform configuration */
+ if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
+ (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if (!pcdev->pdata ||
+ pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING)
+ common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ else
+ common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ }
+
+ ret = icd->ops->set_bus_param(icd, common_flags);
+ if (ret < 0)
+ return ret;
+
+ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (common_flags & SOCAM_PCLK_SAMPLE_RISING) {
+ dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n");
+ ctrlclock |= POLCLK;
+ } else {
+ dev_dbg(dev, "CTRLCLOCK_REG &= ~POLCLK\n");
+ ctrlclock &= ~POLCLK;
+ }
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
+
+ if (ctrlclock & LCLK_EN)
+ CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
+
+ /* select bus endianness */
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ fmt = xlate->host_fmt;
+
+ mode = CAM_READ(pcdev, MODE) & ~(RAZ_FIFO | IRQ_MASK | DMA);
+ if (fmt->order == SOC_MBUS_ORDER_LE) {
+ dev_dbg(dev, "MODE_REG &= ~ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode & ~ORDERCAMD);
+ } else {
+ dev_dbg(dev, "MODE_REG |= ORDERCAMD\n");
+ CAM_WRITE(pcdev, MODE, mode | ORDERCAMD);
+ }
+
+ return 0;
+}
+
+static unsigned int omap1_cam_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_file *icf = file->private_data;
+ struct omap1_cam_buf *buf;
+
+ buf = list_entry(icf->vb_vidq.stream.next, struct omap1_cam_buf,
+ vb.stream);
+
+ poll_wait(file, &buf->vb.done, pt);
+
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static struct soc_camera_host_ops omap1_host_ops = {
+ .owner = THIS_MODULE,
+ .add = omap1_cam_add_device,
+ .remove = omap1_cam_remove_device,
+ .get_formats = omap1_cam_get_formats,
+ .set_crop = omap1_cam_set_crop,
+ .set_fmt = omap1_cam_set_fmt,
+ .try_fmt = omap1_cam_try_fmt,
+ .init_videobuf = omap1_cam_init_videobuf,
+ .reqbufs = omap1_cam_reqbufs,
+ .querycap = omap1_cam_querycap,
+ .set_bus_param = omap1_cam_set_bus_param,
+ .poll = omap1_cam_poll,
+};
+
+static int __init omap1_cam_probe(struct platform_device *pdev)
+{
+ struct omap1_cam_dev *pcdev;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int irq;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || (int)irq <= 0) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ clk = clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ goto exit;
+ }
+
+ pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
+ if (!pcdev) {
+ dev_err(&pdev->dev, "Could not allocate pcdev\n");
+ err = -ENOMEM;
+ goto exit_put_clk;
+ }
+
+ pcdev->res = res;
+ pcdev->clk = clk;
+
+ pcdev->pdata = pdev->dev.platform_data;
+ if (pcdev->pdata) {
+ pcdev->pflags = pcdev->pdata->flags;
+ pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
+ }
+
+ switch (pcdev->camexclk) {
+ case 6000000:
+ case 8000000:
+ case 9600000:
+ case 12000000:
+ case 24000000:
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "Incorrect sensor clock frequency %ld kHz, "
+ "should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
+ "please correct your platform data\n",
+ pcdev->pdata->camexclk_khz);
+ pcdev->camexclk = 0;
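+ /* fall through */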
+ case 0:
+ dev_info(&pdev->dev,
+ "Not providing sensor clock\n");
+ }
+
+ INIT_LIST_HEAD(&pcdev->capture);
+ spin_lock_init(&pcdev->lock);
+
+ /*
+ * Request the region.
+ */
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
+ err = -EBUSY;
+ goto exit_kfree;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ sensor_reset(pcdev, true);
+
+ err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME,
+ dma_isr, (void *)pcdev, &pcdev->dma_ch);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
+ err = -EBUSY;
+ goto exit_iounmap;
+ }
+ dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
+
+ /* preconfigure DMA */
+ omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB,
+ OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA,
+ 0, 0);
+ omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4);
+ /* setup DMA autoinitialization */
+ omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
+
+ err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register camera interrupt handler\n");
+ goto exit_free_dma;
+ }
+
+ pcdev->soc_host.drv_name = DRIVER_NAME;
+ pcdev->soc_host.ops = &omap1_host_ops;
+ pcdev->soc_host.priv = pcdev;
+ pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
+ pcdev->soc_host.nr = pdev->id;
+
+ err = soc_camera_host_register(&pcdev->soc_host);
+ if (err)
+ goto exit_free_irq;
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
+
+ return 0;
+
+exit_free_irq:
+ free_irq(pcdev->irq, pcdev);
+exit_free_dma:
+ omap_free_dma(pcdev->dma_ch);
+exit_iounmap:
+ iounmap(base);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_kfree:
+ kfree(pcdev);
+exit_put_clk:
+ clk_put(clk);
+exit:
+ return err;
+}
+
+static int __exit omap1_cam_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct omap1_cam_dev *pcdev = container_of(soc_host,
+ struct omap1_cam_dev, soc_host);
+ struct resource *res;
+
+ free_irq(pcdev->irq, pcdev);
+
+ omap_free_dma(pcdev->dma_ch);
+
+ soc_camera_host_unregister(soc_host);
+
+ iounmap(pcdev->base);
+
+ res = pcdev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(pcdev->clk);
+
+ kfree(pcdev);
+
+ dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
+
+ return 0;
+}
+
+static struct platform_driver omap1_cam_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = omap1_cam_probe,
+ .remove = __exit_p(omap1_cam_remove),
+};
+
+static int __init omap1_cam_init(void)
+{
+ return platform_driver_register(&omap1_cam_driver);
+}
+module_init(omap1_cam_init);
+
+static void __exit omap1_cam_exit(void)
+{
+ platform_driver_unregister(&omap1_cam_driver);
+}
+module_exit(omap1_cam_exit);
+
+module_param(sg_mode, bool, 0644);
+MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
+
+MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 926a5aa6f7f8..378b094aff16 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -420,7 +420,7 @@ static void omap24xxcam_vbq_release(struct videobuf_queue *vbq,
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
/* wait for buffer, especially to get out of the sgdma queue */
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vbq, vb, 0, 0);
if (vb->memory == V4L2_MEMORY_MMAP) {
dma_unmap_sg(vbq->dev, dma->sglist, dma->sglen,
dma->direction);
@@ -1491,7 +1491,7 @@ static int omap24xxcam_open(struct file *file)
videobuf_queue_sg_init(&fh->vbq, &omap24xxcam_vbq_ops, NULL,
&fh->vbq_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), fh);
+ sizeof(struct videobuf_buffer), fh, NULL);
return 0;
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
new file mode 100644
index 000000000000..b7cfeab0948c
--- /dev/null
+++ b/drivers/media/video/ov6650.c
@@ -0,0 +1,1225 @@
+/*
+ * V4L2 SoC Camera driver for OmniVision OV6650 Camera Sensor
+ *
+ * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Based on OmniVision OV96xx Camera Driver
+ * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on ov772x camera driver:
+ * Copyright (C) 2008 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on ov7670 and soc_camera_platform driver,
+ * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * Hardware-specific bits initially based on former work by Matt Callow
+ * drivers/media/video/omap/sensor_ov6650.c
+ * Copyright (C) 2006 Matt Callow
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+
+
+/* Register definitions */
+#define REG_GAIN 0x00 /* range 00 - 3F */
+#define REG_BLUE 0x01
+#define REG_RED 0x02
+#define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */
+#define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */
+
+#define REG_BRT 0x06
+
+#define REG_PIDH 0x0a
+#define REG_PIDL 0x0b
+
+#define REG_AECH 0x10
+#define REG_CLKRC 0x11 /* Data Format and Internal Clock */
+ /* [7:6] Input system clock (MHz)*/
+ /* 00=8, 01=12, 10=16, 11=24 */
+ /* [5:0]: Internal Clock Pre-Scaler */
+#define REG_COMA 0x12 /* [7] Reset */
+#define REG_COMB 0x13
+#define REG_COMC 0x14
+#define REG_COMD 0x15
+#define REG_COML 0x16
+#define REG_HSTRT 0x17
+#define REG_HSTOP 0x18
+#define REG_VSTRT 0x19
+#define REG_VSTOP 0x1a
+#define REG_PSHFT 0x1b
+#define REG_MIDH 0x1c
+#define REG_MIDL 0x1d
+#define REG_HSYNS 0x1e
+#define REG_HSYNE 0x1f
+#define REG_COME 0x20
+#define REG_YOFF 0x21
+#define REG_UOFF 0x22
+#define REG_VOFF 0x23
+#define REG_AEW 0x24
+#define REG_AEB 0x25
+#define REG_COMF 0x26
+#define REG_COMG 0x27
+#define REG_COMH 0x28
+#define REG_COMI 0x29
+
+#define REG_FRARL 0x2b
+#define REG_COMJ 0x2c
+#define REG_COMK 0x2d
+#define REG_AVGY 0x2e
+#define REG_REF0 0x2f
+#define REG_REF1 0x30
+#define REG_REF2 0x31
+#define REG_FRAJH 0x32
+#define REG_FRAJL 0x33
+#define REG_FACT 0x34
+#define REG_L1AEC 0x35
+#define REG_AVGU 0x36
+#define REG_AVGV 0x37
+
+#define REG_SPCB 0x60
+#define REG_SPCC 0x61
+#define REG_GAM1 0x62
+#define REG_GAM2 0x63
+#define REG_GAM3 0x64
+#define REG_SPCD 0x65
+
+#define REG_SPCE 0x68
+#define REG_ADCL 0x69
+
+#define REG_RMCO 0x6c
+#define REG_GMCO 0x6d
+#define REG_BMCO 0x6e
+
+
+/* Register bits, values, etc. */
+#define OV6650_PIDH 0x66 /* high byte of product ID number */
+#define OV6650_PIDL 0x50 /* low byte of product ID number */
+#define OV6650_MIDH 0x7F /* high byte of mfg ID */
+#define OV6650_MIDL 0xA2 /* low byte of mfg ID */
+
+#define DEF_GAIN 0x00
+#define DEF_BLUE 0x80
+#define DEF_RED 0x80
+
+#define SAT_SHIFT 4
+#define SAT_MASK (0xf << SAT_SHIFT)
+#define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK)
+
+#define HUE_EN BIT(5)
+#define HUE_MASK 0x1f
+#define DEF_HUE 0x10
+#define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK))
+
+#define DEF_AECH 0x4D
+
+#define CLKRC_6MHz 0x00
+#define CLKRC_12MHz 0x40
+#define CLKRC_16MHz 0x80
+#define CLKRC_24MHz 0xc0
+#define CLKRC_DIV_MASK 0x3f
+#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+
+#define COMA_RESET BIT(7)
+#define COMA_QCIF BIT(5)
+#define COMA_RAW_RGB BIT(4)
+#define COMA_RGB BIT(3)
+#define COMA_BW BIT(2)
+#define COMA_WORD_SWAP BIT(1)
+#define COMA_BYTE_SWAP BIT(0)
+#define DEF_COMA 0x00
+
+#define COMB_FLIP_V BIT(7)
+#define COMB_FLIP_H BIT(5)
+#define COMB_BAND_FILTER BIT(4)
+#define COMB_AWB BIT(2)
+#define COMB_AGC BIT(1)
+#define COMB_AEC BIT(0)
+#define DEF_COMB 0x5f
+
+#define COML_ONE_CHANNEL BIT(7)
+
+#define DEF_HSTRT 0x24
+#define DEF_HSTOP 0xd4
+#define DEF_VSTRT 0x04
+#define DEF_VSTOP 0x94
+
+#define COMF_HREF_LOW BIT(4)
+
+#define COMJ_PCLK_RISING BIT(4)
+#define COMJ_VSYNC_HIGH BIT(0)
+
+/* supported resolutions */
+#define W_QCIF (DEF_HSTOP - DEF_HSTRT)
+#define W_CIF (W_QCIF << 1)
+#define H_QCIF (DEF_VSTOP - DEF_VSTRT)
+#define H_CIF (H_QCIF << 1)
+
+#define FRAME_RATE_MAX 30
+
+
+struct ov6650_reg {
+ u8 reg;
+ u8 val;
+};
+
+struct ov6650 {
+ struct v4l2_subdev subdev;
+
+ int gain;
+ int blue;
+ int red;
+ int saturation;
+ int hue;
+ int brightness;
+ int exposure;
+ int gamma;
+ int aec;
+ bool vflip;
+ bool hflip;
+ bool awb;
+ bool agc;
+ bool half_scale; /* scale down output by 2 */
+ struct v4l2_rect rect; /* sensor cropping window */
+ unsigned long pclk_limit; /* from host */
+ unsigned long pclk_max; /* from resolution and format */
+ struct v4l2_fract tpf; /* as requested with s_parm */
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+
+static enum v4l2_mbus_pixelcode ov6650_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_YVYU8_2X8,
+ V4L2_MBUS_FMT_VYUY8_2X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_GREY8_1X8,
+};
+
+static const struct v4l2_queryctrl ov6650_controls[] = {
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "AGC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 0x3f,
+ .step = 1,
+ .default_value = DEF_GAIN,
+ },
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "AWB",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_BLUE,
+ },
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_RED,
+ },
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 0xf,
+ .step = 1,
+ .default_value = 0x8,
+ },
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = 0,
+ .maximum = HUE_MASK,
+ .step = 1,
+ .default_value = DEF_HUE,
+ },
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x80,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = DEF_AECH,
+ },
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x12,
+ },
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Vertically",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Horizontally",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+
+/* read a register */
+static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+ int ret;
+ u8 data = reg;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &data,
+ };
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0)
+ goto err;
+
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0)
+ goto err;
+
+ *val = data;
+ return 0;
+
+err:
+ dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
+ return ret;
+}
+
+/* write a register */
+static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[2] = { reg, val };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = data,
+ };
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ udelay(100);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
+ return ret;
+ }
+ return 0;
+}
+
+
+/* Read a register, alter its bits, write it back */
+static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask)
+{
+ u8 val;
+ int ret;
+
+ ret = ov6650_reg_read(client, reg, &val);
+ if (ret) {
+ dev_err(&client->dev,
+ "[Read]-Modify-Write of register 0x%02x failed!\n",
+ reg);
+ return ret;
+ }
+
+ val &= ~mask;
+ val |= set;
+
+ ret = ov6650_reg_write(client, reg, val);
+ if (ret)
+ dev_err(&client->dev,
+ "Read-Modify-[Write] of register 0x%02x failed!\n",
+ reg);
+
+ return ret;
+}
+
+static struct ov6650 *to_ov6650(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov6650, subdev);
+}
+
+/* Start/Stop streaming from the device */
+static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/* Alter bus settings on camera side */
+static int ov6650_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
+ int ret;
+
+ flags = soc_camera_apply_sensor_flags(icl, flags);
+
+ if (flags & SOCAM_PCLK_SAMPLE_RISING)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
+ if (ret)
+ return ret;
+
+ if (flags & SOCAM_HSYNC_ACTIVE_LOW)
+ ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
+ if (ret)
+ return ret;
+
+ if (flags & SOCAM_VSYNC_ACTIVE_HIGH)
+ ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
+ else
+ ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
+
+ return ret;
+}
+
+/* Request bus settings on camera side */
+static unsigned long ov6650_query_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ unsigned long flags = SOCAM_MASTER |
+ SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
+ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW |
+ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW |
+ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
+
+ return soc_camera_apply_sensor_flags(icl, flags);
+}
+
+/* Get status of additional camera capabilities */
+static int ov6650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ uint8_t reg;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ ctrl->value = priv->agc;
+ break;
+ case V4L2_CID_GAIN:
+ if (priv->agc) {
+ ret = ov6650_reg_read(client, REG_GAIN, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->gain;
+ }
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->value = priv->awb;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ if (priv->awb) {
+ ret = ov6650_reg_read(client, REG_BLUE, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->blue;
+ }
+ break;
+ case V4L2_CID_RED_BALANCE:
+ if (priv->awb) {
+ ret = ov6650_reg_read(client, REG_RED, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->red;
+ }
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = priv->saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = priv->hue;
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = priv->brightness;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ctrl->value = priv->aec;
+ break;
+ case V4L2_CID_EXPOSURE:
+ if (priv->aec) {
+ ret = ov6650_reg_read(client, REG_AECH, &reg);
+ ctrl->value = reg;
+ } else {
+ ctrl->value = priv->exposure;
+ }
+ break;
+ case V4L2_CID_GAMMA:
+ ctrl->value = priv->gamma;
+ break;
+ case V4L2_CID_VFLIP:
+ ctrl->value = priv->vflip;
+ break;
+ case V4L2_CID_HFLIP:
+ ctrl->value = priv->hflip;
+ break;
+ }
+ return ret;
+}
+
+/* Set status of additional camera capabilities */
+static int ov6650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_AGC : 0, COMB_AGC);
+ if (!ret)
+ priv->agc = ctrl->value;
+ break;
+ case V4L2_CID_GAIN:
+ ret = ov6650_reg_write(client, REG_GAIN, ctrl->value);
+ if (!ret)
+ priv->gain = ctrl->value;
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_AWB : 0, COMB_AWB);
+ if (!ret)
+ priv->awb = ctrl->value;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = ov6650_reg_write(client, REG_BLUE, ctrl->value);
+ if (!ret)
+ priv->blue = ctrl->value;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = ov6650_reg_write(client, REG_RED, ctrl->value);
+ if (!ret)
+ priv->red = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ ret = ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->value),
+ SAT_MASK);
+ if (!ret)
+ priv->saturation = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ ret = ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->value),
+ HUE_MASK);
+ if (!ret)
+ priv->hue = ctrl->value;
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ ret = ov6650_reg_write(client, REG_BRT, ctrl->value);
+ if (!ret)
+ priv->brightness = ctrl->value;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ switch (ctrl->value) {
+ case V4L2_EXPOSURE_AUTO:
+ ret = ov6650_reg_rmw(client, REG_COMB, COMB_AEC, 0);
+ break;
+ default:
+ ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_AEC);
+ break;
+ }
+ if (!ret)
+ priv->aec = ctrl->value;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov6650_reg_write(client, REG_AECH, ctrl->value);
+ if (!ret)
+ priv->exposure = ctrl->value;
+ break;
+ case V4L2_CID_GAMMA:
+ ret = ov6650_reg_write(client, REG_GAM1, ctrl->value);
+ if (!ret)
+ priv->gamma = ctrl->value;
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_FLIP_V : 0, COMB_FLIP_V);
+ if (!ret)
+ priv->vflip = ctrl->value;
+ break;
+ case V4L2_CID_HFLIP:
+ ret = ov6650_reg_rmw(client, REG_COMB,
+ ctrl->value ? COMB_FLIP_H : 0, COMB_FLIP_H);
+ if (!ret)
+ priv->hflip = ctrl->value;
+ break;
+ }
+
+ return ret;
+}
+
+/* Get chip identification */
+static int ov6650_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ id->ident = V4L2_IDENT_OV6650;
+ id->revision = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov6650_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xff)
+ return -EINVAL;
+
+ reg->size = 1;
+
+ ret = ov6650_reg_read(client, reg->reg, &val);
+ if (!ret)
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov6650_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return ov6650_reg_write(client, reg->reg, reg->val);
+}
+#endif
+
+static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = priv->rect;
+
+ return 0;
+}
+
+static int ov6650_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_rect *rect = &a->c;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ rect->left = ALIGN(rect->left, 2);
+ rect->width = ALIGN(rect->width, 2);
+ rect->top = ALIGN(rect->top, 2);
+ rect->height = ALIGN(rect->height, 2);
+ soc_camera_limit_side(&rect->left, &rect->width,
+ DEF_HSTRT << 1, 2, W_CIF);
+ soc_camera_limit_side(&rect->top, &rect->height,
+ DEF_VSTRT << 1, 2, H_CIF);
+
+ ret = ov6650_reg_write(client, REG_HSTRT, rect->left >> 1);
+ if (!ret) {
+ priv->rect.left = rect->left;
+ ret = ov6650_reg_write(client, REG_HSTOP,
+ (rect->left + rect->width) >> 1);
+ }
+ if (!ret) {
+ priv->rect.width = rect->width;
+ ret = ov6650_reg_write(client, REG_VSTRT, rect->top >> 1);
+ }
+ if (!ret) {
+ priv->rect.top = rect->top;
+ ret = ov6650_reg_write(client, REG_VSTOP,
+ (rect->top + rect->height) >> 1);
+ }
+ if (!ret)
+ priv->rect.height = rect->height;
+
+ return ret;
+}
+
+static int ov6650_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->bounds.left = DEF_HSTRT << 1;
+ a->bounds.top = DEF_VSTRT << 1;
+ a->bounds.width = W_CIF;
+ a->bounds.height = H_CIF;
+ a->defrect = a->bounds;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int ov6650_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ mf->colorspace = priv->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
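+/* true when the requested size is too large for the 1/2 down-scaler output of the crop window */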
+static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
+{
+ return (width > rect->width >> 1 || height > rect->height >> 1);
+}
+
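+/*
+ * Compute the CLKRC clock pre-scaler field for the requested frame interval,
+ * derived from the maximum pixel clock of the current mode and clamped to
+ * the limit imposed by the host, if any.
+ */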
+static u8 to_clkrc(struct v4l2_fract *timeperframe,
+ unsigned long pclk_limit, unsigned long pclk_max)
+{
+ unsigned long pclk;
+
+ if (timeperframe->numerator && timeperframe->denominator)
+ pclk = pclk_max * timeperframe->denominator /
+ (FRAME_RATE_MAX * timeperframe->numerator);
+ else
+ pclk = pclk_max;
+
+ if (pclk_limit && pclk_limit < pclk)
+ pclk = pclk_limit;
+
+ return (pclk_max - 1) / pclk;
+}
+
+/* set the format we will capture in */
+static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_sense *sense = icd->sense;
+ struct ov6650 *priv = to_ov6650(client);
+ bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
+ struct v4l2_crop a = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .c = {
+ .left = priv->rect.left + (priv->rect.width >> 1) -
+ (mf->width >> (1 - half_scale)),
+ .top = priv->rect.top + (priv->rect.height >> 1) -
+ (mf->height >> (1 - half_scale)),
+ .width = mf->width << half_scale,
+ .height = mf->height << half_scale,
+ },
+ };
+ enum v4l2_mbus_pixelcode code = mf->code;
+ unsigned long mclk, pclk;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ int ret;
+
+ /* select color matrix configuration for given color encoding */
+ switch (code) {
+ case V4L2_MBUS_FMT_GREY8_1X8:
+ dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
+ coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
+ coma_set |= COMA_BW;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
+ coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
+ coma_set |= COMA_WORD_SWAP;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
+ COMA_BYTE_SWAP;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
+ if (half_scale) {
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
+ coma_set |= COMA_BYTE_SWAP;
+ } else {
+ coma_mask |= COMA_RGB | COMA_BW;
+ coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ }
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
+ if (half_scale) {
+ coma_mask |= COMA_RGB | COMA_BW;
+ coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ } else {
+ coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
+ coma_set |= COMA_BYTE_SWAP;
+ }
+ break;
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
+ coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
+ coma_set |= COMA_RAW_RGB | COMA_RGB;
+ break;
+ case 0:
+ break;
+ default:
+ dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
+ return -EINVAL;
+ }
+ priv->code = code;
+
+ if (code == V4L2_MBUS_FMT_GREY8_1X8 ||
+ code == V4L2_MBUS_FMT_SBGGR8_1X8) {
+ coml_mask = COML_ONE_CHANNEL;
+ coml_set = 0;
+ priv->pclk_max = 4000000;
+ } else {
+ coml_mask = 0;
+ coml_set = COML_ONE_CHANNEL;
+ priv->pclk_max = 8000000;
+ }
+
+ if (code == V4L2_MBUS_FMT_SBGGR8_1X8)
+ priv->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (code != 0)
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ if (half_scale) {
+ dev_dbg(&client->dev, "max resolution: QCIF\n");
+ coma_set |= COMA_QCIF;
+ priv->pclk_max /= 2;
+ } else {
+ dev_dbg(&client->dev, "max resolution: CIF\n");
+ coma_mask |= COMA_QCIF;
+ }
+ priv->half_scale = half_scale;
+
+ if (sense) {
+ if (sense->master_clock == 8000000) {
+ dev_dbg(&client->dev, "8MHz input clock\n");
+ clkrc = CLKRC_6MHz;
+ } else if (sense->master_clock == 12000000) {
+ dev_dbg(&client->dev, "12MHz input clock\n");
+ clkrc = CLKRC_12MHz;
+ } else if (sense->master_clock == 16000000) {
+ dev_dbg(&client->dev, "16MHz input clock\n");
+ clkrc = CLKRC_16MHz;
+ } else if (sense->master_clock == 24000000) {
+ dev_dbg(&client->dev, "24MHz input clock\n");
+ clkrc = CLKRC_24MHz;
+ } else {
+ dev_err(&client->dev,
+ "unsupported input clock, check platform data\n");
+ return -EINVAL;
+ }
+ mclk = sense->master_clock;
+ priv->pclk_limit = sense->pixel_clock_max;
+ } else {
+ clkrc = CLKRC_24MHz;
+ mclk = 24000000;
+ priv->pclk_limit = 0;
+ dev_dbg(&client->dev, "using default 24MHz input clock\n");
+ }
+
+ clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
+
+ pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
+ dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
+ mclk / pclk, 10 * (mclk % pclk) / pclk);
+
+ ret = ov6650_s_crop(sd, &a);
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
+ if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
+
+ if (!ret) {
+ mf->colorspace = priv->colorspace;
+ mf->width = priv->rect.width >> half_scale;
+ mf->height = priv->rect.height >> half_scale;
+ }
+
+ return ret;
+}
+
+static int ov6650_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+
+ if (is_unscaled_ok(mf->width, mf->height, &priv->rect))
+ v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
+ &mf->height, 2, H_CIF, 1, 0);
+
+ mf->field = V4L2_FIELD_NONE;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_Y10_1X10:
+ mf->code = V4L2_MBUS_FMT_GREY8_1X8;
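+ /* fall through */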
+ case V4L2_MBUS_FMT_GREY8_1X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ default:
+ mf->code = V4L2_MBUS_FMT_SBGGR8_1X8;
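+ /* fall through */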
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov6650_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(ov6650_codes))
+ return -EINVAL;
+
+ *code = ov6650_codes[index];
+ return 0;
+}
+
+static int ov6650_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(cp, 0, sizeof(*cp));
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+ cp->timeperframe.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
+ priv->pclk_limit, priv->pclk_max));
+ cp->timeperframe.denominator = FRAME_RATE_MAX;
+
+ dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
+ cp->timeperframe.numerator, cp->timeperframe.denominator);
+
+ return 0;
+}
+
+static int ov6650_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int div, ret;
+ u8 clkrc;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (cp->extendedmode != 0)
+ return -EINVAL;
+
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ div = 1; /* Reset to full rate */
+ else
+ div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator;
+
+ if (div == 0)
+ div = 1;
+ else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
+ div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
+
+ /*
+ * Keep the result to be used as the tpf limit
+ * for subsequent clock divider calculations.
+ */
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
+
+ ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ if (!ret) {
+ tpf->numerator = GET_CLKRC_DIV(clkrc);
+ tpf->denominator = FRAME_RATE_MAX;
+ }
+
+ return ret;
+}
+
+/* Soft reset the camera. This has nothing to do with the RESET pin! */
+static int ov6650_reset(struct i2c_client *client)
+{
+ int ret;
+
+ dev_dbg(&client->dev, "reset\n");
+
+ ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0);
+ if (ret)
+ dev_err(&client->dev,
+ "An error occurred while entering soft reset!\n");
+
+ return ret;
+}
+
+/* program default register values */
+static int ov6650_prog_dflt(struct i2c_client *client)
+{
+ int ret;
+
+ dev_dbg(&client->dev, "initializing\n");
+
+ ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
+ if (!ret)
+ ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
+
+ return ret;
+}
+
+static int ov6650_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ u8 pidh, pidl, midh, midl;
+ int ret = 0;
+
+ /*
+ * check and show product ID and manufacturer ID
+ */
+ ret = ov6650_reg_read(client, REG_PIDH, &pidh);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_PIDL, &pidl);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_MIDH, &midh);
+ if (!ret)
+ ret = ov6650_reg_read(client, REG_MIDL, &midl);
+
+ if (ret)
+ return ret;
+
+ if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) {
+ dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n",
+ pidh, pidl);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev,
+ "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n",
+ pidh, pidl, midh, midl);
+
+ ret = ov6650_reset(client);
+ if (!ret)
+ ret = ov6650_prog_dflt(client);
+
+ return ret;
+}
+
+static struct soc_camera_ops ov6650_ops = {
+ .set_bus_param = ov6650_set_bus_param,
+ .query_bus_param = ov6650_query_bus_param,
+ .controls = ov6650_controls,
+ .num_controls = ARRAY_SIZE(ov6650_controls),
+};
+
+static struct v4l2_subdev_core_ops ov6650_core_ops = {
+ .g_ctrl = ov6650_g_ctrl,
+ .s_ctrl = ov6650_s_ctrl,
+ .g_chip_ident = ov6650_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov6650_get_register,
+ .s_register = ov6650_set_register,
+#endif
+};
+
+static struct v4l2_subdev_video_ops ov6650_video_ops = {
+ .s_stream = ov6650_s_stream,
+ .g_mbus_fmt = ov6650_g_fmt,
+ .s_mbus_fmt = ov6650_s_fmt,
+ .try_mbus_fmt = ov6650_try_fmt,
+ .enum_mbus_fmt = ov6650_enum_fmt,
+ .cropcap = ov6650_cropcap,
+ .g_crop = ov6650_g_crop,
+ .s_crop = ov6650_s_crop,
+ .g_parm = ov6650_g_parm,
+ .s_parm = ov6650_s_parm,
+};
+
+static struct v4l2_subdev_ops ov6650_subdev_ops = {
+ .core = &ov6650_core_ops,
+ .video = &ov6650_video_ops,
+};
+
+/*
+ * i2c_driver function
+ */
+static int ov6650_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ov6650 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "Missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "Missing platform_data for driver\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&client->dev,
+ "Failed to allocate memory for private data!\n");
+ return -ENOMEM;
+ }
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
+
+ icd->ops = &ov6650_ops;
+
+ priv->rect.left = DEF_HSTRT << 1;
+ priv->rect.top = DEF_VSTRT << 1;
+ priv->rect.width = W_CIF;
+ priv->rect.height = H_CIF;
+ priv->half_scale = false;
+ priv->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ ret = ov6650_video_probe(icd, client);
+
+ if (ret) {
+ icd->ops = NULL;
+ i2c_set_clientdata(client, NULL);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int ov6650_remove(struct i2c_client *client)
+{
+ struct ov6650 *priv = to_ov6650(client);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(priv);
+ return 0;
+}
+
+static const struct i2c_device_id ov6650_id[] = {
+ { "ov6650", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov6650_id);
+
+static struct i2c_driver ov6650_i2c_driver = {
+ .driver = {
+ .name = "ov6650",
+ },
+ .probe = ov6650_probe,
+ .remove = ov6650_remove,
+ .id_table = ov6650_id,
+};
+
+static int __init ov6650_module_init(void)
+{
+ return i2c_add_driver(&ov6650_i2c_driver);
+}
+
+static void __exit ov6650_module_exit(void)
+{
+ i2c_del_driver(&ov6650_i2c_driver);
+}
+
+module_init(ov6650_module_init);
+module_exit(ov6650_module_exit);
+
+MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 91c886ab15c6..c881a64b41fd 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -18,8 +18,9 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
+#include <media/v4l2-mediabus.h>
+#include "ov7670.h"
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
@@ -43,11 +44,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define QCIF_HEIGHT 144
/*
- * Our nominal (default) frame rate.
- */
-#define OV7670_FRAME_RATE 30
-
-/*
* The 7670 sits on i2c with ID 0x42
*/
#define OV7670_I2C_ADDR 0x42
@@ -198,7 +194,11 @@ struct ov7670_info {
struct ov7670_format_struct *fmt; /* Current format */
unsigned char sat; /* Saturation value */
int hue; /* Hue value */
+ int min_width; /* Filter out smaller sizes */
+ int min_height; /* Filter out smaller sizes */
+ int clock_speed; /* External clock speed (MHz) */
u8 clkrc; /* Clock divider value */
+ bool use_smbus; /* Use smbus I/O instead of I2C */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -415,8 +415,7 @@ static struct regval_list ov7670_fmt_raw[] = {
* ov7670 is not really an SMBUS device, though, so the communication
* is not always entirely reliable.
*/
-#ifdef CONFIG_OLPC_XO_1
-static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_read_smbus(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -431,7 +430,7 @@ static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
}
-static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_write_smbus(struct v4l2_subdev *sd, unsigned char reg,
unsigned char value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -442,11 +441,10 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
return ret;
}
-#else /* ! CONFIG_OLPC_XO_1 */
/*
* On most platforms, we'd rather do straight i2c I/O.
*/
-static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_read_i2c(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -479,7 +477,7 @@ static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
}
-static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+static int ov7670_write_i2c(struct v4l2_subdev *sd, unsigned char reg,
unsigned char value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -498,8 +496,26 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
msleep(5); /* Wait for reset to run */
return ret;
}
-#endif /* CONFIG_OLPC_XO_1 */
+static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char *value)
+{
+ struct ov7670_info *info = to_state(sd);
+ if (info->use_smbus)
+ return ov7670_read_smbus(sd, reg, value);
+ else
+ return ov7670_read_i2c(sd, reg, value);
+}
+
+static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char value)
+{
+ struct ov7670_info *info = to_state(sd);
+ if (info->use_smbus)
+ return ov7670_write_smbus(sd, reg, value);
+ else
+ return ov7670_write_i2c(sd, reg, value);
+}
/*
* Write a list of register settings; ff/ff stops the process.
@@ -572,42 +588,37 @@ static int ov7670_detect(struct v4l2_subdev *sd)
/*
* Store information about the video data format. The color matrix
* is deeply tied into the format, so keep the relevant values here.
- * The magic matrix nubmers come from OmniVision.
+ * The magic matrix numbers come from OmniVision.
*/
static struct ov7670_format_struct {
- __u8 *desc;
- __u32 pixelformat;
+ enum v4l2_mbus_pixelcode mbus_code;
+ enum v4l2_colorspace colorspace;
struct regval_list *regs;
int cmatrix[CMATRIX_LEN];
- int bpp; /* Bytes per pixel */
} ov7670_formats[] = {
{
- .desc = "YUYV 4:2:2",
- .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
.regs = ov7670_fmt_yuv422,
.cmatrix = { 128, -128, 0, -34, -94, 128 },
- .bpp = 2,
},
{
- .desc = "RGB 444",
- .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb444,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
- .bpp = 2,
},
{
- .desc = "RGB 565",
- .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_rgb565,
.cmatrix = { 179, -179, 0, -61, -176, 228 },
- .bpp = 2,
},
{
- .desc = "Raw RGB Bayer",
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
.regs = ov7670_fmt_raw,
.cmatrix = { 0, 0, 0, 0, 0, 0 },
- .bpp = 1
},
};
#define N_OV7670_FMTS ARRAY_SIZE(ov7670_formats)
@@ -680,10 +691,10 @@ static struct ov7670_win_size {
.width = QVGA_WIDTH,
.height = QVGA_HEIGHT,
.com7_bit = COM7_FMT_QVGA,
- .hstart = 164, /* Empirically determined */
- .hstop = 20,
- .vstart = 14,
- .vstop = 494,
+ .hstart = 168, /* Empirically determined */
+ .hstop = 24,
+ .vstart = 12,
+ .vstop = 492,
.regs = NULL,
},
/* QCIF */
@@ -734,51 +745,45 @@ static int ov7670_set_hw(struct v4l2_subdev *sd, int hstart, int hstop,
}
-static int ov7670_enum_fmt(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+static int ov7670_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
- struct ov7670_format_struct *ofmt;
-
- if (fmt->index >= N_OV7670_FMTS)
+ if (index >= N_OV7670_FMTS)
return -EINVAL;
- ofmt = ov7670_formats + fmt->index;
- fmt->flags = 0;
- strcpy(fmt->description, ofmt->desc);
- fmt->pixelformat = ofmt->pixelformat;
+ *code = ov7670_formats[index].mbus_code;
return 0;
}
-
static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
- struct v4l2_format *fmt,
+ struct v4l2_mbus_framefmt *fmt,
struct ov7670_format_struct **ret_fmt,
struct ov7670_win_size **ret_wsize)
{
int index;
struct ov7670_win_size *wsize;
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
for (index = 0; index < N_OV7670_FMTS; index++)
- if (ov7670_formats[index].pixelformat == pix->pixelformat)
+ if (ov7670_formats[index].mbus_code == fmt->code)
break;
if (index >= N_OV7670_FMTS) {
/* default to first format */
index = 0;
- pix->pixelformat = ov7670_formats[0].pixelformat;
+ fmt->code = ov7670_formats[0].mbus_code;
}
if (ret_fmt != NULL)
*ret_fmt = ov7670_formats + index;
/*
* Fields: the OV devices claim to be progressive.
*/
- pix->field = V4L2_FIELD_NONE;
+ fmt->field = V4L2_FIELD_NONE;
/*
* Round requested image size down to the nearest
* we support, but not below the smallest.
*/
for (wsize = ov7670_win_sizes; wsize < ov7670_win_sizes + N_WIN_SIZES;
wsize++)
- if (pix->width >= wsize->width && pix->height >= wsize->height)
+ if (fmt->width >= wsize->width && fmt->height >= wsize->height)
break;
if (wsize >= ov7670_win_sizes + N_WIN_SIZES)
wsize--; /* Take the smallest one */
@@ -787,14 +792,14 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
/*
* Note the size we'll actually handle.
*/
- pix->width = wsize->width;
- pix->height = wsize->height;
- pix->bytesperline = pix->width*ov7670_formats[index].bpp;
- pix->sizeimage = pix->height*pix->bytesperline;
+ fmt->width = wsize->width;
+ fmt->height = wsize->height;
+ fmt->colorspace = ov7670_formats[index].colorspace;
return 0;
}
-static int ov7670_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int ov7670_try_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
{
return ov7670_try_fmt_internal(sd, fmt, NULL, NULL);
}
@@ -802,15 +807,17 @@ static int ov7670_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
/*
* Set a format.
*/
-static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int ov7670_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
{
- int ret;
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
unsigned char com7;
+ int ret;
ret = ov7670_try_fmt_internal(sd, fmt, &ovfmt, &wsize);
+
if (ret)
return ret;
/*
@@ -845,7 +852,7 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
*/
if (ret == 0)
ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
- return ret;
+ return 0;
}
/*
@@ -863,7 +870,7 @@ static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
- cp->timeperframe.denominator = OV7670_FRAME_RATE;
+ cp->timeperframe.denominator = info->clock_speed;
if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
return 0;
@@ -884,26 +891,72 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
- div = (tpf->numerator*OV7670_FRAME_RATE)/tpf->denominator;
+ div = (tpf->numerator * info->clock_speed) / tpf->denominator;
if (div == 0)
div = 1;
else if (div > CLK_SCALE)
div = CLK_SCALE;
info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
- tpf->denominator = OV7670_FRAME_RATE/div;
+ tpf->denominator = info->clock_speed / div;
return ov7670_write(sd, REG_CLKRC, info->clkrc);
}
-
/*
- * Code for dealing with controls.
+ * Frame intervals. Since frame rates are controlled with the clock
+ * divider, we can only do 30/n for integer n values. So no continuous
+ * or stepwise options. Here we just pick a handful of logical values.
*/
+static int ov7670_frame_rates[] = { 30, 15, 10, 5, 1 };
+
+static int ov7670_enum_frameintervals(struct v4l2_subdev *sd,
+ struct v4l2_frmivalenum *interval)
+{
+ if (interval->index >= ARRAY_SIZE(ov7670_frame_rates))
+ return -EINVAL;
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete.numerator = 1;
+ interval->discrete.denominator = ov7670_frame_rates[interval->index];
+ return 0;
+}
+
+/*
+ * Frame size enumeration
+ */
+static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct ov7670_info *info = to_state(sd);
+ int i;
+ int num_valid = -1;
+ __u32 index = fsize->index;
+ /*
+ * If a minimum width/height was requested, filter out the capture
+ * windows that fall outside that.
+ */
+ for (i = 0; i < N_WIN_SIZES; i++) {
+ struct ov7670_win_size *win = &ov7670_win_sizes[index];
+ if (info->min_width && win->width < info->min_width)
+ continue;
+ if (info->min_height && win->height < info->min_height)
+ continue;
+ if (index == ++num_valid) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = win->width;
+ fsize->discrete.height = win->height;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+/*
+ * Code for dealing with controls.
+ */
static int ov7670_store_cmatrix(struct v4l2_subdev *sd,
int matrix[CMATRIX_LEN])
@@ -1396,6 +1449,47 @@ static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
}
+static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov7670_config *config = data;
+ struct ov7670_info *info = to_state(sd);
+ int ret;
+
+ info->clock_speed = 30; /* default: a guess */
+
+ /*
+ * Must apply configuration before initializing device, because it
+ * selects I/O method.
+ */
+ if (config) {
+ info->min_width = config->min_width;
+ info->min_height = config->min_height;
+ info->use_smbus = config->use_smbus;
+
+ if (config->clock_speed)
+ info->clock_speed = config->clock_speed;
+ }
+
+ /* Make sure it's an ov7670 */
+ ret = ov7670_detect(sd);
+ if (ret) {
+ v4l_dbg(1, debug, client,
+ "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+ client->addr << 1, client->adapter->name);
+ kfree(info);
+ return ret;
+ }
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ info->fmt = &ov7670_formats[0];
+ info->sat = 128; /* Review this */
+ info->clkrc = info->clock_speed / 30;
+
+ return 0;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
@@ -1434,6 +1528,7 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.s_ctrl = ov7670_s_ctrl,
.queryctrl = ov7670_queryctrl,
.reset = ov7670_reset,
+ .s_config = ov7670_s_config,
.init = ov7670_init,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7670_g_register,
@@ -1442,11 +1537,13 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
};
static const struct v4l2_subdev_video_ops ov7670_video_ops = {
- .enum_fmt = ov7670_enum_fmt,
- .try_fmt = ov7670_try_fmt,
- .s_fmt = ov7670_s_fmt,
+ .enum_mbus_fmt = ov7670_enum_mbus_fmt,
+ .try_mbus_fmt = ov7670_try_mbus_fmt,
+ .s_mbus_fmt = ov7670_s_mbus_fmt,
.s_parm = ov7670_s_parm,
.g_parm = ov7670_g_parm,
+ .enum_frameintervals = ov7670_enum_frameintervals,
+ .enum_framesizes = ov7670_enum_framesizes,
};
static const struct v4l2_subdev_ops ov7670_ops = {
@@ -1461,7 +1558,6 @@ static int ov7670_probe(struct i2c_client *client,
{
struct v4l2_subdev *sd;
struct ov7670_info *info;
- int ret;
info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
if (info == NULL)
@@ -1469,22 +1565,6 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
- /* Make sure it's an ov7670 */
- ret = ov7670_detect(sd);
- if (ret) {
- v4l_dbg(1, debug, client,
- "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
- client->addr << 1, client->adapter->name);
- kfree(info);
- return ret;
- }
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- info->fmt = &ov7670_formats[0];
- info->sat = 128; /* Review this */
- info->clkrc = 1; /* 30fps */
-
return 0;
}
@@ -1504,9 +1584,25 @@ static const struct i2c_device_id ov7670_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "ov7670",
- .probe = ov7670_probe,
- .remove = ov7670_remove,
- .id_table = ov7670_id,
+static struct i2c_driver ov7670_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ov7670",
+ },
+ .probe = ov7670_probe,
+ .remove = ov7670_remove,
+ .id_table = ov7670_id,
};
+
+static __init int init_ov7670(void)
+{
+ return i2c_add_driver(&ov7670_driver);
+}
+
+static __exit void exit_ov7670(void)
+{
+ i2c_del_driver(&ov7670_driver);
+}
+
+module_init(init_ov7670);
+module_exit(exit_ov7670);
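A brief worked illustration of the clock-divider arithmetic in the s_parm/s_config hunks above (a sketch only; the helper name is hypothetical and the CLK_SCALE clamp is omitted for brevity): with clock_speed = 30 and a requested interval of 1/15 s, div = (1 * 30) / 15 = 2, so the achieved rate is 30 / 2 = 15 fps and the divider written to the low bits of REG_CLKRC is 2.

/*
 * Hypothetical helper mirroring the math in ov7670_s_parm() above:
 * returns the frame rate actually achieved for a requested interval
 * numerator/denominator, given the external clock in MHz.
 */
static int ov7670_effective_rate(int clock_speed, int numerator, int denominator)
{
	int div = (numerator * clock_speed) / denominator;

	if (div == 0)
		div = 1;		/* never divide by zero */
	return clock_speed / div;	/* e.g. 30, 15, 10, ... */
}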
diff --git a/drivers/media/video/ov7670.h b/drivers/media/video/ov7670.h
new file mode 100644
index 000000000000..b133bc123031
--- /dev/null
+++ b/drivers/media/video/ov7670.h
@@ -0,0 +1,20 @@
+/*
+ * A V4L2 driver for OmniVision OV7670 cameras.
+ *
+ * Copyright 2010 One Laptop Per Child
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#ifndef __OV7670_H
+#define __OV7670_H
+
+struct ov7670_config {
+ int min_width; /* Filter out smaller sizes */
+ int min_height; /* Filter out smaller sizes */
+ int clock_speed; /* External clock speed (MHz) */
+ bool use_smbus; /* Use smbus I/O instead of I2C */
+};
+
+#endif
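The configuration above is presumably supplied by board or bridge code; a minimal sketch, assuming it is handed to the subdevice as set-up data via i2c_board_info.platform_data (the address, clock value and size limits below are illustrative, not taken from the patch):

static struct ov7670_config ov7670_cfg = {
	.min_width   = 640,	/* hide capture windows smaller than VGA */
	.min_height  = 480,
	.clock_speed = 45,	/* external clock in MHz */
	.use_smbus   = false,	/* plain I2C transfers */
};

static struct i2c_board_info ov7670_board_info = {
	I2C_BOARD_INFO("ov7670", 0x21),
	.platform_data = &ov7670_cfg,
};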
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 25eb5d637eea..a84b770352f9 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -599,7 +599,7 @@ static int ov772x_reset(struct i2c_client *client)
static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
if (!enable) {
@@ -645,7 +645,7 @@ static unsigned long ov772x_query_bus_param(struct soc_camera_device *icd)
static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
switch (ctrl->id) {
@@ -664,7 +664,7 @@ static int ov772x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
int ret = 0;
u8 val;
@@ -715,7 +715,7 @@ static int ov772x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
id->ident = priv->model;
@@ -728,7 +728,7 @@ static int ov772x_g_chip_ident(struct v4l2_subdev *sd,
static int ov772x_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
reg->size = 1;
@@ -747,7 +747,7 @@ static int ov772x_g_register(struct v4l2_subdev *sd,
static int ov772x_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg > 0xff ||
reg->val > 0xff)
@@ -954,7 +954,7 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int ov772x_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
if (!priv->win || !priv->cfmt) {
@@ -977,7 +977,7 @@ static int ov772x_g_fmt(struct v4l2_subdev *sd,
static int ov772x_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
int ret = ov772x_set_params(client, &mf->width, &mf->height,
mf->code);
@@ -991,7 +991,7 @@ static int ov772x_s_fmt(struct v4l2_subdev *sd,
static int ov772x_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = to_ov772x(client);
const struct ov772x_win_size *win;
int i;
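The ov772x, ov9640 and rj54n1 hunks in this patch all make the same substitution: the private sd->priv pointer is replaced by the accessor API. A minimal sketch of the pairing, assuming the subdevice was initialised with v4l2_i2c_subdev_init(), which stores the client pointer via v4l2_set_subdevdata():

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>

static int example_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	/* Retrieve the i2c_client stashed at subdev init time. */
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	dev_dbg(&client->dev, "control 0x%x queried\n", ctrl->id);
	return 0;
}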
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 40cdfab74ccc..99e9e1d3c83b 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -308,7 +308,7 @@ static unsigned long ov9640_query_bus_param(struct soc_camera_device *icd)
/* Get status of additional camera capabilities */
static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -326,7 +326,7 @@ static int ov9640_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
/* Set status of additional camera capabilities */
static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -360,7 +360,7 @@ static int ov9640_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_priv *priv = container_of(i2c_get_clientdata(client),
struct ov9640_priv, subdev);
@@ -374,7 +374,7 @@ static int ov9640_g_chip_ident(struct v4l2_subdev *sd,
static int ov9640_get_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
u8 val;
@@ -395,7 +395,7 @@ static int ov9640_get_register(struct v4l2_subdev *sd,
static int ov9640_set_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg & ~0xff || reg->val & ~0xff)
return -EINVAL;
@@ -558,7 +558,7 @@ static int ov9640_prog_dflt(struct i2c_client *client)
static int ov9640_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_reg_alt alts = {0};
enum v4l2_colorspace cspace;
enum v4l2_mbus_pixelcode code = mf->code;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 70ea578d6266..bef202752cc8 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2082,20 +2082,13 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
return -EINVAL;
}
- /* Note how the 2nd and 3rd arguments are the same for
- * v4l2_i2c_new_subdev(). Why?
- * Well the 2nd argument is the module name to load, while the 3rd
- * argument is documented in the framework as being the "chipid" -
- * and every other place where I can find examples of this, the
- * "chipid" appears to just be the module name again. So here we
- * just do the same thing. */
if (i2ccnt == 1) {
pvr2_trace(PVR2_TRACE_INIT,
"Module ID %u:"
" Setting up with specified i2c address 0x%x",
mid, i2caddr[0]);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
- fname, fname,
+ NULL, fname,
i2caddr[0], NULL);
} else {
pvr2_trace(PVR2_TRACE_INIT,
@@ -2103,7 +2096,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
" Setting up with address probe list",
mid);
sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
- fname, fname,
+ NULL, fname,
0, i2caddr);
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 11980db22d31..8da42e4f1ba0 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -1,6 +1,6 @@
config USB_PWC
tristate "USB Philips Cameras"
- depends on VIDEO_V4L1
+ depends on VIDEO_V4L2
---help---
Say Y or M here if you want to use one of these Philips & OEM
webcams:
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index f7f7e04cf485..6b8fbddc0747 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -261,7 +261,7 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
return ret;
}
- if (pEntry->compressed && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);
pdev->cmd_len = 3;
@@ -321,7 +321,7 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec23_init(pdev, pdev->type, buf);
pdev->cmd_len = 13;
@@ -356,7 +356,7 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
fps = (frames / 5) - 1;
/* special case: VGA @ 5 fps and snapshot is raw bayer mode */
- if (size == PSZ_VGA && frames == 5 && snapshot && pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (size == PSZ_VGA && frames == 5 && snapshot && pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
/* Only available in case the raw palette is selected or
we have the decompressor available. This mode is
@@ -394,7 +394,7 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->vpalette != VIDEO_PALETTE_RAW)
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
pwc_dec23_init(pdev, pdev->type, buf);
pdev->cmd_len = 12;
@@ -429,7 +429,7 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
{
int ret, size;
- PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, palette %d).\n", width, height, frames, pdev->vpalette);
+ PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
size = pwc_decode_size(pdev, width, height);
if (size < 0) {
PWC_DEBUG_MODULE("Could not find suitable size.\n");
@@ -519,13 +519,13 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev)
{
int i, factor = 0;
- /* for PALETTE_YUV420P */
- switch(pdev->vpalette)
- {
- case VIDEO_PALETTE_YUV420P:
+ /* for V4L2_PIX_FMT_YUV420 */
+ switch (pdev->pixfmt) {
+ case V4L2_PIX_FMT_YUV420:
factor = 6;
break;
- case VIDEO_PALETTE_RAW:
+ case V4L2_PIX_FMT_PWC1:
+ case V4L2_PIX_FMT_PWC2:
factor = 6; /* can be uncompressed YUV420P */
break;
}
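Throughout the pwc changes the old VIDEO_PALETTE_RAW test becomes a pixelformat comparison: anything other than V4L2_PIX_FMT_YUV420 (i.e. the raw PWC1/PWC2 formats) selects raw mode. A hypothetical convenience helper, not part of the patch, capturing the recurring condition:

/* Hypothetical wrapper for the repeated "is raw mode?" pixfmt test. */
static inline bool pwc_raw_mode(const struct pwc_device *pdev)
{
	return pdev->pixfmt != V4L2_PIX_FMT_YUV420;	/* PWC1/PWC2 raw */
}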
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index aea7e224cef6..e62beb4efdb4 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -163,7 +163,7 @@ static const struct v4l2_file_operations pwc_fops = {
.read = pwc_video_read,
.poll = pwc_video_poll,
.mmap = pwc_video_mmap,
- .ioctl = pwc_video_ioctl,
+ .unlocked_ioctl = pwc_video_ioctl,
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
@@ -1247,8 +1247,8 @@ static int pwc_video_close(struct file *file)
PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
- lock_kernel();
pdev = video_get_drvdata(vdev);
+ mutex_lock(&pdev->modlock);
if (pdev->vopen == 0)
PWC_DEBUG_MODULE("video_close() called on closed device?\n");
@@ -1286,7 +1286,7 @@ static int pwc_video_close(struct file *file)
if (device_hint[hint].pdev == pdev)
device_hint[hint].pdev = NULL;
}
- unlock_kernel();
+ mutex_unlock(&pdev->modlock);
return 0;
}
@@ -1365,7 +1365,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
}
PWC_DEBUG_READ("Copying data to user space.\n");
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
bytes_to_read = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
bytes_to_read = pdev->view.size;
@@ -1800,13 +1800,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
pdev->vdev->release = video_device_release;
- rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
- if (rc < 0) {
- PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_video_release;
- }
-
- PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
/* occupy slot */
if (hint < MAX_DEV_HINTS)
@@ -1814,14 +1807,22 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
usb_set_intfdata(intf, pdev);
- rc = pwc_create_sysfs_files(pdev->vdev);
- if (rc)
- goto err_video_unreg;
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
+ rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (rc < 0) {
+ PWC_ERROR("Failed to register as video device (%d).\n", rc);
+ goto err_video_release;
+ }
+ rc = pwc_create_sysfs_files(pdev->vdev);
+ if (rc)
+ goto err_video_unreg;
+
+ PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
/* register webcam snapshot button input device */
pdev->button_dev = input_allocate_device();
@@ -1871,8 +1872,8 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
struct pwc_device *pdev;
int hint;
- lock_kernel();
pdev = usb_get_intfdata (intf);
+ mutex_lock(&pdev->modlock);
usb_set_intfdata (intf, NULL);
if (pdev == NULL) {
PWC_ERROR("pwc_disconnect() Called without private pointer.\n");
@@ -1897,9 +1898,7 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
wake_up_interruptible(&pdev->frameq);
/* Wait until device is closed */
if (pdev->vopen) {
- mutex_lock(&pdev->modlock);
pdev->unplugged = 1;
- mutex_unlock(&pdev->modlock);
pwc_iso_stop(pdev);
} else {
/* Device is closed, so we can safely unregister it */
@@ -1913,7 +1912,7 @@ disconnect_out:
device_hint[hint].pdev = NULL;
}
- unlock_kernel();
+ mutex_unlock(&pdev->modlock);
}
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 589c687439da..6af5bb538358 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -47,7 +47,7 @@ int pwc_decode_size(struct pwc_device *pdev, int width, int height)
you don't have the decompressor loaded or use RAW mode,
the maximum viewable size is smaller.
*/
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
if (width > pdev->abs_max.x || height > pdev->abs_max.y)
{
@@ -123,7 +123,7 @@ void pwc_construct(struct pwc_device *pdev)
pdev->frame_header_size = 0;
pdev->frame_trailer_size = 0;
}
- pdev->vpalette = VIDEO_PALETTE_YUV420P; /* default */
+ pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
/* length of image, in YUV format; always allocate enough memory. */
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 5d82028ef942..3b73f295f032 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -54,7 +54,7 @@ int pwc_decompress(struct pwc_device *pdev)
yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
/* Raw format; that's easy... */
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
{
struct pwc_raw_frame *raw_frame = image;
raw_frame->type = cpu_to_le16(pdev->type);
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 62d89b3113a4..7061a03f5cf1 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -216,7 +216,7 @@ static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_forma
f->fmt.pix.width = pdev->view.x;
f->fmt.pix.height = pdev->view.y;
f->fmt.pix.field = V4L2_FIELD_NONE;
- if (pdev->vpalette == VIDEO_PALETTE_YUV420P) {
+ if (pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
f->fmt.pix.bytesperline = (f->fmt.pix.width * 3)/2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
@@ -304,10 +304,10 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
fps = pdev->vframes;
}
- if (pixelformat == V4L2_PIX_FMT_YUV420)
- pdev->vpalette = VIDEO_PALETTE_YUV420P;
- else
- pdev->vpalette = VIDEO_PALETTE_RAW;
+ if (pixelformat != V4L2_PIX_FMT_YUV420 &&
+ pixelformat != V4L2_PIX_FMT_PWC1 &&
+ pixelformat != V4L2_PIX_FMT_PWC2)
+ return -EINVAL;
PWC_DEBUG_IOCTL("Try to change format to: width=%d height=%d fps=%d "
"compression=%d snapshot=%d format=%c%c%c%c\n",
@@ -330,6 +330,8 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
if (ret)
return ret;
+ pdev->pixfmt = pixelformat;
+
pwc_vidioc_fill_fmt(pdev, f);
return 0;
@@ -357,152 +359,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
- /* Query cabapilities */
- case VIDIOCGCAP:
- {
- struct video_capability *caps = arg;
-
- strcpy(caps->name, vdev->name);
- caps->type = VID_TYPE_CAPTURE;
- caps->channels = 1;
- caps->audios = 1;
- caps->minwidth = pdev->view_min.x;
- caps->minheight = pdev->view_min.y;
- caps->maxwidth = pdev->view_max.x;
- caps->maxheight = pdev->view_max.y;
- break;
- }
-
- /* Channel functions (simulate 1 channel) */
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
-
- if (v->channel != 0)
- return -EINVAL;
- v->flags = 0;
- v->tuners = 0;
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Webcam");
- return 0;
- }
-
- case VIDIOCSCHAN:
- {
- /* The spec says the argument is an integer, but
- the bttv driver uses a video_channel arg, which
- makes sense becasue it also has the norm flag.
- */
- struct video_channel *v = arg;
- if (v->channel != 0)
- return -EINVAL;
- return 0;
- }
-
-
- /* Picture functions; contrast etc. */
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- int val;
-
- val = pwc_get_brightness(pdev);
- if (val >= 0)
- p->brightness = (val<<9);
- else
- p->brightness = 0xffff;
- val = pwc_get_contrast(pdev);
- if (val >= 0)
- p->contrast = (val<<10);
- else
- p->contrast = 0xffff;
- /* Gamma, Whiteness, what's the difference? :) */
- val = pwc_get_gamma(pdev);
- if (val >= 0)
- p->whiteness = (val<<11);
- else
- p->whiteness = 0xffff;
- if (pwc_get_saturation(pdev, &val)<0)
- p->colour = 0xffff;
- else
- p->colour = 32768 + val * 327;
- p->depth = 24;
- p->palette = pdev->vpalette;
- p->hue = 0xFFFF; /* N/A */
- break;
- }
-
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
- /*
- * FIXME: Suppose we are mid read
- ANSWER: No problem: the firmware of the camera
- can handle brightness/contrast/etc
- changes at _any_ time, and the palette
- is used exactly once in the uncompress
- routine.
- */
- pwc_set_brightness(pdev, p->brightness);
- pwc_set_contrast(pdev, p->contrast);
- pwc_set_gamma(pdev, p->whiteness);
- pwc_set_saturation(pdev, (p->colour-32768)/327);
- if (p->palette && p->palette != pdev->vpalette) {
- switch (p->palette) {
- case VIDEO_PALETTE_YUV420P:
- case VIDEO_PALETTE_RAW:
- pdev->vpalette = p->palette;
- return pwc_try_video_mode(pdev, pdev->image.x, pdev->image.y, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
- break;
- default:
- return -EINVAL;
- break;
- }
- }
- break;
- }
-
- /* Window/size parameters */
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
-
- vw->x = 0;
- vw->y = 0;
- vw->width = pdev->view.x;
- vw->height = pdev->view.y;
- vw->chromakey = 0;
- vw->flags = (pdev->vframes << PWC_FPS_SHIFT) |
- (pdev->vsnapshot ? PWC_FPS_SNAPSHOT : 0);
- break;
- }
-
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
- int fps, snapshot, ret;
-
- fps = (vw->flags & PWC_FPS_FRMASK) >> PWC_FPS_SHIFT;
- snapshot = vw->flags & PWC_FPS_SNAPSHOT;
- if (fps == 0)
- fps = pdev->vframes;
- if (pdev->view.x == vw->width && pdev->view.y && fps == pdev->vframes && snapshot == pdev->vsnapshot)
- return 0;
- ret = pwc_try_video_mode(pdev, vw->width, vw->height, fps, pdev->vcompression, snapshot);
- if (ret)
- return ret;
- break;
- }
-
- /* We don't have overlay support (yet) */
- case VIDIOCGFBUF:
- {
- struct video_buffer *vb = arg;
-
- memset(vb,0,sizeof(*vb));
- break;
- }
-
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
/* mmap() functions */
case VIDIOCGMBUF:
{
@@ -517,164 +374,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
vm->offsets[i] = i * pdev->len_per_image;
break;
}
-
- case VIDIOCMCAPTURE:
- {
- /* Start capture into a given image buffer (called 'frame' in video_mmap structure) */
- struct video_mmap *vm = arg;
-
- PWC_DEBUG_READ("VIDIOCMCAPTURE: %dx%d, frame %d, format %d\n", vm->width, vm->height, vm->frame, vm->format);
- if (vm->frame < 0 || vm->frame >= pwc_mbufs)
- return -EINVAL;
-
- /* xawtv is nasty. It probes the available palettes
- by setting a very small image size and trying
- various palettes... The driver doesn't support
- such small images, so I'm working around it.
- */
- if (vm->format)
- {
- switch (vm->format)
- {
- case VIDEO_PALETTE_YUV420P:
- case VIDEO_PALETTE_RAW:
- break;
- default:
- return -EINVAL;
- break;
- }
- }
-
- if ((vm->width != pdev->view.x || vm->height != pdev->view.y) &&
- (vm->width >= pdev->view_min.x && vm->height >= pdev->view_min.y)) {
- int ret;
-
- PWC_DEBUG_OPEN("VIDIOCMCAPTURE: changing size to please xawtv :-(.\n");
- ret = pwc_try_video_mode(pdev, vm->width, vm->height, pdev->vframes, pdev->vcompression, pdev->vsnapshot);
- if (ret)
- return ret;
- } /* ... size mismatch */
-
- /* FIXME: should we lock here? */
- if (pdev->image_used[vm->frame])
- return -EBUSY; /* buffer wasn't available. Bummer */
- pdev->image_used[vm->frame] = 1;
-
- /* Okay, we're done here. In the SYNC call we wait until a
- frame comes available, then expand image into the given
- buffer.
- In contrast to the CPiA cam the Philips cams deliver a
- constant stream, almost like a grabber card. Also,
- we have separate buffers for the rawdata and the image,
- meaning we can nearly always expand into the requested buffer.
- */
- PWC_DEBUG_READ("VIDIOCMCAPTURE done.\n");
- break;
- }
-
- case VIDIOCSYNC:
- {
- /* The doc says: "Whenever a buffer is used it should
- call VIDIOCSYNC to free this frame up and continue."
-
- The only odd thing about this whole procedure is
- that MCAPTURE flags the buffer as "in use", and
- SYNC immediately unmarks it, while it isn't
- after SYNC that you know that the buffer actually
- got filled! So you better not start a CAPTURE in
- the same frame immediately (use double buffering).
- This is not a problem for this cam, since it has
- extra intermediate buffers, but a hardware
- grabber card will then overwrite the buffer
- you're working on.
- */
- int *mbuf = arg;
- int ret;
-
- PWC_DEBUG_READ("VIDIOCSYNC called (%d).\n", *mbuf);
-
- /* bounds check */
- if (*mbuf < 0 || *mbuf >= pwc_mbufs)
- return -EINVAL;
- /* check if this buffer was requested anyway */
- if (pdev->image_used[*mbuf] == 0)
- return -EINVAL;
-
- /* Add ourselves to the frame wait-queue.
-
- FIXME: needs auditing for safety.
- QUESTION: In what respect? I think that using the
- frameq is safe now.
- */
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- /* Check for unplugged/etc. here */
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -pdev->error_status;
- }
-
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -ERESTARTSYS;
- }
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
-
- /* The frame is ready. Expand in the image buffer
- requested by the user. I don't care if you
- mmap() 5 buffers and request data in this order:
- buffer 4 2 3 0 1 2 3 0 4 3 1 . . .
- Grabber hardware may not be so forgiving.
- */
- PWC_DEBUG_READ("VIDIOCSYNC: frame ready.\n");
- pdev->fill_image = *mbuf; /* tell in which buffer we want the image to be expanded */
- /* Decompress, etc */
- ret = pwc_handle_frame(pdev);
- pdev->image_used[*mbuf] = 0;
- if (ret)
- return -EFAULT;
- break;
- }
-
- case VIDIOCGAUDIO:
- {
- struct video_audio *v = arg;
-
- strcpy(v->name, "Microphone");
- v->audio = -1; /* unknown audio minor */
- v->flags = 0;
- v->mode = VIDEO_SOUND_MONO;
- v->volume = 0;
- v->bass = 0;
- v->treble = 0;
- v->balance = 0x8000;
- v->step = 1;
- break;
- }
-
- case VIDIOCSAUDIO:
- {
- /* Dummy: nothing can be set */
- break;
- }
-
- case VIDIOCGUNIT:
- {
- struct video_unit *vu = arg;
-
- vu->video = pdev->vdev->minor & 0x3F;
- vu->audio = -1; /* not known yet */
- vu->vbi = -1;
- vu->radio = -1;
- vu->teletext = -1;
- break;
- }
+#endif
/* V4L2 Layer */
case VIDIOC_QUERYCAP:
@@ -1081,7 +781,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf->index = index;
buf->m.offset = index * pdev->len_per_image;
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
@@ -1158,7 +858,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
buf->index = pdev->fill_image;
- if (pdev->vpalette == VIDEO_PALETTE_RAW)
+ if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index f1b206632957..36a9c83b5f5d 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -34,7 +34,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/errno.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
@@ -49,7 +49,7 @@
#define PWC_MINOR 0
#define PWC_EXTRAMINOR 12
#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
-#define PWC_VERSION "10.0.13"
+#define PWC_VERSION "10.0.14"
#define PWC_NAME "pwc"
#define PFX PWC_NAME ": "
@@ -180,7 +180,7 @@ struct pwc_device
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
- int vpalette; /* palette: 420P, RAW or RGBBAYER */
+ int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or raw: _PWC1, _PWC2 */
int vframe_count; /* received frames */
int vframes_dumped; /* counter for dumped frames */
int vframes_error; /* frames received in error */
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 9de7d59916bd..c143ed0a5270 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -275,7 +275,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
* This waits until this buffer is out of danger, i.e., until it is no
* longer in STATE_QUEUED or STATE_ACTIVE
*/
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
videobuf_dma_unmap(vq->dev, dma);
videobuf_dma_free(dma);
@@ -852,7 +852,7 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
*/
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct pxa_buffer), icd);
+ sizeof(struct pxa_buffer), icd, NULL);
}
static u32 mclk_get_divisor(struct platform_device *pdev,
@@ -1539,7 +1539,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int pxa_camera_reqbufs(struct soc_camera_file *icf,
+static int pxa_camera_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -1551,7 +1551,7 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
* it hadn't triggered
*/
for (i = 0; i < p->count; i++) {
- struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i],
+ struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
struct pxa_buffer, vb);
buf->inwork = 0;
INIT_LIST_HEAD(&buf->vb.queue);
@@ -1562,10 +1562,10 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct pxa_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next, struct pxa_buffer,
+ buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
vb.stream);
poll_wait(file, &buf->vb.done, pt);
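The pxa_camera (and s2255) hunks also track a videobuf interface change in this series: videobuf_waiton() now takes the queue as its first argument, and the videobuf_queue_*_init() helpers gained a trailing lock parameter. A sketch of the updated call pattern, assuming that passing NULL for the new last argument preserves the previous unserialised behaviour:

/* Before: videobuf_waiton(&buf->vb, 0, 0);
 * After:  the queue is passed so the core can manage its own locking. */
videobuf_waiton(vq, &buf->vb, 0, 0);

/* Before: ..._init(q, ops, dev, lock, type, field, msize, priv);
 * After:  one extra argument (NULL here) at the end. */
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
		       V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
		       sizeof(struct pxa_buffer), icd, NULL);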
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index ce78fff23425..d2fa2d43ff19 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -493,7 +493,7 @@ static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Switch between preview and still shot modes */
return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
@@ -503,7 +503,7 @@ static int rj54n1_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
if (flags & SOCAM_PCLK_SAMPLE_RISING)
@@ -560,7 +560,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
int dummy = 0, output_w, output_h,
@@ -595,7 +595,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
a->c = rj54n1->rect;
@@ -621,7 +621,7 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int rj54n1_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
mf->code = rj54n1->fmt->code;
@@ -641,7 +641,7 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
output_w = *out_w, output_h = *out_h;
@@ -983,7 +983,7 @@ static int rj54n1_reg_init(struct i2c_client *client)
static int rj54n1_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
@@ -1014,7 +1014,7 @@ static int rj54n1_try_fmt(struct v4l2_subdev *sd,
static int rj54n1_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
int output_w, output_h, max_w, max_h,
@@ -1145,7 +1145,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
return -EINVAL;
@@ -1163,7 +1163,7 @@ static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
static int rj54n1_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
reg->reg < 0x400 || reg->reg > 0x1fff)
@@ -1185,7 +1185,7 @@ static int rj54n1_g_register(struct v4l2_subdev *sd,
static int rj54n1_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
reg->reg < 0x400 || reg->reg > 0x1fff)
@@ -1248,7 +1248,7 @@ static struct soc_camera_ops rj54n1_ops = {
static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
int data;
@@ -1283,7 +1283,7 @@ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int data;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct v4l2_queryctrl *qctrl;
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 8ec7c9a45a17..f5a46c458717 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -600,7 +600,7 @@ static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
- return 0;
+ return rc;
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
@@ -1817,7 +1817,7 @@ static int s2255_open(struct file *file)
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
- sizeof(struct s2255_buffer), fh);
+ sizeof(struct s2255_buffer), fh, NULL);
return 0;
}
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
index 0d9d54132ecc..7ea1b1403b1e 100644
--- a/drivers/media/video/s5p-fimc/Makefile
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) := s5p-fimc.o
-s5p-fimc-y := fimc-core.o fimc-reg.o
+s5p-fimc-y := fimc-core.o fimc-reg.o fimc-capture.o
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
new file mode 100644
index 000000000000..e8f13d3e2df1
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -0,0 +1,819 @@
+/*
+ * Samsung S5P SoC series camera interface (camera capture) driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf-core.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "fimc-core.h"
+
+static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *isp_info)
+{
+ struct i2c_adapter *i2c_adap;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct v4l2_subdev *sd = NULL;
+
+ i2c_adap = i2c_get_adapter(isp_info->i2c_bus_num);
+ if (!i2c_adap)
+ return ERR_PTR(-ENOMEM);
+
+ sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap,
+ MODULE_NAME, isp_info->board_info, NULL);
+ if (!sd) {
+ v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n");
+ return NULL;
+ }
+
+ v4l2_info(&vid_cap->v4l2_dev, "subdevice %s registered successfully\n",
+ isp_info->board_info->type);
+
+ return sd;
+}
+
+static void fimc_subdev_unregister(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct i2c_client *client;
+
+ if (vid_cap->input_index < 0)
+ return; /* Subdevice already released or not registered. */
+
+ if (vid_cap->sd) {
+ v4l2_device_unregister_subdev(vid_cap->sd);
+ client = v4l2_get_subdevdata(vid_cap->sd);
+ i2c_unregister_device(client);
+ i2c_put_adapter(client->adapter);
+ vid_cap->sd = NULL;
+ }
+
+ vid_cap->input_index = -1;
+}
+
+/**
+ * fimc_subdev_attach - attach v4l2_subdev to camera host interface
+ *
+ * @fimc: FIMC device information
+ * @index: index to the array of available subdevices,
+ * -1 for a full array search, or a non-negative value
+ * to select a specific subdevice
+ */
+static int fimc_subdev_attach(struct fimc_dev *fimc, int index)
+{
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ struct s3c_platform_fimc *pdata = fimc->pdata;
+ struct s3c_fimc_isp_info *isp_info;
+ struct v4l2_subdev *sd;
+ int i;
+
+ for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i) {
+ isp_info = pdata->isp_info[i];
+
+ if (!isp_info || (index >= 0 && i != index))
+ continue;
+
+ sd = fimc_subdev_register(fimc, isp_info);
+ if (sd) {
+ vid_cap->sd = sd;
+ vid_cap->input_index = i;
+
+ return 0;
+ }
+ }
+
+ vid_cap->input_index = -1;
+ vid_cap->sd = NULL;
+ v4l2_err(&vid_cap->v4l2_dev, "fimc%d: sensor attach failed\n",
+ fimc->id);
+ return -ENODEV;
+}
+
+static int fimc_isp_subdev_init(struct fimc_dev *fimc, int index)
+{
+ struct s3c_fimc_isp_info *isp_info;
+ int ret;
+
+ ret = fimc_subdev_attach(fimc, index);
+ if (ret)
+ return ret;
+
+ isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
+ ret = fimc_hw_set_camera_polarity(fimc, isp_info);
+ if (!ret) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
+ s_power, 1);
+ if (!ret)
+ return ret;
+ }
+
+ fimc_subdev_unregister(fimc);
+ err("ISP initialization failed: %d", ret);
+ return ret;
+}
+
+/*
+ * At least one buffer on the pending_buf_q queue is required.
+ * Locking: The caller holds fimc->slock spinlock.
+ */
+int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
+ struct fimc_vid_buffer *fimc_vb)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_ctx *ctx = cap->ctx;
+ int ret = 0;
+
+ BUG_ON(!fimc || !fimc_vb);
+
+ ret = fimc_prepare_addr(ctx, fimc_vb, &ctx->d_frame,
+ &fimc_vb->paddr);
+ if (ret)
+ return ret;
+
+ if (test_bit(ST_CAPT_STREAM, &fimc->state)) {
+ fimc_pending_queue_add(cap, fimc_vb);
+ } else {
+ /* Setup the buffer directly for processing. */
+ int buf_id = (cap->reqbufs_count == 1) ? -1 : cap->buf_index;
+ fimc_hw_set_output_addr(fimc, &fimc_vb->paddr, buf_id);
+
+ fimc_vb->index = cap->buf_index;
+ active_queue_add(cap, fimc_vb);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+ return ret;
+}
+
+static int fimc_stop_capture(struct fimc_dev *fimc)
+{
+ unsigned long flags;
+ struct fimc_vid_cap *cap;
+ int ret;
+
+ cap = &fimc->vid_cap;
+
+ if (!fimc_capture_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_CAPT_SHUT, &fimc->state);
+ fimc_deactivate_capture(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_CAPT_SHUT, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ ret = v4l2_subdev_call(cap->sd, video, s_stream, 0);
+ if (ret)
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "s_stream(0) failed\n");
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_PEND |
+ 1 << ST_CAPT_STREAM);
+
+ fimc->vid_cap.active_buf_cnt = 0;
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ dbg("state: 0x%lx", fimc->state);
+ return 0;
+}
+
+static int fimc_capture_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ int ret = 0;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ /* Return if the corresponding video mem2mem node is already opened. */
+ if (fimc_m2m_active(fimc))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (++fimc->vid_cap.refcnt == 1) {
+ ret = fimc_isp_subdev_init(fimc, -1);
+ if (ret) {
+ fimc->vid_cap.refcnt--;
+ ret = -EIO;
+ }
+ }
+
+ file->private_data = fimc->vid_cap.ctx;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_capture_close(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+
+ if (--fimc->vid_cap.refcnt == 0) {
+ fimc_stop_capture(fimc);
+
+ videobuf_stop(&fimc->vid_cap.vbq);
+ videobuf_mmap_free(&fimc->vid_cap.vbq);
+
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "releasing ISP\n");
+ v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ fimc_subdev_unregister(fimc);
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static unsigned int fimc_capture_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return POLLERR;
+
+ ret = videobuf_poll_stream(file, &cap->vbq, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_mmap_mapper(&cap->vbq, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+/* video device file operations */
+static const struct v4l2_file_operations fimc_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_capture_open,
+ .release = fimc_capture_close,
+ .poll = fimc_capture_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_capture_mmap,
+};
+
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+/* Synchronize formats of the camera interface input and attached sensor. */
+static int sync_capture_fmt(struct fimc_ctx *ctx)
+{
+ struct fimc_frame *frame = &ctx->s_frame;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_mbus_framefmt *fmt = &fimc->vid_cap.fmt;
+ int ret;
+
+ fmt->width = ctx->d_frame.o_width;
+ fmt->height = ctx->d_frame.o_height;
+
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_mbus_fmt, fmt);
+ if (ret == -ENOIOCTLCMD) {
+ err("s_mbus_fmt failed");
+ return ret;
+ }
+ dbg("w: %d, h: %d, code= %d", fmt->width, fmt->height, fmt->code);
+
+ frame->fmt = find_mbus_format(fmt, FMT_FLAGS_CAM);
+ if (!frame->fmt) {
+ err("fimc source format not found\n");
+ return -EINVAL;
+ }
+
+ frame->f_width = fmt->width;
+ frame->f_height = fmt->height;
+ frame->width = fmt->width;
+ frame->height = fmt->height;
+ frame->o_width = fmt->width;
+ frame->o_height = fmt->height;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+
+ return 0;
+}
+
+static int fimc_cap_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *frame;
+ struct v4l2_pix_format *pix;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ ret = fimc_vidioc_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (fimc_capture_active(fimc)) {
+ ret = -EBUSY;
+ goto sf_unlock;
+ }
+
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix;
+ frame->fmt = find_format(f, FMT_FLAGS_M2M | FMT_FLAGS_CAM);
+ if (!frame->fmt) {
+ err("fimc target format not found\n");
+ ret = -EINVAL;
+ goto sf_unlock;
+ }
+
+ /* Output DMA frame pixel size and offsets. */
+ frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_height = pix->height;
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->o_width = pix->width;
+ frame->o_height = pix->height;
+ frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+
+ ret = sync_capture_fmt(ctx);
+
+ ctx->state |= (FIMC_PARAMS | FIMC_DST_FMT);
+
+sf_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct s3c_platform_fimc *pldata = ctx->fimc_dev->pdata;
+ struct s3c_fimc_isp_info *isp_info;
+
+ if (i->index >= FIMC_MAX_CAMIF_CLIENTS)
+ return -EINVAL;
+
+ isp_info = pldata->isp_info[i->index];
+ if (isp_info == NULL)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strncpy(i->name, isp_info->board_info->type, 32);
+ return 0;
+}
+
+static int fimc_cap_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct s3c_platform_fimc *pdata = fimc->pdata;
+ int ret;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (i >= FIMC_MAX_CAMIF_CLIENTS || !pdata->isp_info[i]) {
+ ret = -EINVAL;
+ goto si_unlock;
+ }
+
+ if (fimc->vid_cap.sd) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core, s_power, 0);
+ if (ret)
+ err("s_power failed: %d", ret);
+ }
+
+ /* Release the attached sensor subdevice. */
+ fimc_subdev_unregister(fimc);
+
+ ret = fimc_isp_subdev_init(fimc, i);
+
+si_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+
+ *i = cap->input_index;
+ return 0;
+}
+
+static int fimc_cap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s3c_fimc_isp_info *isp_info;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (fimc_capture_active(fimc) || !fimc->vid_cap.sd)
+ goto s_unlock;
+
+ if (!(ctx->state & FIMC_DST_FMT)) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "Format is not set\n");
+ ret = -EINVAL;
+ goto s_unlock;
+ }
+
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto s_unlock;
+
+ ret = fimc_prepare_config(ctx, ctx->state);
+ if (ret)
+ goto s_unlock;
+
+ isp_info = fimc->pdata->isp_info[fimc->vid_cap.input_index];
+ fimc_hw_set_camera_type(fimc, isp_info);
+ fimc_hw_set_camera_source(fimc, isp_info);
+ fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
+ if (ctx->state & FIMC_PARAMS) {
+ ret = fimc_set_scaler_info(ctx);
+ if (ret) {
+ err("Scaler setup error");
+ goto s_unlock;
+ }
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_scaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ }
+
+ fimc_hw_set_output_path(ctx);
+ fimc_hw_set_out_dma(ctx);
+
+ INIT_LIST_HEAD(&fimc->vid_cap.pending_buf_q);
+ INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
+ fimc->vid_cap.active_buf_cnt = 0;
+ fimc->vid_cap.frame_count = 0;
+
+ set_bit(ST_CAPT_PEND, &fimc->state);
+ ret = videobuf_streamon(&fimc->vid_cap.vbq);
+
+s_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!fimc_capture_running(fimc) && !fimc_capture_pending(fimc)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ dbg("state: 0x%lx", fimc->state);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ fimc_stop_capture(fimc);
+ ret = videobuf_streamoff(&cap->vbq);
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_reqbufs(&cap->vbq, reqbufs);
+ if (!ret)
+ cap->reqbufs_count = reqbufs->count;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_vid_cap *cap = &ctx->fimc_dev->vid_cap;
+
+ if (fimc_capture_active(ctx->fimc_dev))
+ return -EBUSY;
+
+ return videobuf_querybuf(&cap->vbq, buf);
+}
+
+static int fimc_cap_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_qbuf(&cap->vbq, buf);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_cap_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
+ return -ERESTARTSYS;
+
+ ret = videobuf_dqbuf(&ctx->fimc_dev->vid_cap.vbq, buf,
+ file->f_flags & O_NONBLOCK);
+
+ mutex_unlock(&ctx->fimc_dev->lock);
+ return ret;
+}
+
+static int fimc_cap_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret = -EINVAL;
+
+ if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
+ return -ERESTARTSYS;
+
+ /* Allow any controls but 90/270 rotation while streaming */
+ if (!fimc_capture_active(ctx->fimc_dev) ||
+ ctrl->id != V4L2_CID_ROTATE ||
+ (ctrl->value != 90 && ctrl->value != 270)) {
+ ret = check_ctrl_val(ctx, ctrl);
+ if (!ret) {
+ ret = fimc_s_ctrl(ctx, ctrl);
+ if (!ret)
+ ctx->state |= FIMC_PARAMS;
+ }
+ }
+ if (ret == -EINVAL)
+ ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+ core, s_ctrl, ctrl);
+
+ mutex_unlock(&ctx->fimc_dev->lock);
+ return ret;
+}
+
+static int fimc_cap_s_crop(struct file *file, void *fh,
+ struct v4l2_crop *cr)
+{
+ struct fimc_frame *f;
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = -EINVAL;
+
+ if (fimc_capture_active(fimc))
+ return -EBUSY;
+
+ ret = fimc_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (!(ctx->state & FIMC_DST_FMT)) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev,
+ "Capture color format not set\n");
+ goto sc_unlock;
+ }
+
+ f = &ctx->s_frame;
+ /* Check for the pixel scaling ratio when cropping input image. */
+ ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
+ if (ret) {
+ v4l2_err(&fimc->vid_cap.v4l2_dev, "Out of the scaler range");
+ } else {
+ ret = 0;
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+ }
+
+sc_unlock:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+
+static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
+ .vidioc_querycap = fimc_vidioc_querycap_capture,
+
+ .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = fimc_cap_s_fmt,
+ .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
+
+ .vidioc_reqbufs = fimc_cap_reqbufs,
+ .vidioc_querybuf = fimc_cap_querybuf,
+
+ .vidioc_qbuf = fimc_cap_qbuf,
+ .vidioc_dqbuf = fimc_cap_dqbuf,
+
+ .vidioc_streamon = fimc_cap_streamon,
+ .vidioc_streamoff = fimc_cap_streamoff,
+
+ .vidioc_queryctrl = fimc_vidioc_queryctrl,
+ .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
+ .vidioc_s_ctrl = fimc_cap_s_ctrl,
+
+ .vidioc_g_crop = fimc_vidioc_g_crop,
+ .vidioc_s_crop = fimc_cap_s_crop,
+ .vidioc_cropcap = fimc_vidioc_cropcap,
+
+ .vidioc_enum_input = fimc_cap_enum_input,
+ .vidioc_s_input = fimc_cap_s_input,
+ .vidioc_g_input = fimc_cap_g_input,
+};
+
+int fimc_register_capture_device(struct fimc_dev *fimc)
+{
+ struct v4l2_device *v4l2_dev = &fimc->vid_cap.v4l2_dev;
+ struct video_device *vfd;
+ struct fimc_vid_cap *vid_cap;
+ struct fimc_ctx *ctx;
+ struct v4l2_format f;
+ int ret;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fimc_dev = fimc;
+ ctx->in_path = FIMC_CAMERA;
+ ctx->out_path = FIMC_DMA;
+ ctx->state = FIMC_CTX_CAP;
+
+ f.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
+ ctx->d_frame.fmt = find_format(&f, FMT_FLAGS_M2M);
+
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s.capture", dev_name(&fimc->pdev->dev));
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret)
+ goto err_info;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+ goto err_v4l2_reg;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s:cap",
+ dev_name(&fimc->pdev->dev));
+
+ vfd->fops = &fimc_capture_fops;
+ vfd->ioctl_ops = &fimc_capture_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ video_set_drvdata(vfd, fimc);
+
+ vid_cap = &fimc->vid_cap;
+ vid_cap->vfd = vfd;
+ vid_cap->active_buf_cnt = 0;
+ vid_cap->reqbufs_count = 0;
+ vid_cap->refcnt = 0;
+ /* The default color format for the image sensor. */
+ vid_cap->fmt.code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+ INIT_LIST_HEAD(&vid_cap->pending_buf_q);
+ INIT_LIST_HEAD(&vid_cap->active_buf_q);
+ spin_lock_init(&ctx->slock);
+ vid_cap->ctx = ctx;
+
+ videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops,
+ vid_cap->v4l2_dev.dev, &fimc->irqlock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct fimc_vid_buffer), (void *)ctx);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(v4l2_dev, "Failed to register video device\n");
+ goto err_vd_reg;
+ }
+
+ v4l2_info(v4l2_dev,
+ "FIMC capture driver registered as /dev/video%d\n",
+ vfd->num);
+
+ return 0;
+
+err_vd_reg:
+ video_device_release(vfd);
+err_v4l2_reg:
+ v4l2_device_unregister(v4l2_dev);
+err_info:
+ dev_err(&fimc->pdev->dev, "failed to install\n");
+ return ret;
+}
+
+void fimc_unregister_capture_device(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *capture = &fimc->vid_cap;
+
+ if (capture->vfd)
+ video_unregister_device(capture->vfd);
+
+ kfree(capture->ctx);
+}
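For context, the ioctl set registered by fimc_register_capture_device() supports the standard V4L2 streaming sequence; a minimal user-space sketch (the device node, resolution and pixel format are illustrative assumptions, and error handling is trimmed):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format f;
	struct v4l2_requestbuffers req;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	int fd = open("/dev/video1", O_RDWR);	/* node number is board specific */

	memset(&f, 0, sizeof(f));
	f.type = type;
	f.fmt.pix.width = 640;
	f.fmt.pix.height = 480;
	f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	ioctl(fd, VIDIOC_S_FMT, &f);		/* handled by fimc_cap_s_fmt() */

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = type;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);	/* handled by fimc_cap_reqbufs() */

	/* ...mmap and VIDIOC_QBUF each buffer, then start streaming: */
	ioctl(fd, VIDIOC_STREAMON, &type);	/* handled by fimc_cap_streamon() */
	return 0;
}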
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 6961c55baf9b..2e7c547894b6 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,7 +1,7 @@
/*
* S5P camera interface (video postprocessor) driver
*
- * Copyright (c) 2010 Samsung Electronics
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd
*
* Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
@@ -38,86 +38,103 @@ static struct fimc_fmt fimc_formats[] = {
.depth = 16,
.color = S5P_FIMC_RGB565,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = 32,
.color = S5P_FIMC_RGB666,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "XRGB-8-8-8-8, 24 bpp",
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 32,
.color = S5P_FIMC_RGB888,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.color = S5P_FIMC_YCBYCR422,
.buff_cnt = 1,
- .planes_cnt = 1
- }, {
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
+ }, {
.name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.color = S5P_FIMC_CBYCRY422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = 16,
.color = S5P_FIMC_CRYCBY422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.color = S5P_FIMC_YCRYCB422,
.buff_cnt = 1,
- .planes_cnt = 1
+ .planes_cnt = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 12,
.color = S5P_FIMC_YCBCR422,
.buff_cnt = 1,
- .planes_cnt = 3
+ .planes_cnt = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = 16,
.color = S5P_FIMC_YCBCR422,
.buff_cnt = 1,
- .planes_cnt = 2
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = 16,
.color = S5P_FIMC_RGB565,
.buff_cnt = 1,
- .planes_cnt = 2
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:0 planar, YCbCr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.color = S5P_FIMC_YCBCR420,
.buff_cnt = 1,
- .planes_cnt = 3
+ .planes_cnt = 3,
+ .flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.color = S5P_FIMC_YCBCR420,
.buff_cnt = 1,
- .planes_cnt = 2
- }
- };
+ .planes_cnt = 2,
+ .flags = FMT_FLAGS_M2M,
+ },
+};
static struct v4l2_queryctrl fimc_ctrls[] = {
{
@@ -127,16 +144,14 @@ static struct v4l2_queryctrl fimc_ctrls[] = {
.minimum = 0,
.maximum = 1,
.default_value = 0,
- },
- {
+ }, {
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Vertical flip",
.minimum = 0,
.maximum = 1,
.default_value = 0,
- },
- {
+ }, {
.id = V4L2_CID_ROTATE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Rotation (CCW)",
@@ -158,7 +173,7 @@ static struct v4l2_queryctrl *get_ctrl(int id)
return NULL;
}
-static int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
+int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
{
if (r->width > f->width) {
if (f->width > (r->width * SCALER_MAX_HRATIO))
@@ -181,32 +196,27 @@ static int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f)
static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
{
- if (src >= tar * 64) {
+ u32 sh = 6;
+
+ if (src >= 64 * tar)
return -EINVAL;
- } else if (src >= tar * 32) {
- *ratio = 32;
- *shift = 5;
- } else if (src >= tar * 16) {
- *ratio = 16;
- *shift = 4;
- } else if (src >= tar * 8) {
- *ratio = 8;
- *shift = 3;
- } else if (src >= tar * 4) {
- *ratio = 4;
- *shift = 2;
- } else if (src >= tar * 2) {
- *ratio = 2;
- *shift = 1;
- } else {
- *ratio = 1;
- *shift = 0;
+
+ while (sh--) {
+ u32 tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
}
+ *shift = 0, *ratio = 1;
+
+ dbg("s: %d, t: %d, shift: %d, ratio: %d",
+ src, tar, *shift, *ratio);
return 0;
}
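+
+/*
+ * Editor's note (illustrative, not part of the original patch): the loop
+ * above scans shift values 5..0 and picks the largest power-of-two ratio
+ * that still satisfies src >= tar * ratio. For example, src = 1280 and
+ * tar = 160 match at sh = 3 (1 << 3 = 8), so *ratio = 8 and *shift = 3;
+ * upscaling (src < tar) falls through to *ratio = 1, *shift = 0.
+ */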
-static int fimc_set_scaler_info(struct fimc_ctx *ctx)
+int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -214,8 +224,13 @@ static int fimc_set_scaler_info(struct fimc_ctx *ctx)
int tx, ty, sx, sy;
int ret;
- tx = d_frame->width;
- ty = d_frame->height;
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ ty = d_frame->width;
+ tx = d_frame->height;
+ } else {
+ tx = d_frame->width;
+ ty = d_frame->height;
+ }
if (tx <= 0 || ty <= 0) {
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
"invalid target size: %d x %d", tx, ty);
@@ -261,12 +276,57 @@ static int fimc_set_scaler_info(struct fimc_ctx *ctx)
return 0;
}
+static void fimc_capture_handler(struct fimc_dev *fimc)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *v_buf = NULL;
+
+ if (!list_empty(&cap->active_buf_q)) {
+ v_buf = active_queue_pop(cap);
+ fimc_buf_finish(fimc, v_buf);
+ }
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ return;
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ dbg("hw ptr: %d, sw ptr: %d",
+ fimc_hw_get_frame_index(fimc), cap->buf_index);
+
+ spin_lock(&fimc->irqlock);
+ v_buf->vb.state = VIDEOBUF_ACTIVE;
+ spin_unlock(&fimc->irqlock);
+
+ /* Move the buffer to the capture active queue */
+ active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+
+ } else if (test_and_clear_bit(ST_CAPT_STREAM, &fimc->state) &&
+ cap->active_buf_cnt <= 1) {
+ fimc_deactivate_capture(fimc);
+ }
+
+ dbg("frame: %d, active_buf_cnt= %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
static irqreturn_t fimc_isr(int irq, void *priv)
{
struct fimc_vid_buffer *src_buf, *dst_buf;
- struct fimc_dev *fimc = (struct fimc_dev *)priv;
struct fimc_ctx *ctx;
+ struct fimc_dev *fimc = priv;
BUG_ON(!fimc);
fimc_hw_clear_irq(fimc);
@@ -281,12 +341,22 @@ static irqreturn_t fimc_isr(int irq, void *priv)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_buf && dst_buf) {
spin_lock(&fimc->irqlock);
- src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
wake_up(&src_buf->vb.done);
wake_up(&dst_buf->vb.done);
spin_unlock(&fimc->irqlock);
v4l2_m2m_job_finish(fimc->m2m.m2m_dev, ctx->m2m_ctx);
}
+ goto isr_unlock;
+
+ }
+
+ if (test_bit(ST_CAPT_RUN, &fimc->state))
+ fimc_capture_handler(fimc);
+
+ if (test_and_clear_bit(ST_CAPT_PEND, &fimc->state)) {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ wake_up(&fimc->irq_queue);
}
isr_unlock:
@@ -295,20 +365,13 @@ isr_unlock:
}
/* The color format (planes_cnt, buff_cnt) must be already configured. */
-static int fimc_prepare_addr(struct fimc_ctx *ctx,
- struct fimc_vid_buffer *buf, enum v4l2_buf_type type)
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+ struct fimc_frame *frame, struct fimc_addr *paddr)
{
- struct fimc_frame *frame;
- struct fimc_addr *paddr;
- u32 pix_size;
int ret = 0;
+ u32 pix_size;
- frame = ctx_m2m_get_frame(ctx, type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
- paddr = &frame->paddr;
-
- if (!buf)
+ if (buf == NULL || frame == NULL)
return -EINVAL;
pix_size = frame->width * frame->height;
@@ -344,8 +407,8 @@ static int fimc_prepare_addr(struct fimc_ctx *ctx,
}
}
- dbg("PHYS_ADDR: type= %d, y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
- type, paddr->y, paddr->cb, paddr->cr, ret);
+ dbg("PHYS_ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
+ paddr->y, paddr->cb, paddr->cr, ret);
return ret;
}
@@ -433,7 +496,7 @@ static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
*
* Return: 0 if dimensions are valid or non zero otherwise.
*/
-static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
{
struct fimc_frame *s_frame, *d_frame;
struct fimc_vid_buffer *buf = NULL;
@@ -443,12 +506,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
d_frame = &ctx->d_frame;
if (flags & FIMC_PARAMS) {
- if ((ctx->out_path == FIMC_DMA) &&
- (ctx->rotation == 90 || ctx->rotation == 270)) {
- swap(d_frame->f_width, d_frame->f_height);
- swap(d_frame->width, d_frame->height);
- }
-
/* Prepare the DMA offset ratios for scaler. */
fimc_prepare_dma_offset(ctx, &ctx->s_frame);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
@@ -466,16 +523,14 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
if (flags & FIMC_SRC_ADDR) {
buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf,
- V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ret = fimc_prepare_addr(ctx, buf, s_frame, &s_frame->paddr);
if (ret)
return ret;
}
if (flags & FIMC_DST_ADDR) {
buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, buf,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ret = fimc_prepare_addr(ctx, buf, d_frame, &d_frame->paddr);
}
return ret;
@@ -499,12 +554,14 @@ static void fimc_dma_run(void *priv)
ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
ret = fimc_prepare_config(ctx, ctx->state);
if (ret) {
- err("general configuration error");
+ err("Wrong parameters");
goto dma_unlock;
}
-
- if (fimc->m2m.ctx != ctx)
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
@@ -512,10 +569,9 @@ static void fimc_dma_run(void *priv)
fimc_hw_set_input_path(ctx);
fimc_hw_set_in_dma(ctx);
if (fimc_set_scaler_info(ctx)) {
- err("scaler configuration error");
+ err("Scaler setup error");
goto dma_unlock;
}
- fimc_hw_set_prescaler(ctx);
fimc_hw_set_scaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
@@ -524,19 +580,15 @@ static void fimc_dma_run(void *priv)
fimc_hw_set_output_path(ctx);
if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
- fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr);
+ fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr, -1);
if (ctx->state & FIMC_PARAMS)
fimc_hw_set_out_dma(ctx);
- if (ctx->scaler.enabled)
- fimc_hw_start_scaler(fimc);
- fimc_hw_en_capture(ctx);
+ fimc_activate_capture(ctx);
- ctx->state = 0;
- fimc_hw_start_in_dma(fimc);
-
- fimc->m2m.ctx = ctx;
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
+ fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
spin_unlock_irqrestore(&ctx->slock, flags);
@@ -560,7 +612,7 @@ static int fimc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
struct fimc_ctx *ctx = vq->priv_data;
struct fimc_frame *frame;
- frame = ctx_m2m_get_frame(ctx, vq->type);
+ frame = ctx_get_frame(ctx, vq->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
@@ -578,7 +630,7 @@ static int fimc_buf_prepare(struct videobuf_queue *vq,
struct fimc_frame *frame;
int ret;
- frame = ctx_m2m_get_frame(ctx, vq->type);
+ frame = ctx_get_frame(ctx, vq->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
@@ -618,10 +670,31 @@ static void fimc_buf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct fimc_ctx *ctx = vq->priv_data;
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ unsigned long flags;
+
+ dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+ if ((ctx->state & FIMC_CTX_M2M) && ctx->m2m_ctx) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+ } else if (ctx->state & FIMC_CTX_CAP) {
+ spin_lock_irqsave(&fimc->slock, flags);
+ fimc_vid_cap_buf_queue(fimc, (struct fimc_vid_buffer *)vb);
+
+ dbg("fimc->cap.active_buf_cnt: %d",
+ fimc->vid_cap.active_buf_cnt);
+
+ if (cap->active_buf_cnt >= cap->reqbufs_count ||
+ cap->active_buf_cnt >= FIMC_MAX_OUT_BUFS) {
+ if (!test_and_set_bit(ST_CAPT_STREAM, &fimc->state))
+ fimc_activate_capture(ctx);
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
}
-static struct videobuf_queue_ops fimc_qops = {
+struct videobuf_queue_ops fimc_qops = {
.buf_setup = fimc_buf_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
@@ -644,7 +717,7 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
return 0;
}
-static int fimc_m2m_enum_fmt(struct file *file, void *priv,
+int fimc_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
@@ -655,189 +728,210 @@ static int fimc_m2m_enum_fmt(struct file *file, void *priv,
fmt = &fimc_formats[f->index];
strncpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
+
return 0;
}
-static int fimc_m2m_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+int fimc_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *frame;
- frame = ctx_m2m_get_frame(ctx, f->type);
+ frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
f->fmt.pix.width = frame->width;
f->fmt.pix.height = frame->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = frame->fmt->fourcc;
+ mutex_unlock(&fimc->lock);
return 0;
}
-static struct fimc_fmt *find_format(struct v4l2_format *f)
+struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
{
struct fimc_fmt *fmt;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == f->fmt.pix.pixelformat &&
+ (fmt->flags & mask))
break;
}
- if (i == ARRAY_SIZE(fimc_formats))
- return NULL;
- return fmt;
+ return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
}
-static int fimc_m2m_try_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
+struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
+ unsigned int mask)
{
struct fimc_fmt *fmt;
- u32 max_width, max_height, mod_x, mod_y;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+ fmt = &fimc_formats[i];
+ if (fmt->mbus_code == f->code && (fmt->flags & mask))
+ break;
+ }
+
+ return (i == ARRAY_SIZE(fimc_formats)) ? NULL : fmt;
+}
+
+
+int fimc_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
struct fimc_ctx *ctx = priv;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct samsung_fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct fimc_fmt *fmt;
+ u32 max_width, mod_x, mod_y, mask;
+ int ret = -EINVAL, is_output = 0;
- fmt = find_format(f);
- if (!fmt) {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "Fourcc format (0x%X) invalid.\n", pix->pixelformat);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (ctx->state & FIMC_CTX_CAP)
+ return -EINVAL;
+ is_output = 1;
+ } else if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
return -EINVAL;
}
+ dbg("w: %d, h: %d, bpl: %d",
+ pix->width, pix->height, pix->bytesperline);
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ mask = is_output ? FMT_FLAGS_M2M : FMT_FLAGS_M2M | FMT_FLAGS_CAM;
+ fmt = find_format(f, mask);
+ if (!fmt) {
+ v4l2_err(&fimc->m2m.v4l2_dev, "Fourcc format (0x%X) invalid.\n",
+ pix->pixelformat);
+ goto tf_out;
+ }
+
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
else if (V4L2_FIELD_NONE != pix->field)
- return -EINVAL;
+ goto tf_out;
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- max_width = variant->scaler_dis_w;
- max_height = variant->scaler_dis_w;
- mod_x = variant->min_inp_pixsize;
- mod_y = variant->min_inp_pixsize;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- max_width = variant->out_rot_dis_w;
- max_height = variant->out_rot_dis_w;
- mod_x = variant->min_out_pixsize;
- mod_y = variant->min_out_pixsize;
+ if (is_output) {
+ max_width = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
} else {
- err("Wrong stream type (%d)", f->type);
- return -EINVAL;
+ max_width = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
}
- dbg("max_w= %d, max_h= %d", max_width, max_height);
-
- if (pix->height > max_height)
- pix->height = max_height;
- if (pix->width > max_width)
- pix->width = max_width;
-
if (tiled_fmt(fmt)) {
- mod_x = 64; /* 64x32 tile */
- mod_y = 32;
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (fimc->id == 1 && fimc->variant->pix_hoff)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = mod_x;
}
- dbg("mod_x= 0x%X, mod_y= 0x%X", mod_x, mod_y);
+ dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_width);
- pix->width = (pix->width == 0) ? mod_x : ALIGN(pix->width, mod_x);
- pix->height = (pix->height == 0) ? mod_y : ALIGN(pix->height, mod_y);
+ v4l_bound_align_image(&pix->width, 16, max_width, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
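+
+	/*
+	 * Editor's note (illustrative): mod_x and mod_y are log2 alignments
+	 * as expected by v4l_bound_align_image(), e.g. min_out_pixsize = 16
+	 * gives mod_x = ffs(16) - 1 = 4, i.e. the width gets rounded to a
+	 * multiple of 16 pixels.
+	 */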
if (pix->bytesperline == 0 ||
- pix->bytesperline * 8 / fmt->depth > pix->width)
+ (pix->bytesperline * 8 / fmt->depth) > pix->width)
pix->bytesperline = (pix->width * fmt->depth) >> 3;
if (pix->sizeimage == 0)
pix->sizeimage = pix->height * pix->bytesperline;
- dbg("pix->bytesperline= %d, fmt->depth= %d",
- pix->bytesperline, fmt->depth);
+ dbg("w: %d, h: %d, bpl: %d, depth: %d",
+ pix->width, pix->height, pix->bytesperline, fmt->depth);
- return 0;
-}
+ ret = 0;
+tf_out:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct fimc_ctx *ctx = priv;
- struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
- struct videobuf_queue *src_vq, *dst_vq;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_device *v4l2_dev = &fimc->m2m.v4l2_dev;
+ struct videobuf_queue *vq;
struct fimc_frame *frame;
struct v4l2_pix_format *pix;
unsigned long flags;
int ret = 0;
- BUG_ON(!ctx);
-
- ret = fimc_m2m_try_fmt(file, priv, f);
+ ret = fimc_vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
- mutex_lock(&ctx->fimc_dev->lock);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
- src_vq = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
- dst_vq = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ mutex_lock(&vq->vb_lock);
- mutex_lock(&src_vq->vb_lock);
- mutex_lock(&dst_vq->vb_lock);
+ if (videobuf_queue_is_busy(vq)) {
+ v4l2_err(v4l2_dev, "%s: queue (%d) busy\n", __func__, f->type);
+ ret = -EBUSY;
+ goto sf_out;
+ }
+ spin_lock_irqsave(&ctx->slock, flags);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- if (videobuf_queue_is_busy(src_vq)) {
- v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
- ret = -EBUSY;
- goto s_fmt_out;
- }
frame = &ctx->s_frame;
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_SRC_FMT;
- spin_unlock_irqrestore(&ctx->slock, flags);
-
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (videobuf_queue_is_busy(dst_vq)) {
- v4l2_err(v4l2_dev, "%s queue busy\n", __func__);
- ret = -EBUSY;
- goto s_fmt_out;
- }
frame = &ctx->d_frame;
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_DST_FMT;
- spin_unlock_irqrestore(&ctx->slock, flags);
} else {
+ spin_unlock_irqrestore(&ctx->slock, flags);
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
"Wrong buffer/video queue type (%d)\n", f->type);
ret = -EINVAL;
- goto s_fmt_out;
+ goto sf_out;
}
+ spin_unlock_irqrestore(&ctx->slock, flags);
pix = &f->fmt.pix;
- frame->fmt = find_format(f);
+ frame->fmt = find_format(f, FMT_FLAGS_M2M);
if (!frame->fmt) {
ret = -EINVAL;
- goto s_fmt_out;
+ goto sf_out;
}
- frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
- frame->f_height = pix->sizeimage/pix->bytesperline;
- frame->width = pix->width;
- frame->height = pix->height;
- frame->o_width = pix->width;
+ frame->f_width = pix->bytesperline * 8 / frame->fmt->depth;
+ frame->f_height = pix->height;
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->o_width = pix->width;
frame->o_height = pix->height;
- frame->offs_h = 0;
- frame->offs_v = 0;
- frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
- src_vq->field = dst_vq->field = pix->field;
+ frame->offs_h = 0;
+ frame->offs_v = 0;
+ frame->size = (pix->width * pix->height * frame->fmt->depth) >> 3;
+ vq->field = pix->field;
+
spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_PARAMS;
spin_unlock_irqrestore(&ctx->slock, flags);
- dbg("f_width= %d, f_height= %d", frame->f_width, frame->f_height);
+ dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-s_fmt_out:
- mutex_unlock(&dst_vq->vb_lock);
- mutex_unlock(&src_vq->vb_lock);
- mutex_unlock(&ctx->fimc_dev->lock);
+sf_out:
+ mutex_unlock(&vq->vb_lock);
+ mutex_unlock(&fimc->lock);
return ret;
}
@@ -884,21 +978,33 @@ static int fimc_m2m_streamoff(struct file *file, void *priv,
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
-int fimc_m2m_queryctrl(struct file *file, void *priv,
+int fimc_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
+ struct fimc_ctx *ctx = priv;
struct v4l2_queryctrl *c;
+
c = get_ctrl(qc->id);
- if (!c)
- return -EINVAL;
- *qc = *c;
- return 0;
+ if (c) {
+ *qc = *c;
+ return 0;
+ }
+
+ if (ctx->state & FIMC_CTX_CAP)
+ return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
+ core, queryctrl, qc);
+ return -EINVAL;
}
-int fimc_m2m_g_ctrl(struct file *file, void *priv,
+int fimc_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct fimc_ctx *ctx = priv;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -911,15 +1017,22 @@ int fimc_m2m_g_ctrl(struct file *file, void *priv,
ctrl->value = ctx->rotation;
break;
default:
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
- return -EINVAL;
+ if (ctx->state & FIMC_CTX_CAP) {
+ ret = v4l2_subdev_call(fimc->vid_cap.sd, core,
+ g_ctrl, ctrl);
+ } else {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Invalid control\n");
+ ret = -EINVAL;
+ }
}
dbg("ctrl->value= %d", ctrl->value);
- return 0;
+
+ mutex_unlock(&fimc->lock);
+ return ret;
}
-static int check_ctrl_val(struct fimc_ctx *ctx,
- struct v4l2_control *ctrl)
+int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
{
struct v4l2_queryctrl *c;
c = get_ctrl(ctrl->id);
@@ -936,22 +1049,23 @@ static int check_ctrl_val(struct fimc_ctx *ctx,
return 0;
}
-int fimc_m2m_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl)
{
- struct fimc_ctx *ctx = priv;
struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_dev *fimc = ctx->fimc_dev;
unsigned long flags;
- int ret = 0;
- ret = check_ctrl_val(ctx, ctrl);
- if (ret)
- return ret;
+ if (ctx->rotation != 0 &&
+ (ctrl->id == V4L2_CID_HFLIP || ctrl->id == V4L2_CID_VFLIP)) {
+ v4l2_err(&fimc->m2m.v4l2_dev,
+ "Simultaneous flip and rotation is not supported\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctx->slock, flags);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
- if (ctx->rotation != 0)
- return 0;
if (ctrl->value)
ctx->flip |= FLIP_X_AXIS;
else
@@ -959,8 +1073,6 @@ int fimc_m2m_s_ctrl(struct file *file, void *priv,
break;
case V4L2_CID_VFLIP:
- if (ctx->rotation != 0)
- return 0;
if (ctrl->value)
ctx->flip |= FLIP_Y_AXIS;
else
@@ -968,77 +1080,95 @@ int fimc_m2m_s_ctrl(struct file *file, void *priv,
break;
case V4L2_CID_ROTATE:
- if (ctrl->value == 90 || ctrl->value == 270) {
- if (ctx->out_path == FIMC_LCDFIFO &&
- !variant->has_inp_rot) {
- return -EINVAL;
- } else if (ctx->in_path == FIMC_DMA &&
- !variant->has_out_rot) {
- return -EINVAL;
- }
+ /* Check for the output rotator availability */
+ if ((ctrl->value == 90 || ctrl->value == 270) &&
+ (ctx->in_path == FIMC_DMA && !variant->has_out_rot)) {
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ return -EINVAL;
+ } else {
+ ctx->rotation = ctrl->value;
}
- ctx->rotation = ctrl->value;
- if (ctrl->value == 180)
- ctx->flip = FLIP_XY_AXIS;
break;
default:
- v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Invalid control\n");
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ v4l2_err(&fimc->m2m.v4l2_dev, "Invalid control\n");
return -EINVAL;
}
- spin_lock_irqsave(&ctx->slock, flags);
ctx->state |= FIMC_PARAMS;
spin_unlock_irqrestore(&ctx->slock, flags);
+
return 0;
}
+static int fimc_m2m_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret)
+ return ret;
+
+	return fimc_s_ctrl(ctx, ctrl);
+}
-static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
+int fimc_vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = fh;
+ struct fimc_dev *fimc = ctx->fimc_dev;
- frame = ctx_m2m_get_frame(ctx, cr->type);
+ frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = frame->f_width;
- cr->bounds.height = frame->f_height;
- cr->defrect.left = frame->offs_h;
- cr->defrect.top = frame->offs_v;
- cr->defrect.width = frame->o_width;
- cr->defrect.height = frame->o_height;
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->f_width;
+ cr->bounds.height = frame->f_height;
+ cr->defrect = cr->bounds;
+
+ mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+int fimc_vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
- frame = ctx_m2m_get_frame(ctx, cr->type);
+ frame = ctx_get_frame(ctx, cr->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
cr->c.left = frame->offs_h;
cr->c.top = frame->offs_v;
cr->c.width = frame->width;
cr->c.height = frame->height;
+ mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
{
- struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
- unsigned long flags;
struct fimc_frame *f;
- u32 min_size;
- int ret = 0;
+ u32 min_size, halign;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->s_frame : &ctx->d_frame;
if (cr->c.top < 0 || cr->c.left < 0) {
v4l2_err(&fimc->m2m.v4l2_dev,
@@ -1046,66 +1176,98 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
return -EINVAL;
}
- if (cr->c.width <= 0 || cr->c.height <= 0) {
- v4l2_err(&fimc->m2m.v4l2_dev,
- "crop width and height must be greater than 0\n");
- return -EINVAL;
- }
-
- f = ctx_m2m_get_frame(ctx, cr->type);
+ f = ctx_get_frame(ctx, cr->type);
if (IS_ERR(f))
return PTR_ERR(f);
- /* Adjust to required pixel boundary. */
- min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
- fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
-
- cr->c.width = round_down(cr->c.width, min_size);
- cr->c.height = round_down(cr->c.height, min_size);
- cr->c.left = round_down(cr->c.left + 1, min_size);
- cr->c.top = round_down(cr->c.top + 1, min_size);
+ min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ? fimc->variant->min_inp_pixsize
+ : fimc->variant->min_out_pixsize;
- if ((cr->c.left + cr->c.width > f->o_width)
- || (cr->c.top + cr->c.height > f->o_height)) {
- v4l2_err(&fimc->m2m.v4l2_dev, "Error in S_CROP params\n");
- return -EINVAL;
+ if (ctx->state & FIMC_CTX_M2M) {
+ if (fimc->id == 1 && fimc->variant->pix_hoff)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(min_size) - 1;
+	/* There are stricter alignment requirements at the camera interface */
+ } else {
+ min_size = 16;
+ halign = 4;
}
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(f->fmt->depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top,
+ ctx->state & FIMC_CTX_M2M ? 8 : 16);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = file->private_data;
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ unsigned long flags;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->s_frame : &ctx->d_frame;
+
spin_lock_irqsave(&ctx->slock, flags);
- if ((ctx->state & FIMC_SRC_FMT) && (ctx->state & FIMC_DST_FMT)) {
- /* Check for the pixel scaling ratio when cropping input img. */
+ if (~ctx->state & (FIMC_SRC_FMT | FIMC_DST_FMT)) {
+ /* Check to see if scaling ratio is within supported range */
if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
ret = fimc_check_scaler_ratio(&cr->c, &ctx->d_frame);
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ else
ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
-
if (ret) {
spin_unlock_irqrestore(&ctx->slock, flags);
- v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
+ v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
return -EINVAL;
}
}
ctx->state |= FIMC_PARAMS;
- spin_unlock_irqrestore(&ctx->slock, flags);
f->offs_h = cr->c.left;
f->offs_v = cr->c.top;
- f->width = cr->c.width;
+ f->width = cr->c.width;
f->height = cr->c.height;
+
+ spin_unlock_irqrestore(&ctx->slock, flags);
return 0;
}
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap = fimc_m2m_enum_fmt,
- .vidioc_enum_fmt_vid_out = fimc_m2m_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = fimc_vidioc_enum_fmt,
+ .vidioc_enum_fmt_vid_out = fimc_vidioc_enum_fmt,
- .vidioc_g_fmt_vid_cap = fimc_m2m_g_fmt,
- .vidioc_g_fmt_vid_out = fimc_m2m_g_fmt,
+ .vidioc_g_fmt_vid_cap = fimc_vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out = fimc_vidioc_g_fmt,
- .vidioc_try_fmt_vid_cap = fimc_m2m_try_fmt,
- .vidioc_try_fmt_vid_out = fimc_m2m_try_fmt,
+ .vidioc_try_fmt_vid_cap = fimc_vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out = fimc_vidioc_try_fmt,
.vidioc_s_fmt_vid_cap = fimc_m2m_s_fmt,
.vidioc_s_fmt_vid_out = fimc_m2m_s_fmt,
@@ -1119,13 +1281,13 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
- .vidioc_queryctrl = fimc_m2m_queryctrl,
- .vidioc_g_ctrl = fimc_m2m_g_ctrl,
+ .vidioc_queryctrl = fimc_vidioc_queryctrl,
+ .vidioc_g_ctrl = fimc_vidioc_g_ctrl,
.vidioc_s_ctrl = fimc_m2m_s_ctrl,
- .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_g_crop = fimc_vidioc_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
- .vidioc_cropcap = fimc_m2m_cropcap
+ .vidioc_cropcap = fimc_vidioc_cropcap
};
@@ -1136,9 +1298,9 @@ static void queue_init(void *priv, struct videobuf_queue *vq,
struct fimc_dev *fimc = ctx->fimc_dev;
videobuf_queue_dma_contig_init(vq, &fimc_qops,
- fimc->m2m.v4l2_dev.dev,
+ &fimc->pdev->dev,
&fimc->irqlock, type, V4L2_FIELD_NONE,
- sizeof(struct fimc_vid_buffer), priv);
+ sizeof(struct fimc_vid_buffer), priv, NULL);
}
static int fimc_m2m_open(struct file *file)
@@ -1147,25 +1309,38 @@ static int fimc_m2m_open(struct file *file)
struct fimc_ctx *ctx = NULL;
int err = 0;
- mutex_lock(&fimc->lock);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ dbg("pid: %d, state: 0x%lx, refcnt: %d",
+ task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+
+ /*
+ * Return if the corresponding video capture node
+ * is already opened.
+ */
+ if (fimc->vid_cap.refcnt > 0) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
fimc->m2m.refcnt++;
set_bit(ST_OUTDMA_RUN, &fimc->state);
- mutex_unlock(&fimc->lock);
-
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
file->private_data = ctx;
ctx->fimc_dev = fimc;
- /* default format */
+ /* Default color format */
ctx->s_frame.fmt = &fimc_formats[0];
ctx->d_frame.fmt = &fimc_formats[0];
- /* per user process device context initialization */
- ctx->state = 0;
+ /* Setup the device context for mem2mem mode. */
+ ctx->state = FIMC_CTX_M2M;
ctx->flags = 0;
- ctx->effect.type = S5P_FIMC_EFFECT_ORIGINAL;
ctx->in_path = FIMC_DMA;
ctx->out_path = FIMC_DMA;
spin_lock_init(&ctx->slock);
@@ -1175,6 +1350,9 @@ static int fimc_m2m_open(struct file *file)
err = PTR_ERR(ctx->m2m_ctx);
kfree(ctx);
}
+
+err_unlock:
+ mutex_unlock(&fimc->lock);
return err;
}
@@ -1183,11 +1361,16 @@ static int fimc_m2m_release(struct file *file)
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
+ mutex_lock(&fimc->lock);
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
v4l2_m2m_ctx_release(ctx->m2m_ctx);
kfree(ctx);
- mutex_lock(&fimc->lock);
if (--fimc->m2m.refcnt <= 0)
clear_bit(ST_OUTDMA_RUN, &fimc->state);
+
mutex_unlock(&fimc->lock);
return 0;
}
@@ -1196,6 +1379,7 @@ static unsigned int fimc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_ctx *ctx = file->private_data;
+
return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
@@ -1203,6 +1387,7 @@ static unsigned int fimc_m2m_poll(struct file *file,
static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_ctx *ctx = file->private_data;
+
return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
@@ -1241,7 +1426,7 @@ static int fimc_register_m2m_device(struct fimc_dev *fimc)
ret = v4l2_device_register(&pdev->dev, v4l2_dev);
if (ret)
- return ret;;
+ goto err_m2m_r1;
vfd = video_device_alloc();
if (!vfd) {
@@ -1293,7 +1478,7 @@ static void fimc_unregister_m2m_device(struct fimc_dev *fimc)
if (fimc) {
v4l2_m2m_release(fimc->m2m.m2m_dev);
video_unregister_device(fimc->m2m.vfd);
- video_device_release(fimc->m2m.vfd);
+
v4l2_device_unregister(&fimc->m2m.v4l2_dev);
}
}
@@ -1337,7 +1522,7 @@ static int fimc_probe(struct platform_device *pdev)
drv_data = (struct samsung_fimc_driverdata *)
platform_get_device_id(pdev)->driver_data;
- if (pdev->id >= drv_data->devs_cnt) {
+ if (pdev->id >= drv_data->num_entities) {
dev_err(&pdev->dev, "Invalid platform device id: %d\n",
pdev->id);
return -EINVAL;
@@ -1350,9 +1535,11 @@ static int fimc_probe(struct platform_device *pdev)
fimc->id = pdev->id;
fimc->variant = drv_data->variant[fimc->id];
fimc->pdev = pdev;
+ fimc->pdata = pdev->dev.platform_data;
fimc->state = ST_IDLE;
spin_lock_init(&fimc->irqlock);
+ init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
mutex_init(&fimc->lock);
@@ -1382,6 +1569,7 @@ static int fimc_probe(struct platform_device *pdev)
ret = fimc_clk_get(fimc);
if (ret)
goto err_regs_unmap;
+ clk_set_rate(fimc->clock[0], drv_data->lclk_frequency);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -1399,25 +1587,38 @@ static int fimc_probe(struct platform_device *pdev)
goto err_clk;
}
- fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
- if (!fimc->work_queue) {
- ret = -ENOMEM;
- goto err_irq;
- }
-
ret = fimc_register_m2m_device(fimc);
if (ret)
- goto err_wq;
+ goto err_irq;
+
+	/* At least one camera sensor is required to register the capture node */
+ if (fimc->pdata) {
+ int i;
+ for (i = 0; i < FIMC_MAX_CAMIF_CLIENTS; ++i)
+ if (fimc->pdata->isp_info[i])
+ break;
+
+ if (i < FIMC_MAX_CAMIF_CLIENTS) {
+ ret = fimc_register_capture_device(fimc);
+ if (ret)
+ goto err_m2m;
+ }
+ }
- fimc_hw_en_lastirq(fimc, true);
+ /*
+ * Exclude the additional output DMA address registers by masking
+	 * them out on HW revisions that provide extended capabilities.
+ */
+ if (fimc->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(fimc, 0xF);
dev_dbg(&pdev->dev, "%s(): fimc-%d registered successfully\n",
__func__, fimc->id);
return 0;
-err_wq:
- destroy_workqueue(fimc->work_queue);
+err_m2m:
+ fimc_unregister_m2m_device(fimc);
err_irq:
free_irq(fimc->irq, fimc);
err_clk:
@@ -1429,7 +1630,7 @@ err_req_region:
kfree(fimc->regs_res);
err_info:
kfree(fimc);
- dev_err(&pdev->dev, "failed to install\n");
+
return ret;
}
@@ -1438,91 +1639,151 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc =
(struct fimc_dev *)platform_get_drvdata(pdev);
- v4l2_info(&fimc->m2m.v4l2_dev, "Removing %s\n", pdev->name);
-
free_irq(fimc->irq, fimc);
-
fimc_hw_reset(fimc);
fimc_unregister_m2m_device(fimc);
+ fimc_unregister_capture_device(fimc);
+
fimc_clk_release(fimc);
iounmap(fimc->regs);
release_resource(fimc->regs_res);
kfree(fimc->regs_res);
kfree(fimc);
+
+ dev_info(&pdev->dev, "%s driver unloaded\n", pdev->name);
return 0;
}
-static struct samsung_fimc_variant fimc01_variant_s5p = {
- .has_inp_rot = 1,
- .has_out_rot = 1,
+/* Image pixel limits, similar across several FIMC HW revisions. */
+static struct fimc_pix_limit s5p_pix_limit[3] = {
+ [0] = {
+ .scaler_en_w = 3264,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [1] = {
+ .scaler_en_w = 4224,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1920,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1920,
+ .out_rot_dis_w = 4224,
+ },
+ [2] = {
+ .scaler_en_w = 1920,
+ .scaler_dis_w = 8192,
+ .in_rot_en_h = 1280,
+ .in_rot_dis_w = 8192,
+ .out_rot_en_w = 1280,
+ .out_rot_dis_w = 1920,
+ },
+};
+
+static struct samsung_fimc_variant fimc0_variant_s5p = {
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
-
- .scaler_en_w = 3264,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[0],
};
static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[1],
+};
- .scaler_en_w = 4224,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
- .pix_hoff = 1,
- .has_inp_rot = 1,
- .has_out_rot = 1,
+static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
.min_inp_pixsize = 16,
- .min_out_pixsize = 32,
-
- .scaler_en_w = 4224,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1920,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[2],
};
static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.pix_hoff = 1,
.min_inp_pixsize = 16,
- .min_out_pixsize = 32,
-
- .scaler_en_w = 1920,
- .scaler_dis_w = 8192,
- .in_rot_en_h = 1280,
- .in_rot_dis_w = 8192,
- .out_rot_en_w = 1280,
- .out_rot_dis_w = 1920,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 8,
+ .out_buf_count = 4,
+ .pix_limit = &s5p_pix_limit[2],
+};
+
+static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
+ .pix_hoff = 1,
+ .has_inp_rot = 1,
+ .has_out_rot = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[1],
+};
+
+static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
+ .pix_hoff = 1,
+ .min_inp_pixsize = 16,
+ .min_out_pixsize = 16,
+ .hor_offs_align = 1,
+ .out_buf_count = 32,
+ .pix_limit = &s5p_pix_limit[2],
};
+/* S5PC100 */
static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
.variant = {
- [0] = &fimc01_variant_s5p,
- [1] = &fimc01_variant_s5p,
+ [0] = &fimc0_variant_s5p,
+ [1] = &fimc0_variant_s5p,
[2] = &fimc2_variant_s5p,
},
- .devs_cnt = 3
+ .num_entities = 3,
+ .lclk_frequency = 133000000UL,
};
+/* S5PV210, S5PC110 */
static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
.variant = {
- [0] = &fimc01_variant_s5pv210,
- [1] = &fimc01_variant_s5pv210,
+ [0] = &fimc0_variant_s5pv210,
+ [1] = &fimc1_variant_s5pv210,
[2] = &fimc2_variant_s5pv210,
},
- .devs_cnt = 3
+ .num_entities = 3,
+ .lclk_frequency = 166000000UL,
+};
+
+/* S5PV310, S5PC210 */
+static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = {
+ .variant = {
+ [0] = &fimc0_variant_s5pv310,
+ [1] = &fimc0_variant_s5pv310,
+ [2] = &fimc0_variant_s5pv310,
+ [3] = &fimc2_variant_s5pv310,
+ },
+ .num_entities = 4,
+ .lclk_frequency = 166000000UL,
};
static struct platform_device_id fimc_driver_ids[] = {
@@ -1532,6 +1793,9 @@ static struct platform_device_id fimc_driver_ids[] = {
}, {
.name = "s5pv210-fimc",
.driver_data = (unsigned long)&fimc_drvdata_s5pv210,
+ }, {
+ .name = "s5pv310-fimc",
+ .driver_data = (unsigned long)&fimc_drvdata_s5pv310,
},
{},
};
@@ -1547,20 +1811,12 @@ static struct platform_driver fimc_driver = {
}
};
-static char banner[] __initdata = KERN_INFO
- "S5PC Camera Interface V4L2 Driver, (c) 2010 Samsung Electronics\n";
-
static int __init fimc_init(void)
{
- u32 ret;
- printk(banner);
-
- ret = platform_driver_register(&fimc_driver);
- if (ret) {
- printk(KERN_ERR "FIMC platform driver register failed\n");
- return -1;
- }
- return 0;
+ int ret = platform_driver_register(&fimc_driver);
+ if (ret)
+ err("platform_driver_register failed: %d\n", ret);
+ return ret;
}
static void __exit fimc_exit(void)
@@ -1571,6 +1827,6 @@ static void __exit fimc_exit(void)
module_init(fimc_init);
module_exit(fimc_exit);
-MODULE_AUTHOR("Sylwester Nawrocki, s.nawrocki@samsung.com");
-MODULE_DESCRIPTION("S3C/S5P FIMC (video postprocessor) driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 6b3e0cd73cdd..3e1078516560 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -11,10 +11,14 @@
#ifndef FIMC_CORE_H_
#define FIMC_CORE_H_
+/*#define DEBUG*/
+
#include <linux/types.h>
#include <media/videobuf-core.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s3c_fimc.h>
#include <linux/videodev2.h>
#include "regs-fimc.h"
@@ -28,47 +32,72 @@
#define dbg(fmt, args...)
#endif
+/* Time to wait for next frame VSYNC interrupt while stopping operation. */
+#define FIMC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
#define NUM_FIMC_CLOCKS 2
#define MODULE_NAME "s5p-fimc"
-#define FIMC_MAX_DEVS 3
+#define FIMC_MAX_DEVS 4
#define FIMC_MAX_OUT_BUFS 4
#define SCALER_MAX_HRATIO 64
#define SCALER_MAX_VRATIO 64
+#define DMA_MIN_SIZE 8
-enum {
+/* FIMC device state flags */
+enum fimc_dev_flags {
+ /* for m2m node */
ST_IDLE,
ST_OUTDMA_RUN,
ST_M2M_PEND,
+ /* for capture node */
+ ST_CAPT_PEND,
+ ST_CAPT_RUN,
+ ST_CAPT_STREAM,
+ ST_CAPT_SHUT,
};
#define fimc_m2m_active(dev) test_bit(ST_OUTDMA_RUN, &(dev)->state)
#define fimc_m2m_pending(dev) test_bit(ST_M2M_PEND, &(dev)->state)
+#define fimc_capture_running(dev) test_bit(ST_CAPT_RUN, &(dev)->state)
+#define fimc_capture_pending(dev) test_bit(ST_CAPT_PEND, &(dev)->state)
+
+#define fimc_capture_active(dev) \
+ (test_bit(ST_CAPT_RUN, &(dev)->state) || \
+ test_bit(ST_CAPT_PEND, &(dev)->state))
+
+#define fimc_capture_streaming(dev) \
+ test_bit(ST_CAPT_STREAM, &(dev)->state)
+
+#define fimc_buf_finish(dev, vid_buf) do { \
+ spin_lock(&(dev)->irqlock); \
+ (vid_buf)->vb.state = VIDEOBUF_DONE; \
+ spin_unlock(&(dev)->irqlock); \
+ wake_up(&(vid_buf)->vb.done); \
+} while (0)
+
enum fimc_datapath {
- FIMC_ITU_CAM_A,
- FIMC_ITU_CAM_B,
- FIMC_MIPI_CAM,
+ FIMC_CAMERA,
FIMC_DMA,
FIMC_LCDFIFO,
FIMC_WRITEBACK
};
enum fimc_color_fmt {
- S5P_FIMC_RGB565,
+ S5P_FIMC_RGB565 = 0x10,
S5P_FIMC_RGB666,
S5P_FIMC_RGB888,
- S5P_FIMC_YCBCR420,
+ S5P_FIMC_RGB30_LOCAL,
+ S5P_FIMC_YCBCR420 = 0x20,
S5P_FIMC_YCBCR422,
S5P_FIMC_YCBYCR422,
S5P_FIMC_YCRYCB422,
S5P_FIMC_CBYCRY422,
S5P_FIMC_CRYCBY422,
- S5P_FIMC_RGB30_LOCAL,
S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_MAX_COLOR = S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_COLOR_MASK = 0x0F,
};
+#define fimc_fmt_is_rgb(x) ((x) & 0x10)
+
/* Y/Cb/Cr components order at DMA output for 1 plane YCbCr 4:2:2 formats. */
#define S5P_FIMC_OUT_CRYCBY S5P_CIOCTRL_ORDER422_CRYCBY
#define S5P_FIMC_OUT_CBYCRY S5P_CIOCTRL_ORDER422_YCRYCB
@@ -93,11 +122,13 @@ enum fimc_color_fmt {
#define S5P_FIMC_EFFECT_SIKHOUETTE S5P_CIIMGEFF_FIN_SILHOUETTE
/* The hardware context state. */
-#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_ADDR (1 << 1)
-#define FIMC_DST_ADDR (1 << 2)
-#define FIMC_SRC_FMT (1 << 3)
-#define FIMC_DST_FMT (1 << 4)
+#define FIMC_PARAMS (1 << 0)
+#define FIMC_SRC_ADDR (1 << 1)
+#define FIMC_DST_ADDR (1 << 2)
+#define FIMC_SRC_FMT (1 << 3)
+#define FIMC_DST_FMT (1 << 4)
+#define FIMC_CTX_M2M (1 << 5)
+#define FIMC_CTX_CAP (1 << 6)
/* Image conversion flags */
#define FIMC_IN_DMA_ACCESS_TILED (1 << 0)
@@ -106,7 +137,9 @@ enum fimc_color_fmt {
#define FIMC_OUT_DMA_ACCESS_LINEAR (0 << 1)
#define FIMC_SCAN_MODE_PROGRESSIVE (0 << 2)
#define FIMC_SCAN_MODE_INTERLACED (1 << 2)
-/* YCbCr data dynamic range for RGB-YUV color conversion. Y/Cb/Cr: (0 ~ 255) */
+/*
+ * YCbCr data dynamic range for RGB-YUV color conversion.
+ * Y/Cb/Cr: (0 ~ 255)
+ */
#define FIMC_COLOR_RANGE_WIDE (0 << 3)
/* Y (16 ~ 235), Cb/Cr (16 ~ 240) */
#define FIMC_COLOR_RANGE_NARROW (1 << 3)
@@ -118,20 +151,25 @@ enum fimc_color_fmt {
/**
* struct fimc_fmt - the driver's internal color format data
+ * @mbus_code: Media Bus pixel code, -1 if not applicable
* @name: format description
- * @fourcc: the fourcc code for this format
+ * @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: number of bits per pixel
+ * @depth: driver's private 'number of bits per pixel'
* @buff_cnt: number of physically non-contiguous data planes
* @planes_cnt: number of physically contiguous data planes
*/
struct fimc_fmt {
+ enum v4l2_mbus_pixelcode mbus_code;
char *name;
u32 fourcc;
u32 color;
- u32 depth;
u16 buff_cnt;
u16 planes_cnt;
+ u16 depth;
+ u16 flags;
+#define FMT_FLAGS_CAM (1 << 0)
+#define FMT_FLAGS_M2M (1 << 1)
};
/**
@@ -167,37 +205,37 @@ struct fimc_effect {
/**
* struct fimc_scaler - the configuration data for FIMC inetrnal scaler
*
- * @enabled: the flag set when the scaler is used
+ * @scaleup_h: flag indicating scaling up horizontally
+ * @scaleup_v: flag indicating scaling up vertically
+ * @copy_mode: flag indicating transparent DMA transfer (no scaling
+ *		or color format conversion)
+ * @enabled: flag indicating if the scaler is used
* @hfactor: horizontal shift factor
* @vfactor: vertical shift factor
* @pre_hratio: horizontal ratio of the prescaler
* @pre_vratio: vertical ratio of the prescaler
* @pre_dst_width: the prescaler's destination width
* @pre_dst_height: the prescaler's destination height
- * @scaleup_h: flag indicating scaling up horizontally
- * @scaleup_v: flag indicating scaling up vertically
* @main_hratio: the main scaler's horizontal ratio
* @main_vratio: the main scaler's vertical ratio
- * @real_width: source width - offset
- * @real_height: source height - offset
- * @copy_mode: flag set if one-to-one mode is used, i.e. no scaling
- * and color format conversion
+ * @real_width:	source width in pixels (width - offset)
+ * @real_height:	source height in pixels (height - offset)
*/
struct fimc_scaler {
- u32 enabled;
+ unsigned int scaleup_h:1;
+ unsigned int scaleup_v:1;
+ unsigned int copy_mode:1;
+ unsigned int enabled:1;
u32 hfactor;
u32 vfactor;
u32 pre_hratio;
u32 pre_vratio;
u32 pre_dst_width;
u32 pre_dst_height;
- u32 scaleup_h;
- u32 scaleup_v;
u32 main_hratio;
u32 main_vratio;
u32 real_width;
u32 real_height;
- u32 copy_mode;
};
/**
@@ -215,15 +253,18 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
- * @vb: v4l videobuf buffer
+ * @vb: v4l videobuf buffer
+ * @paddr: precalculated physical address set
+ * @index: buffer index for the output DMA engine
*/
struct fimc_vid_buffer {
struct videobuf_buffer vb;
+ struct fimc_addr paddr;
+ int index;
};
/**
- * struct fimc_frame - input/output frame format properties
- *
+ * struct fimc_frame - source/target frame properties
* @f_width: image full width (virtual screen size)
* @f_height: image full height (virtual screen size)
* @o_width: original image width as set by S_FMT
@@ -270,67 +311,119 @@ struct fimc_m2m_device {
};
/**
+ * struct fimc_vid_cap - camera capture device information
+ * @ctx: hardware context data
+ * @vfd: video device node for camera capture mode
+ * @v4l2_dev: v4l2_device struct to manage subdevs
+ * @sd: pointer to camera sensor subdevice currently in use
+ * @fmt: Media Bus format configured at the selected image sensor
+ * @pending_buf_q: the pending buffer queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vbq: the capture video buffer queue
+ * @active_buf_cnt: number of video buffers scheduled in hardware
+ * @buf_index: index for managing the output DMA buffers
+ * @frame_count: the frame counter for statistics
+ * @reqbufs_count: the number of buffers requested in REQBUFS ioctl
+ * @input_index: input (camera sensor) index
+ * @refcnt: driver's private reference counter
+ */
+struct fimc_vid_cap {
+ struct fimc_ctx *ctx;
+ struct video_device *vfd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *sd;
+ struct v4l2_mbus_framefmt fmt;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct videobuf_queue vbq;
+ int active_buf_cnt;
+ int buf_index;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ int input_index;
+ int refcnt;
+};
+
+/**
+ * struct fimc_pix_limit - image pixel size limits in various IP configurations
+ *
+ * @scaler_en_w: max input pixel width when the scaler is enabled
+ * @scaler_dis_w: max input pixel width when the scaler is disabled
+ * @in_rot_en_h: max input width when the input rotator is on
+ * @in_rot_dis_w: max input width when the input rotator is off
+ * @out_rot_en_w: max output width with the output rotator on
+ * @out_rot_dis_w: max output width with the output rotator off
+ */
+struct fimc_pix_limit {
+ u16 scaler_en_w;
+ u16 scaler_dis_w;
+ u16 in_rot_en_h;
+ u16 in_rot_dis_w;
+ u16 out_rot_en_w;
+ u16 out_rot_dis_w;
+};
+
+/**
* struct samsung_fimc_variant - camera interface variant information
*
* @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
+ * @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
- * @scaler_en_w: maximum input pixel width when the scaler is enabled
- * @scaler_dis_w: maximum input pixel width when the scaler is disabled
- * @in_rot_en_h: maximum input width when the input rotator is used
- * @in_rot_dis_w: maximum input width when the input rotator is used
- * @out_rot_en_w: maximum output width for the output rotator enabled
- * @out_rot_dis_w: maximum output width for the output rotator enabled
+ * @hor_offs_align: horizontal pixel offset alignment
+ * @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
unsigned int pix_hoff:1;
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
-
+ struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
- u16 scaler_en_w;
- u16 scaler_dis_w;
- u16 in_rot_en_h;
- u16 in_rot_dis_w;
- u16 out_rot_en_w;
- u16 out_rot_dis_w;
+ u16 hor_offs_align;
+ u16 out_buf_count;
};
/**
- * struct samsung_fimc_driverdata - per-device type driver data for init time.
+ * struct samsung_fimc_driverdata - per device type driver data for init time.
*
* @variant: the variant information for this driver.
* @dev_cnt: number of fimc sub-devices available in SoC
+ * @lclk_frequency: fimc bus clock frequency
*/
struct samsung_fimc_driverdata {
struct samsung_fimc_variant *variant[FIMC_MAX_DEVS];
- int devs_cnt;
+ unsigned long lclk_frequency;
+ int num_entities;
};
struct fimc_ctx;
/**
- * struct fimc_subdev - abstraction for a FIMC entity
+ * struct fimc_dev - abstraction for FIMC entity
*
* @slock: the spinlock protecting this data structure
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
+ * @pdata: pointer to the device platform data
* @id: FIMC device index (0..2)
* @clock[]: the clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
* @irq: interrupt number of the FIMC subdevice
- * @irqlock: spinlock protecting videbuffer queue
+ * @irqlock: spinlock protecting videobuffer queue
+ * @irq_queue: wait queue used to synchronize with the interrupt handler
* @m2m: memory-to-memory V4L2 device information
- * @state: the FIMC device state flags
+ * @vid_cap: camera capture device information
+ * @state: flags used to synchronize m2m and capture mode operation
*/
struct fimc_dev {
spinlock_t slock;
struct mutex lock;
struct platform_device *pdev;
+ struct s3c_platform_fimc *pdata;
struct samsung_fimc_variant *variant;
int id;
struct clk *clock[NUM_FIMC_CLOCKS];
@@ -338,8 +431,9 @@ struct fimc_dev {
struct resource *regs_res;
int irq;
spinlock_t irqlock;
- struct workqueue_struct *work_queue;
+ wait_queue_head_t irq_queue;
struct fimc_m2m_device m2m;
+ struct fimc_vid_cap vid_cap;
unsigned long state;
};
@@ -359,7 +453,7 @@ struct fimc_dev {
* @effect: image effect
* @rotation: image clockwise rotation in degrees
* @flip: image flip mode
- * @flags: an additional flags for image conversion
+ * @flags: additional flags for image conversion
* @state: flags to keep track of user configuration
* @fimc_dev: the FIMC device this context applies to
* @m2m_ctx: memory-to-memory device context
@@ -384,6 +478,7 @@ struct fimc_ctx {
struct v4l2_m2m_ctx *m2m_ctx;
};
+extern struct videobuf_queue_ops fimc_qops;
static inline int tiled_fmt(struct fimc_fmt *fmt)
{
@@ -397,18 +492,24 @@ static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
writel(cfg, dev->regs + S5P_CIGCTRL);
}
-static inline void fimc_hw_start_scaler(struct fimc_dev *dev)
+static inline void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
{
u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg |= S5P_CISCCTRL_SCALERSTART;
+ if (on)
+ cfg |= S5P_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~S5P_CISCCTRL_SCALERSTART;
writel(cfg, dev->regs + S5P_CISCCTRL);
}
-static inline void fimc_hw_stop_scaler(struct fimc_dev *dev)
+static inline void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
{
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~S5P_CISCCTRL_SCALERSTART;
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + S5P_MSCTRL);
+ if (on)
+ cfg |= S5P_MSCTRL_ENVID;
+ else
+ cfg &= ~S5P_MSCTRL_ENVID;
+ writel(cfg, dev->regs + S5P_MSCTRL);
}
static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
@@ -418,27 +519,30 @@ static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
writel(cfg, dev->regs + S5P_CIIMGCPT);
}
-static inline void fimc_hw_start_in_dma(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg |= S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
-}
-
-static inline void fimc_hw_stop_in_dma(struct fimc_dev *dev)
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @mask: each bit corresponds to one of the 32 output buffer address
+ *	  registers; set a bit to 1 to include the buffer in the sequence,
+ *	  0 to exclude it
+ *
+ * This function masks the output DMA ring buffers, i.e. it selects which
+ * of the output buffer address registers will be used by the DMA engine.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(mask, dev->regs + S5P_CIFCNTSEQ);
}
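+
+/*
+ * Editor's note (illustrative usage): a mask of 0xF limits the DMA engine
+ * to output buffer address registers 0..3, matching the 4-buffer sequence
+ * programmed in fimc_probe() for variants with out_buf_count > 4:
+ *
+ *	fimc_hw_set_dma_seq(fimc, 0xF);
+ */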
-static inline struct fimc_frame *ctx_m2m_get_frame(struct fimc_ctx *ctx,
- enum v4l2_buf_type type)
+static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
+ enum v4l2_buf_type type)
{
struct fimc_frame *frame;
if (V4L2_BUF_TYPE_VIDEO_OUTPUT == type) {
- frame = &ctx->s_frame;
+ if (ctx->state & FIMC_CTX_M2M)
+ frame = &ctx->s_frame;
+ else
+ return ERR_PTR(-EINVAL);
} else if (V4L2_BUF_TYPE_VIDEO_CAPTURE == type) {
frame = &ctx->d_frame;
} else {
@@ -450,22 +554,137 @@ static inline struct fimc_frame *ctx_m2m_get_frame(struct fimc_ctx *ctx,
return frame;
}
+static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ u32 reg = readl(dev->regs + S5P_CISTATUS);
+ return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
+ S5P_CISTATUS_FRAMECNT_SHIFT;
+}
+
/* -----------------------------------------------------*/
/* fimc-reg.c */
-void fimc_hw_reset(struct fimc_dev *dev);
+void fimc_hw_reset(struct fimc_dev *fimc);
void fimc_hw_set_rotation(struct fimc_ctx *ctx);
void fimc_hw_set_target_format(struct fimc_ctx *ctx);
void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
-void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable);
-void fimc_hw_en_irq(struct fimc_dev *dev, int enable);
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
void fimc_hw_set_scaler(struct fimc_ctx *ctx);
void fimc_hw_en_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
-void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
-void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam);
+
+/* -----------------------------------------------------*/
+/* fimc-core.c */
+int fimc_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int fimc_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
+int fimc_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f);
+int fimc_vidioc_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *cr);
+int fimc_vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr);
+int fimc_vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc);
+int fimc_vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl);
+
+int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr);
+int check_ctrl_val(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
+int fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_control *ctrl);
+
+struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask);
+struct fimc_fmt *find_mbus_format(struct v4l2_mbus_framefmt *f,
+ unsigned int mask);
+
+int fimc_check_scaler_ratio(struct v4l2_rect *r, struct fimc_frame *f);
+int fimc_set_scaler_info(struct fimc_ctx *ctx);
+int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags);
+int fimc_prepare_addr(struct fimc_ctx *ctx, struct fimc_vid_buffer *buf,
+ struct fimc_frame *frame, struct fimc_addr *paddr);
+
+/* -----------------------------------------------------*/
+/* fimc-capture.c */
+int fimc_register_capture_device(struct fimc_dev *fimc);
+void fimc_unregister_capture_device(struct fimc_dev *fimc);
+int fimc_sensor_sd_init(struct fimc_dev *fimc, int index);
+int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
+ struct fimc_vid_buffer *fimc_vb);
+
+/* Locking: the caller holds fimc->slock */
+static inline void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_en_capture(ctx);
+}
+
+static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_dis_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
+
+/*
+ * Add video buffer to the active buffers queue.
+ * The caller must hold the irqlock spinlock.
+ */
+static inline void active_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ buf->vb.state = VIDEOBUF_ACTIVE;
+ list_add_tail(&buf->vb.queue, &vid_cap->active_buf_q);
+ vid_cap->active_buf_cnt++;
+}
+
+/*
+ * Pop a video buffer from the capture active buffers queue.
+ * Locking: needs to be called with dev->slock held.
+ */
+static inline struct fimc_vid_buffer *
+active_queue_pop(struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->active_buf_q.next,
+ struct fimc_vid_buffer, vb.queue);
+ list_del(&buf->vb.queue);
+ vid_cap->active_buf_cnt--;
+ return buf;
+}
+
+/* Add video buffer to the capture pending buffers queue */
+static inline void fimc_pending_queue_add(struct fimc_vid_cap *vid_cap,
+ struct fimc_vid_buffer *buf)
+{
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vid_cap->pending_buf_q);
+}
+
+/* Pop a video buffer from the capture pending buffers queue */
+static inline struct fimc_vid_buffer *
+pending_queue_pop(struct fimc_vid_cap *vid_cap)
+{
+ struct fimc_vid_buffer *buf;
+ buf = list_entry(vid_cap->pending_buf_q.next,
+ struct fimc_vid_buffer, vb.queue);
+ list_del(&buf->vb.queue);
+ return buf;
+}
+
#endif /* FIMC_CORE_H_ */
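/*
 * A minimal user-space sketch (not part of the patch) of the 32-bit buffer
 * sequence mask that fimc_hw_set_dma_seq() above writes to S5P_CIFCNTSEQ.
 * fimc_example_dma_seq_mask() is a hypothetical helper used only for this
 * illustration; it is not a driver function.
 */
#include <assert.h>
#include <stdint.h>

/* Enable the first 'count' of the 32 output DMA buffer slots. */
static uint32_t fimc_example_dma_seq_mask(unsigned int count)
{
	if (count >= 32)
		return 0xFFFFFFFFu;	/* all buffer registers take part */
	return (1u << count) - 1;	/* buffers 0 .. count-1 only */
}

int main(void)
{
	assert(fimc_example_dma_seq_mask(1)  == 0x00000001u);
	assert(fimc_example_dma_seq_mask(4)  == 0x0000000Fu);
	assert(fimc_example_dma_seq_mask(32) == 0xFFFFFFFFu);
	/* In the driver the mask would then be passed to fimc_hw_set_dma_seq(). */
	return 0;
}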
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 5570f1ce0c9c..511631a2e5c3 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/map.h>
+#include <media/s3c_fimc.h>
#include "fimc-core.h"
@@ -29,49 +30,11 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
writel(cfg, dev->regs + S5P_CIGCTRL);
- msleep(1);
+ udelay(1000);
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
-
-}
-
-void fimc_hw_set_rotation(struct fimc_ctx *ctx)
-{
- u32 cfg, flip;
- struct fimc_dev *dev = ctx->fimc_dev;
-
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90);
-
- flip = readl(dev->regs + S5P_MSCTRL);
- flip &= ~S5P_MSCTRL_FLIP_MASK;
-
- /*
- * The input and output rotator cannot work simultaneously.
- * Use the output rotator in output DMA mode or the input rotator
- * in direct fifo output mode.
- */
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- if (ctx->out_path == FIMC_LCDFIFO) {
- cfg |= S5P_CITRGFMT_INROT90;
- if (ctx->rotation == 270)
- flip |= S5P_MSCTRL_FLIP_180;
- } else {
- cfg |= S5P_CITRGFMT_OUTROT90;
- if (ctx->rotation == 270)
- cfg |= S5P_CITRGFMT_FLIP_180;
- }
- } else if (ctx->rotation == 180) {
- if (ctx->out_path == FIMC_LCDFIFO)
- flip |= S5P_MSCTRL_FLIP_180;
- else
- cfg |= S5P_CITRGFMT_FLIP_180;
- }
- if (ctx->rotation == 180 || ctx->rotation == 270)
- writel(flip, dev->regs + S5P_MSCTRL);
- writel(cfg, dev->regs + S5P_CITRGFMT);
}
static u32 fimc_hw_get_in_flip(u32 ctx_flip)
@@ -114,6 +77,46 @@ static u32 fimc_hw_get_target_flip(u32 ctx_flip)
return flip;
}
+void fimc_hw_set_rotation(struct fimc_ctx *ctx)
+{
+ u32 cfg, flip;
+ struct fimc_dev *dev = ctx->fimc_dev;
+
+ cfg = readl(dev->regs + S5P_CITRGFMT);
+ cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90 |
+ S5P_CITRGFMT_FLIP_180);
+
+ flip = readl(dev->regs + S5P_MSCTRL);
+ flip &= ~S5P_MSCTRL_FLIP_MASK;
+
+ /*
+ * The input and output rotator cannot work simultaneously.
+ * Use the output rotator in output DMA mode or the input rotator
+ * in direct fifo output mode.
+ */
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ if (ctx->out_path == FIMC_LCDFIFO) {
+ cfg |= S5P_CITRGFMT_INROT90;
+ if (ctx->rotation == 270)
+ flip |= S5P_MSCTRL_FLIP_180;
+ } else {
+ cfg |= S5P_CITRGFMT_OUTROT90;
+ if (ctx->rotation == 270)
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ } else if (ctx->rotation == 180) {
+ if (ctx->out_path == FIMC_LCDFIFO)
+ flip |= S5P_MSCTRL_FLIP_180;
+ else
+ cfg |= S5P_CITRGFMT_FLIP_180;
+ }
+ if (ctx->rotation == 180 || ctx->rotation == 270)
+ writel(flip, dev->regs + S5P_MSCTRL);
+
+ cfg |= fimc_hw_get_target_flip(ctx->flip);
+ writel(cfg, dev->regs + S5P_CITRGFMT);
+}
+
void fimc_hw_set_target_format(struct fimc_ctx *ctx)
{
u32 cfg;
@@ -149,13 +152,15 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
break;
}
- cfg |= S5P_CITRGFMT_HSIZE(frame->width);
- cfg |= S5P_CITRGFMT_VSIZE(frame->height);
+ if (ctx->rotation == 90 || ctx->rotation == 270) {
+ cfg |= S5P_CITRGFMT_HSIZE(frame->height);
+ cfg |= S5P_CITRGFMT_VSIZE(frame->width);
+ } else {
- if (ctx->rotation == 0) {
- cfg &= ~S5P_CITRGFMT_FLIP_MASK;
- cfg |= fimc_hw_get_target_flip(ctx->flip);
+ cfg |= S5P_CITRGFMT_HSIZE(frame->width);
+ cfg |= S5P_CITRGFMT_VSIZE(frame->height);
}
+
writel(cfg, dev->regs + S5P_CITRGFMT);
cfg = readl(dev->regs + S5P_CITAREA) & ~S5P_CITAREA_MASK;
@@ -167,16 +172,20 @@ static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->d_frame;
- u32 cfg = 0;
+ u32 cfg;
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- cfg |= S5P_ORIG_SIZE_HOR(frame->f_height);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_width);
- } else {
- cfg |= S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
- }
+ cfg = S5P_ORIG_SIZE_HOR(frame->f_width);
+ cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
writel(cfg, dev->regs + S5P_ORGOSIZE);
+
+ /* Select color space conversion equation (HD/SD size). */
+ cfg = readl(dev->regs + S5P_CIGCTRL);
+ if (frame->f_width >= 1280) /* HD */
+ cfg |= S5P_CIGCTRL_CSC_ITU601_709;
+ else /* SD */
+ cfg &= ~S5P_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + S5P_CIGCTRL);
+
}
void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
@@ -232,36 +241,28 @@ static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
{
- unsigned long flags;
- u32 cfg;
-
- spin_lock_irqsave(&dev->slock, flags);
-
- cfg = readl(dev->regs + S5P_CIOCTRL);
+ u32 cfg = readl(dev->regs + S5P_CIOCTRL);
if (enable)
cfg |= S5P_CIOCTRL_LASTIRQ_ENABLE;
else
cfg &= ~S5P_CIOCTRL_LASTIRQ_ENABLE;
writel(cfg, dev->regs + S5P_CIOCTRL);
-
- spin_unlock_irqrestore(&dev->slock, flags);
}
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
+static void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_scaler *sc = &ctx->scaler;
- u32 cfg = 0, shfactor;
+ u32 cfg, shfactor;
shfactor = 10 - (sc->hfactor + sc->vfactor);
- cfg |= S5P_CISCPRERATIO_SHFACTOR(shfactor);
+ cfg = S5P_CISCPRERATIO_SHFACTOR(shfactor);
cfg |= S5P_CISCPRERATIO_HOR(sc->pre_hratio);
cfg |= S5P_CISCPRERATIO_VER(sc->pre_vratio);
writel(cfg, dev->regs + S5P_CISCPRERATIO);
- cfg = 0;
- cfg |= S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
+ cfg = S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
cfg |= S5P_CISCPREDST_HEIGHT(sc->pre_dst_height);
writel(cfg, dev->regs + S5P_CISCPREDST);
}
@@ -274,6 +275,8 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_frame *dst_frame = &ctx->d_frame;
u32 cfg = 0;
+ fimc_hw_set_prescaler(ctx);
+
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -325,14 +328,18 @@ void fimc_hw_set_scaler(struct fimc_ctx *ctx)
void fimc_hw_en_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg;
- cfg = readl(dev->regs + S5P_CIIMGCPT);
- /* One shot mode for output DMA or freerun for FIFO. */
- if (ctx->out_path == FIMC_DMA)
- cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE;
- else
- cfg &= ~S5P_CIIMGCPT_CPT_FREN_ENABLE;
+ u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
+
+ if (ctx->out_path == FIMC_DMA) {
+ /* one shot mode */
+ cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE | S5P_CIIMGCPT_IMGCPTEN;
+ } else {
+ /* Continuous frame capture mode (freerun). */
+ cfg &= ~(S5P_CIIMGCPT_CPT_FREN_ENABLE |
+ S5P_CIIMGCPT_CPT_FRMOD_CNT);
+ cfg |= S5P_CIIMGCPT_IMGCPTEN;
+ }
if (ctx->scaler.enabled)
cfg |= S5P_CIIMGCPT_IMGCPTEN_SC;
@@ -364,7 +371,7 @@ static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
u32 cfg_r = 0;
if (FIMC_LCDFIFO == ctx->out_path)
- cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
cfg_o |= S5P_ORIG_SIZE_HOR(frame->f_width);
cfg_o |= S5P_ORIG_SIZE_VER(frame->f_height);
@@ -380,27 +387,25 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->s_frame;
struct fimc_dma_offset *offset = &frame->dma_offset;
- u32 cfg = 0;
+ u32 cfg;
/* Set the pixel offsets. */
- cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->y_h);
cfg |= S5P_CIO_OFFS_VER(offset->y_v);
writel(cfg, dev->regs + S5P_CIIYOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->cb_h);
cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
writel(cfg, dev->regs + S5P_CIICBOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
+ cfg = S5P_CIO_OFFS_HOR(offset->cr_h);
cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
writel(cfg, dev->regs + S5P_CIICROFF);
/* Input original and real size. */
fimc_hw_set_in_dma_size(ctx);
- /* Autoload is used currently only in FIFO mode. */
+ /* Use DMA autoload only in FIFO mode. */
fimc_hw_en_autoload(dev, ctx->out_path == FIMC_LCDFIFO);
/* Set the input DMA to process single frame only. */
@@ -501,27 +506,163 @@ void fimc_hw_set_output_path(struct fimc_ctx *ctx)
void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
{
- u32 cfg = 0;
-
- cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
+ u32 cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
cfg |= S5P_CIREAL_ISIZE_ADDR_CH_DIS;
writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
- writel(paddr->y, dev->regs + S5P_CIIYSA0);
- writel(paddr->cb, dev->regs + S5P_CIICBSA0);
- writel(paddr->cr, dev->regs + S5P_CIICRSA0);
+ writel(paddr->y, dev->regs + S5P_CIIYSA(0));
+ writel(paddr->cb, dev->regs + S5P_CIICBSA(0));
+ writel(paddr->cr, dev->regs + S5P_CIICRSA(0));
cfg &= ~S5P_CIREAL_ISIZE_ADDR_CH_DIS;
writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
}
-void fimc_hw_set_output_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
+void fimc_hw_set_output_addr(struct fimc_dev *dev,
+ struct fimc_addr *paddr, int index)
{
- int i;
- /* Set all the output register sets to point to single video buffer. */
- for (i = 0; i < FIMC_MAX_OUT_BUFS; i++) {
+ int i = (index == -1) ? 0 : index;
+ do {
writel(paddr->y, dev->regs + S5P_CIOYSA(i));
writel(paddr->cb, dev->regs + S5P_CIOCBSA(i));
writel(paddr->cr, dev->regs + S5P_CIOCRSA(i));
+ dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
+ i, paddr->y, paddr->cb, paddr->cr);
+ } while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
+}
+
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
+
+ cfg &= ~(S5P_CIGCTRL_INVPOLPCLK | S5P_CIGCTRL_INVPOLVSYNC |
+ S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC);
+
+ if (cam->flags & FIMC_CLK_INV_PCLK)
+ cfg |= S5P_CIGCTRL_INVPOLPCLK;
+
+ if (cam->flags & FIMC_CLK_INV_VSYNC)
+ cfg |= S5P_CIGCTRL_INVPOLVSYNC;
+
+ if (cam->flags & FIMC_CLK_INV_HREF)
+ cfg |= S5P_CIGCTRL_INVPOLHREF;
+
+ if (cam->flags & FIMC_CLK_INV_HSYNC)
+ cfg |= S5P_CIGCTRL_INVPOLHSYNC;
+
+ writel(cfg, fimc->regs + S5P_CIGCTRL);
+
+ return 0;
+}
+
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ struct fimc_frame *f = &fimc->vid_cap.ctx->s_frame;
+ u32 cfg = 0;
+
+ if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
+
+ switch (fimc->vid_cap.fmt.code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_YCBYCR;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_YCRYCB;
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_CRYCBY;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cfg = S5P_CISRCFMT_ORDER422_CBYCRY;
+ break;
+ default:
+ err("camera image format not supported: %d",
+ fimc->vid_cap.fmt.code);
+ return -EINVAL;
+ }
+
+ if (cam->bus_type == FIMC_ITU_601) {
+ if (cam->bus_width == 8) {
+ cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ } else if (cam->bus_width == 16) {
+ cfg |= S5P_CISRCFMT_ITU601_16BIT;
+ } else {
+ err("invalid bus width: %d", cam->bus_width);
+ return -EINVAL;
+ }
+ } /* else defaults to ITU-R BT.656 8-bit */
}
+
+ cfg |= S5P_CISRCFMT_HSIZE(f->o_width) | S5P_CISRCFMT_VSIZE(f->o_height);
+ writel(cfg, fimc->regs + S5P_CISRCFMT);
+ return 0;
+}
+
+
+int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+{
+ u32 hoff2, voff2;
+
+ u32 cfg = readl(fimc->regs + S5P_CIWDOFST);
+
+ cfg &= ~(S5P_CIWDOFST_HOROFF_MASK | S5P_CIWDOFST_VEROFF_MASK);
+ cfg |= S5P_CIWDOFST_OFF_EN |
+ S5P_CIWDOFST_HOROFF(f->offs_h) |
+ S5P_CIWDOFST_VEROFF(f->offs_v);
+
+ writel(cfg, fimc->regs + S5P_CIWDOFST);
+
+ /* See CIWDOFSTn register description in the datasheet for details. */
+ hoff2 = f->o_width - f->width - f->offs_h;
+ voff2 = f->o_height - f->height - f->offs_v;
+ cfg = S5P_CIWDOFST2_HOROFF(hoff2) | S5P_CIWDOFST2_VEROFF(voff2);
+
+ writel(cfg, fimc->regs + S5P_CIWDOFST2);
+ return 0;
+}
+
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s3c_fimc_isp_info *cam)
+{
+ u32 cfg, tmp;
+ struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+
+ cfg = readl(fimc->regs + S5P_CIGCTRL);
+
+ /* Select ITU B interface, disable Writeback path and test pattern. */
+ cfg &= ~(S5P_CIGCTRL_TESTPAT_MASK | S5P_CIGCTRL_SELCAM_ITU_A |
+ S5P_CIGCTRL_SELCAM_MIPI | S5P_CIGCTRL_CAMIF_SELWB |
+ S5P_CIGCTRL_SELCAM_MIPI_A);
+
+ if (cam->bus_type == FIMC_MIPI_CSI2) {
+ cfg |= S5P_CIGCTRL_SELCAM_MIPI;
+
+ if (cam->mux_id == 0)
+ cfg |= S5P_CIGCTRL_SELCAM_MIPI_A;
+
+ /* TODO: add remaining supported formats. */
+ if (vid_cap->fmt.code == V4L2_MBUS_FMT_VYUY8_2X8) {
+ tmp = S5P_CSIIMGFMT_YCBCR422_8BIT;
+ } else {
+ err("camera image format not supported: %d",
+ vid_cap->fmt.code);
+ return -EINVAL;
+ }
+ writel(tmp | (0x1 << 8), fimc->regs + S5P_CSIIMGFMT);
+
+ } else if (cam->bus_type == FIMC_ITU_601 ||
+ cam->bus_type == FIMC_ITU_656) {
+ if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
+ cfg |= S5P_CIGCTRL_SELCAM_ITU_A;
+ } else if (cam->bus_type == FIMC_LCD_WB) {
+ cfg |= S5P_CIGCTRL_CAMIF_SELWB;
+ } else {
+ err("invalid camera bus type selected\n");
+ return -EINVAL;
+ }
+ writel(cfg, fimc->regs + S5P_CIGCTRL);
+
+ return 0;
}
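/*
 * A minimal sketch (not part of the patch) of the arithmetic behind
 * fimc_hw_set_camera_offset() above: the second window offset written to
 * S5P_CIWDOFST2 is simply the margin left on the right/bottom of the crop
 * rectangle.  struct example_frame is a stand-in for the fimc_frame fields
 * the driver uses.
 */
#include <assert.h>

struct example_frame {
	unsigned int o_width, o_height;	/* full source frame size */
	unsigned int width, height;	/* cropped area size */
	unsigned int offs_h, offs_v;	/* left/top offset of the crop */
};

int main(void)
{
	struct example_frame f = {
		.o_width = 1280, .o_height = 720,
		.width   = 1024, .height   = 600,
		.offs_h  = 128,  .offs_v   = 60,
	};
	unsigned int hoff2 = f.o_width  - f.width  - f.offs_h;	/* right margin */
	unsigned int voff2 = f.o_height - f.height - f.offs_v;	/* bottom margin */

	assert(hoff2 == 128);	/* 1280 - 1024 - 128 */
	assert(voff2 == 60);	/*  720 -  600 -  60 */
	return 0;
}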
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
index a3cfe824db00..a57daedb5b5c 100644
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -11,10 +11,6 @@
#ifndef REGS_FIMC_H_
#define REGS_FIMC_H_
-#define S5P_CIOYSA(__x) (0x18 + (__x) * 4)
-#define S5P_CIOCBSA(__x) (0x28 + (__x) * 4)
-#define S5P_CIOCRSA(__x) (0x38 + (__x) * 4)
-
/* Input source format */
#define S5P_CISRCFMT 0x00
#define S5P_CISRCFMT_ITU601_8BIT (1 << 31)
@@ -28,22 +24,21 @@
/* Window offset */
#define S5P_CIWDOFST 0x04
-#define S5P_CIWDOFST_WINOFSEN (1 << 31)
+#define S5P_CIWDOFST_OFF_EN (1 << 31)
#define S5P_CIWDOFST_CLROVFIY (1 << 30)
#define S5P_CIWDOFST_CLROVRLB (1 << 29)
-#define S5P_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define S5P_CIWDOFST_HOROFF_MASK (0x7ff << 16)
#define S5P_CIWDOFST_CLROVFICB (1 << 15)
#define S5P_CIWDOFST_CLROVFICR (1 << 14)
-#define S5P_CIWDOFST_WINHOROFST(x) ((x) << 16)
-#define S5P_CIWDOFST_WINVEROFST(x) ((x) << 0)
-#define S5P_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+#define S5P_CIWDOFST_HOROFF(x) ((x) << 16)
+#define S5P_CIWDOFST_VEROFF(x) ((x) << 0)
+#define S5P_CIWDOFST_VEROFF_MASK (0xfff << 0)
/* Global control */
#define S5P_CIGCTRL 0x08
#define S5P_CIGCTRL_SWRST (1 << 31)
#define S5P_CIGCTRL_CAMRST_A (1 << 30)
#define S5P_CIGCTRL_SELCAM_ITU_A (1 << 29)
-#define S5P_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
#define S5P_CIGCTRL_TESTPAT_NORMAL (0 << 27)
#define S5P_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
#define S5P_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
@@ -61,6 +56,8 @@
#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define S5P_CIGCTRL_CSC_ITU601_709 (1 << 5)
#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
#define S5P_CIGCTRL_INTERLACE (1 << 0)
@@ -72,23 +69,10 @@
#define S5P_CIWDOFST2_HOROFF(x) ((x) << 16)
#define S5P_CIWDOFST2_VEROFF(x) ((x) << 0)
-/* Output DMA Y plane start address */
-#define S5P_CIOYSA1 0x18
-#define S5P_CIOYSA2 0x1c
-#define S5P_CIOYSA3 0x20
-#define S5P_CIOYSA4 0x24
-
-/* Output DMA Cb plane start address */
-#define S5P_CIOCBSA1 0x28
-#define S5P_CIOCBSA2 0x2c
-#define S5P_CIOCBSA3 0x30
-#define S5P_CIOCBSA4 0x34
-
-/* Output DMA Cr plane start address */
-#define S5P_CIOCRSA1 0x38
-#define S5P_CIOCRSA2 0x3c
-#define S5P_CIOCRSA3 0x40
-#define S5P_CIOCRSA4 0x44
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define S5P_CIOYSA(n) (0x18 + (n) * 4)
+#define S5P_CIOCBSA(n) (0x28 + (n) * 4)
+#define S5P_CIOCRSA(n) (0x38 + (n) * 4)
/* Target image format */
#define S5P_CITRGFMT 0x48
@@ -168,6 +152,8 @@
#define S5P_CISTATUS_OVFICB (1 << 30)
#define S5P_CISTATUS_OVFICR (1 << 29)
#define S5P_CISTATUS_VSYNC (1 << 28)
+#define S5P_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define S5P_CISTATUS_FRAMECNT_SHIFT 26
#define S5P_CISTATUS_WINOFF_EN (1 << 25)
#define S5P_CISTATUS_IMGCPT_EN (1 << 22)
#define S5P_CISTATUS_IMGCPT_SCEN (1 << 21)
@@ -206,10 +192,10 @@
#define S5P_CIIMGEFF_PAT_CB(x) ((x) << 13)
#define S5P_CIIMGEFF_PAT_CR(x) ((x) << 0)
-/* Input DMA Y/Cb/Cr plane start address 0 */
-#define S5P_CIIYSA0 0xd4
-#define S5P_CIICBSA0 0xd8
-#define S5P_CIICRSA0 0xdc
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define S5P_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define S5P_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define S5P_CIICRSA(n) (0xdc + (n) * 0x70)
/* Real input DMA image size */
#define S5P_CIREAL_ISIZE 0xf8
@@ -250,11 +236,6 @@
#define S5P_MSCTRL_ENVID (1 << 0)
#define S5P_MSCTRL_FRAME_COUNT(x) ((x) << 24)
-/* Input DMA Y/Cb/Cr plane start address 1 */
-#define S5P_CIIYSA1 0x144
-#define S5P_CIICBSA1 0x148
-#define S5P_CIICRSA1 0x14c
-
/* Output DMA Y/Cb/Cr offset */
#define S5P_CIOYOFF 0x168
#define S5P_CIOCBOFF 0x16c
@@ -289,5 +270,16 @@
/* MIPI CSI image format */
#define S5P_CSIIMGFMT 0x194
+#define S5P_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define S5P_CSIIMGFMT_RAW8 0x2a
+#define S5P_CSIIMGFMT_RAW10 0x2b
+#define S5P_CSIIMGFMT_RAW12 0x2c
+#define S5P_CSIIMGFMT_USER1 0x30
+#define S5P_CSIIMGFMT_USER2 0x31
+#define S5P_CSIIMGFMT_USER3 0x32
+#define S5P_CSIIMGFMT_USER4 0x33
+
+/* Output frame buffer sequence mask */
+#define S5P_CIFCNTSEQ 0x1FC
#endif /* REGS_FIMC_H_ */
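/*
 * A minimal check (not part of the patch) that the parameterized address
 * macros introduced above reproduce the offsets of the removed per-buffer
 * defines.  Note the removed names were 1-based (S5P_CIOYSA1..4) while the
 * new macro index is 0-based.
 */
#include <assert.h>

#define S5P_CIOYSA(n)	(0x18 + (n) * 4)
#define S5P_CIOCBSA(n)	(0x28 + (n) * 4)
#define S5P_CIOCRSA(n)	(0x38 + (n) * 4)
#define S5P_CIIYSA(n)	(0xd4 + (n) * 0x70)

int main(void)
{
	assert(S5P_CIOYSA(0)  == 0x18 && S5P_CIOYSA(3)  == 0x24);
	assert(S5P_CIOCBSA(0) == 0x28 && S5P_CIOCBSA(3) == 0x34);
	assert(S5P_CIOCRSA(0) == 0x38 && S5P_CIOCRSA(3) == 0x44);
	assert(S5P_CIIYSA(0)  == 0xd4 && S5P_CIIYSA(1)  == 0x144);
	return 0;
}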
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
deleted file mode 100644
index 6b3b09ef8978..000000000000
--- a/drivers/media/video/saa5246a.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
- * Philips.
- *
- * Only capturing of Teletext pages is tested. The videotext chips also have a
- * TV output but my hardware doesn't use it. For this reason this driver does
- * not support changing any TV display settings.
- *
- * Copyright (C) 2004 Michael Geng <linux@MichaelGeng.de>
- *
- * Derived from
- *
- * saa5249 driver
- * Copyright (C) 1998 Richard Guenther
- * <richard.guenther@student.uni-tuebingen.de>
- *
- * with changes by
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * and
- *
- * vtx.c
- * Copyright (C) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/videotext.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
-
-MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
-MODULE_DESCRIPTION("Philips SAA5246A, SAA5281 Teletext decoder driver");
-MODULE_LICENSE("GPL");
-
-#define MAJOR_VERSION 1 /* driver major version number */
-#define MINOR_VERSION 8 /* driver minor version number */
-
-/* Number of DAUs = number of pages that can be searched at the same time. */
-#define NUM_DAUS 4
-
-#define NUM_ROWS_PER_PAGE 40
-
-/* first column is 0 (not 1) */
-#define POS_TIME_START 32
-#define POS_TIME_END 39
-
-#define POS_HEADER_START 7
-#define POS_HEADER_END 31
-
-/* Returns 'true' if the part of the videotext page described with req contains
- (at least parts of) the time field */
-#define REQ_CONTAINS_TIME(p_req) \
- ((p_req)->start <= POS_TIME_END && \
- (p_req)->end >= POS_TIME_START)
-
-/* Returns 'true' if the part of the videotext page described with req contains
- (at least parts of) the page header */
-#define REQ_CONTAINS_HEADER(p_req) \
- ((p_req)->start <= POS_HEADER_END && \
- (p_req)->end >= POS_HEADER_START)
-
-/*****************************************************************************/
-/* Mode register numbers of the SAA5246A */
-/*****************************************************************************/
-#define SAA5246A_REGISTER_R0 0
-#define SAA5246A_REGISTER_R1 1
-#define SAA5246A_REGISTER_R2 2
-#define SAA5246A_REGISTER_R3 3
-#define SAA5246A_REGISTER_R4 4
-#define SAA5246A_REGISTER_R5 5
-#define SAA5246A_REGISTER_R6 6
-#define SAA5246A_REGISTER_R7 7
-#define SAA5246A_REGISTER_R8 8
-#define SAA5246A_REGISTER_R9 9
-#define SAA5246A_REGISTER_R10 10
-#define SAA5246A_REGISTER_R11 11
-#define SAA5246A_REGISTER_R11B 11
-
-/* SAA5246A mode registers often autoincrement to the next register.
- Therefore we use variable argument lists. The following macro indicates
- the end of a command list. */
-#define COMMAND_END (-1)
-
-/*****************************************************************************/
-/* Contents of the mode registers of the SAA5246A */
-/*****************************************************************************/
-/* Register R0 (Advanced Control) */
-#define R0_SELECT_R11 0x00
-#define R0_SELECT_R11B 0x01
-
-#define R0_PLL_TIME_CONSTANT_LONG 0x00
-#define R0_PLL_TIME_CONSTANT_SHORT 0x02
-
-#define R0_ENABLE_nODD_EVEN_OUTPUT 0x00
-#define R0_DISABLE_nODD_EVEN_OUTPUT 0x04
-
-#define R0_ENABLE_HDR_POLL 0x00
-#define R0_DISABLE_HDR_POLL 0x10
-
-#define R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x00
-#define R0_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED 0x20
-
-#define R0_NO_FREE_RUN_PLL 0x00
-#define R0_FREE_RUN_PLL 0x40
-
-#define R0_NO_AUTOMATIC_FASTEXT_PROMPT 0x00
-#define R0_AUTOMATIC_FASTEXT_PROMPT 0x80
-
-/* Register R1 (Mode) */
-#define R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES 0x00
-#define R1_NON_INTERLACED_312_313_LINES 0x01
-#define R1_NON_INTERLACED_312_312_LINES 0x02
-#define R1_FFB_LEADING_EDGE_IN_FIRST_BROAD_PULSE 0x03
-#define R1_FFB_LEADING_EDGE_IN_SECOND_BROAD_PULSE 0x07
-
-#define R1_DEW 0x00
-#define R1_FULL_FIELD 0x08
-
-#define R1_EXTENDED_PACKET_DISABLE 0x00
-#define R1_EXTENDED_PACKET_ENABLE 0x10
-
-#define R1_DAUS_ALL_ON 0x00
-#define R1_DAUS_ALL_OFF 0x20
-
-#define R1_7_BITS_PLUS_PARITY 0x00
-#define R1_8_BITS_NO_PARITY 0x40
-
-#define R1_VCS_TO_SCS 0x00
-#define R1_NO_VCS_TO_SCS 0x80
-
-/* Register R2 (Page request address) */
-#define R2_IN_R3_SELECT_PAGE_HUNDREDS 0x00
-#define R2_IN_R3_SELECT_PAGE_TENS 0x01
-#define R2_IN_R3_SELECT_PAGE_UNITS 0x02
-#define R2_IN_R3_SELECT_HOURS_TENS 0x03
-#define R2_IN_R3_SELECT_HOURS_UNITS 0x04
-#define R2_IN_R3_SELECT_MINUTES_TENS 0x05
-#define R2_IN_R3_SELECT_MINUTES_UNITS 0x06
-
-#define R2_DAU_0 0x00
-#define R2_DAU_1 0x10
-#define R2_DAU_2 0x20
-#define R2_DAU_3 0x30
-
-#define R2_BANK_0 0x00
-#define R2_BANK 1 0x40
-
-#define R2_HAMMING_CHECK_ON 0x80
-#define R2_HAMMING_CHECK_OFF 0x00
-
-/* Register R3 (Page request data) */
-#define R3_PAGE_HUNDREDS_0 0x00
-#define R3_PAGE_HUNDREDS_1 0x01
-#define R3_PAGE_HUNDREDS_2 0x02
-#define R3_PAGE_HUNDREDS_3 0x03
-#define R3_PAGE_HUNDREDS_4 0x04
-#define R3_PAGE_HUNDREDS_5 0x05
-#define R3_PAGE_HUNDREDS_6 0x06
-#define R3_PAGE_HUNDREDS_7 0x07
-
-#define R3_HOLD_PAGE 0x00
-#define R3_UPDATE_PAGE 0x08
-
-#define R3_PAGE_HUNDREDS_DO_NOT_CARE 0x00
-#define R3_PAGE_HUNDREDS_DO_CARE 0x10
-
-#define R3_PAGE_TENS_DO_NOT_CARE 0x00
-#define R3_PAGE_TENS_DO_CARE 0x10
-
-#define R3_PAGE_UNITS_DO_NOT_CARE 0x00
-#define R3_PAGE_UNITS_DO_CARE 0x10
-
-#define R3_HOURS_TENS_DO_NOT_CARE 0x00
-#define R3_HOURS_TENS_DO_CARE 0x10
-
-#define R3_HOURS_UNITS_DO_NOT_CARE 0x00
-#define R3_HOURS_UNITS_DO_CARE 0x10
-
-#define R3_MINUTES_TENS_DO_NOT_CARE 0x00
-#define R3_MINUTES_TENS_DO_CARE 0x10
-
-#define R3_MINUTES_UNITS_DO_NOT_CARE 0x00
-#define R3_MINUTES_UNITS_DO_CARE 0x10
-
-/* Register R4 (Display chapter) */
-#define R4_DISPLAY_PAGE_0 0x00
-#define R4_DISPLAY_PAGE_1 0x01
-#define R4_DISPLAY_PAGE_2 0x02
-#define R4_DISPLAY_PAGE_3 0x03
-#define R4_DISPLAY_PAGE_4 0x04
-#define R4_DISPLAY_PAGE_5 0x05
-#define R4_DISPLAY_PAGE_6 0x06
-#define R4_DISPLAY_PAGE_7 0x07
-
-/* Register R5 (Normal display control) */
-#define R5_PICTURE_INSIDE_BOXING_OFF 0x00
-#define R5_PICTURE_INSIDE_BOXING_ON 0x01
-
-#define R5_PICTURE_OUTSIDE_BOXING_OFF 0x00
-#define R5_PICTURE_OUTSIDE_BOXING_ON 0x02
-
-#define R5_TEXT_INSIDE_BOXING_OFF 0x00
-#define R5_TEXT_INSIDE_BOXING_ON 0x04
-
-#define R5_TEXT_OUTSIDE_BOXING_OFF 0x00
-#define R5_TEXT_OUTSIDE_BOXING_ON 0x08
-
-#define R5_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00
-#define R5_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10
-
-#define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00
-#define R5_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20
-
-#define R5_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00
-#define R5_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40
-
-#define R5_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00
-#define R5_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80
-
-/* Register R6 (Newsflash display) */
-#define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_PICTURE_INSIDE_BOXING_ON 0x01
-
-#define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_PICTURE_OUTSIDE_BOXING_ON 0x02
-
-#define R6_NEWSFLASH_TEXT_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_TEXT_INSIDE_BOXING_ON 0x04
-
-#define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_TEXT_OUTSIDE_BOXING_ON 0x08
-
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_INSIDE_BOXING_ON 0x10
-
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_CONTRAST_REDUCTION_OUTSIDE_BOXING_ON 0x20
-
-#define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_BACKGROUND_COLOR_INSIDE_BOXING_ON 0x40
-
-#define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_OFF 0x00
-#define R6_NEWSFLASH_BACKGROUND_COLOR_OUTSIDE_BOXING_ON 0x80
-
-/* Register R7 (Display mode) */
-#define R7_BOX_OFF_ROW_0 0x00
-#define R7_BOX_ON_ROW_0 0x01
-
-#define R7_BOX_OFF_ROW_1_TO_23 0x00
-#define R7_BOX_ON_ROW_1_TO_23 0x02
-
-#define R7_BOX_OFF_ROW_24 0x00
-#define R7_BOX_ON_ROW_24 0x04
-
-#define R7_SINGLE_HEIGHT 0x00
-#define R7_DOUBLE_HEIGHT 0x08
-
-#define R7_TOP_HALF 0x00
-#define R7_BOTTOM_HALF 0x10
-
-#define R7_REVEAL_OFF 0x00
-#define R7_REVEAL_ON 0x20
-
-#define R7_CURSER_OFF 0x00
-#define R7_CURSER_ON 0x40
-
-#define R7_STATUS_BOTTOM 0x00
-#define R7_STATUS_TOP 0x80
-
-/* Register R8 (Active chapter) */
-#define R8_ACTIVE_CHAPTER_0 0x00
-#define R8_ACTIVE_CHAPTER_1 0x01
-#define R8_ACTIVE_CHAPTER_2 0x02
-#define R8_ACTIVE_CHAPTER_3 0x03
-#define R8_ACTIVE_CHAPTER_4 0x04
-#define R8_ACTIVE_CHAPTER_5 0x05
-#define R8_ACTIVE_CHAPTER_6 0x06
-#define R8_ACTIVE_CHAPTER_7 0x07
-
-#define R8_CLEAR_MEMORY 0x08
-#define R8_DO_NOT_CLEAR_MEMORY 0x00
-
-/* Register R9 (Curser row) */
-#define R9_CURSER_ROW_0 0x00
-#define R9_CURSER_ROW_1 0x01
-#define R9_CURSER_ROW_2 0x02
-#define R9_CURSER_ROW_25 0x19
-
-/* Register R10 (Curser column) */
-#define R10_CURSER_COLUMN_0 0x00
-#define R10_CURSER_COLUMN_6 0x06
-#define R10_CURSER_COLUMN_8 0x08
-
-/*****************************************************************************/
-/* Row 25 control data in column 0 to 9 */
-/*****************************************************************************/
-#define ROW25_COLUMN0_PAGE_UNITS 0x0F
-
-#define ROW25_COLUMN1_PAGE_TENS 0x0F
-
-#define ROW25_COLUMN2_MINUTES_UNITS 0x0F
-
-#define ROW25_COLUMN3_MINUTES_TENS 0x07
-#define ROW25_COLUMN3_DELETE_PAGE 0x08
-
-#define ROW25_COLUMN4_HOUR_UNITS 0x0F
-
-#define ROW25_COLUMN5_HOUR_TENS 0x03
-#define ROW25_COLUMN5_INSERT_HEADLINE 0x04
-#define ROW25_COLUMN5_INSERT_SUBTITLE 0x08
-
-#define ROW25_COLUMN6_SUPPRESS_HEADER 0x01
-#define ROW25_COLUMN6_UPDATE_PAGE 0x02
-#define ROW25_COLUMN6_INTERRUPTED_SEQUENCE 0x04
-#define ROW25_COLUMN6_SUPPRESS_DISPLAY 0x08
-
-#define ROW25_COLUMN7_SERIAL_MODE 0x01
-#define ROW25_COLUMN7_CHARACTER_SET 0x0E
-
-#define ROW25_COLUMN8_PAGE_HUNDREDS 0x07
-#define ROW25_COLUMN8_PAGE_NOT_FOUND 0x10
-
-#define ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR 0x20
-
-#define ROW25_COLUMN0_TO_7_HAMMING_ERROR 0x10
-
-/*****************************************************************************/
-/* Helper macros for extracting page, hour and minute digits */
-/*****************************************************************************/
-/* BYTE_POS 0 is at row 0, column 0,
- BYTE_POS 1 is at row 0, column 1,
- BYTE_POS 40 is at row 1, column 0, (with NUM_ROWS_PER_PAGE = 40)
- BYTE_POS 41 is at row 1, column 1, (with NUM_ROWS_PER_PAGE = 40),
- ... */
-#define ROW(BYTE_POS) (BYTE_POS / NUM_ROWS_PER_PAGE)
-#define COLUMN(BYTE_POS) (BYTE_POS % NUM_ROWS_PER_PAGE)
-
-/*****************************************************************************/
-/* Helper macros for extracting page, hour and minute digits */
-/*****************************************************************************/
-/* Macros for extracting hundreds, tens and units of a page number which
- must be in the range 0 ... 0x799.
- Note that page is coded in hexadecimal, i.e. 0x123 means page 123.
- page 0x.. means page 8.. */
-#define HUNDREDS_OF_PAGE(page) (((page) / 0x100) & 0x7)
-#define TENS_OF_PAGE(page) (((page) / 0x10) & 0xF)
-#define UNITS_OF_PAGE(page) ((page) & 0xF)
-
-/* Macros for extracting tens and units of a hour information which
- must be in the range 0 ... 0x24.
- Note that hour is coded in hexadecimal, i.e. 0x12 means 12 hours */
-#define TENS_OF_HOUR(hour) ((hour) / 0x10)
-#define UNITS_OF_HOUR(hour) ((hour) & 0xF)
-
-/* Macros for extracting tens and units of a minute information which
- must be in the range 0 ... 0x59.
- Note that minute is coded in hexadecimal, i.e. 0x12 means 12 minutes */
-#define TENS_OF_MINUTE(minute) ((minute) / 0x10)
-#define UNITS_OF_MINUTE(minute) ((minute) & 0xF)
-
-#define HOUR_MAX 0x23
-#define MINUTE_MAX 0x59
-#define PAGE_MAX 0x8FF
-
-
-struct saa5246a_device
-{
- struct v4l2_subdev sd;
- struct video_device *vdev;
- u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE];
- int is_searching[NUM_DAUS];
- unsigned long in_use;
- struct mutex lock;
-};
-
-static inline struct saa5246a_device *to_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa5246a_device, sd);
-}
-
-static struct video_device saa_template; /* Declared near bottom */
-
-/*
- * I2C interfaces
- */
-
-static int i2c_sendbuf(struct saa5246a_device *t, int reg, int count, u8 *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
- char buf[64];
-
- buf[0] = reg;
- memcpy(buf+1, data, count);
-
- if (i2c_master_send(client, buf, count + 1) == count + 1)
- return 0;
- return -1;
-}
-
-static int i2c_senddata(struct saa5246a_device *t, ...)
-{
- unsigned char buf[64];
- int v;
- int ct = 0;
- va_list argp;
- va_start(argp, t);
-
- while ((v = va_arg(argp, int)) != -1)
- buf[ct++] = v;
-
- va_end(argp);
- return i2c_sendbuf(t, buf[0], ct-1, buf+1);
-}
-
-/* Get count number of bytes from I²C-device at address adr, store them in buf.
- * Start & stop handshaking is done by this routine, ack will be sent after the
- * last byte to inhibit further sending of data. If uaccess is 'true', data is
- * written to user-space with put_user. Returns -1 if I²C-device didn't send
- * acknowledge, 0 otherwise
- */
-static int i2c_getdata(struct saa5246a_device *t, int count, u8 *buf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
-
- if (i2c_master_recv(client, buf, count) != count)
- return -1;
- return 0;
-}
-
-/* When a page is found then the not FOUND bit in one of the status registers
- * of the SAA5264A chip is cleared. Unfortunately this bit is not set
- * automatically when a new page is requested. Instead this function must be
- * called after a page has been requested.
- *
- * Return value: 0 if successful
- */
-static int saa5246a_clear_found_bit(struct saa5246a_device *t,
- unsigned char dau_no)
-{
- unsigned char row_25_column_8;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_8,
-
- COMMAND_END) ||
- i2c_getdata(t, 1, &row_25_column_8))
- {
- return -EIO;
- }
- row_25_column_8 |= ROW25_COLUMN8_PAGE_NOT_FOUND;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_8,
-
- row_25_column_8,
-
- COMMAND_END))
- {
- return -EIO;
- }
-
- return 0;
-}
-
-/* Requests one videotext page as described in req. The fields of req are
- * checked and an error is returned if something is invalid.
- *
- * Return value: 0 if successful
- */
-static int saa5246a_request_page(struct saa5246a_device *t,
- vtx_pagereq_t *req)
-{
- if (req->pagemask < 0 || req->pagemask >= PGMASK_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_PAGE)
- if (req->page < 0 || req->page > PAGE_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_HOUR)
- if (req->hour < 0 || req->hour > HOUR_MAX)
- return -EINVAL;
- if (req->pagemask & PGMASK_MINUTE)
- if (req->minute < 0 || req->minute > MINUTE_MAX)
- return -EINVAL;
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- req->pgbuf << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- HUNDREDS_OF_PAGE(req->page) |
- R3_HOLD_PAGE |
- (req->pagemask & PG_HUND ?
- R3_PAGE_HUNDREDS_DO_CARE :
- R3_PAGE_HUNDREDS_DO_NOT_CARE),
-
- TENS_OF_PAGE(req->page) |
- (req->pagemask & PG_TEN ?
- R3_PAGE_TENS_DO_CARE :
- R3_PAGE_TENS_DO_NOT_CARE),
-
- UNITS_OF_PAGE(req->page) |
- (req->pagemask & PG_UNIT ?
- R3_PAGE_UNITS_DO_CARE :
- R3_PAGE_UNITS_DO_NOT_CARE),
-
- TENS_OF_HOUR(req->hour) |
- (req->pagemask & HR_TEN ?
- R3_HOURS_TENS_DO_CARE :
- R3_HOURS_TENS_DO_NOT_CARE),
-
- UNITS_OF_HOUR(req->hour) |
- (req->pagemask & HR_UNIT ?
- R3_HOURS_UNITS_DO_CARE :
- R3_HOURS_UNITS_DO_NOT_CARE),
-
- TENS_OF_MINUTE(req->minute) |
- (req->pagemask & MIN_TEN ?
- R3_MINUTES_TENS_DO_CARE :
- R3_MINUTES_TENS_DO_NOT_CARE),
-
- UNITS_OF_MINUTE(req->minute) |
- (req->pagemask & MIN_UNIT ?
- R3_MINUTES_UNITS_DO_CARE :
- R3_MINUTES_UNITS_DO_NOT_CARE),
-
- COMMAND_END) || i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- req->pgbuf << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- HUNDREDS_OF_PAGE(req->page) |
- R3_UPDATE_PAGE |
- (req->pagemask & PG_HUND ?
- R3_PAGE_HUNDREDS_DO_CARE :
- R3_PAGE_HUNDREDS_DO_NOT_CARE),
-
- COMMAND_END))
- {
- return -EIO;
- }
-
- t->is_searching[req->pgbuf] = true;
- return 0;
-}
-
-/* This routine decodes the page number from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return value: page number coded in hexadecimal, i. e. page 123 is coded 0x123
- */
-static inline int saa5246a_extract_pagenum_from_infobits(
- unsigned char infobits[10])
-{
- int page_hundreds, page_tens, page_units;
-
- page_units = infobits[0] & ROW25_COLUMN0_PAGE_UNITS;
- page_tens = infobits[1] & ROW25_COLUMN1_PAGE_TENS;
- page_hundreds = infobits[8] & ROW25_COLUMN8_PAGE_HUNDREDS;
-
- /* page 0x.. means page 8.. */
- if (page_hundreds == 0)
- page_hundreds = 8;
-
- return((page_hundreds << 8) | (page_tens << 4) | page_units);
-}
-
-/* Decodes the hour from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return: hour coded in hexadecimal, i. e. 12h is coded 0x12
- */
-static inline int saa5246a_extract_hour_from_infobits(
- unsigned char infobits[10])
-{
- int hour_tens, hour_units;
-
- hour_units = infobits[4] & ROW25_COLUMN4_HOUR_UNITS;
- hour_tens = infobits[5] & ROW25_COLUMN5_HOUR_TENS;
-
- return((hour_tens << 4) | hour_units);
-}
-
-/* Decodes the minutes from the infobits contained in line 25.
- *
- * Parameters:
- * infobits: must be bits 0 to 9 of column 25
- *
- * Return: minutes coded in hexadecimal, i. e. 10min is coded 0x10
- */
-static inline int saa5246a_extract_minutes_from_infobits(
- unsigned char infobits[10])
-{
- int minutes_tens, minutes_units;
-
- minutes_units = infobits[2] & ROW25_COLUMN2_MINUTES_UNITS;
- minutes_tens = infobits[3] & ROW25_COLUMN3_MINUTES_TENS;
-
- return((minutes_tens << 4) | minutes_units);
-}
-
-/* Reads the status bits contained in the first 10 columns of the first line
- * and extracts the information into info.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_get_status(struct saa5246a_device *t,
- vtx_pageinfo_t *info, unsigned char dau_no)
-{
- unsigned char infobits[10];
- int column;
-
- if (dau_no >= NUM_DAUS)
- return -EINVAL;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
-
- dau_no |
- R8_DO_NOT_CLEAR_MEMORY,
-
- R9_CURSER_ROW_25,
-
- R10_CURSER_COLUMN_0,
-
- COMMAND_END) ||
- i2c_getdata(t, 10, infobits))
- {
- return -EIO;
- }
-
- info->pagenum = saa5246a_extract_pagenum_from_infobits(infobits);
- info->hour = saa5246a_extract_hour_from_infobits(infobits);
- info->minute = saa5246a_extract_minutes_from_infobits(infobits);
- info->charset = ((infobits[7] & ROW25_COLUMN7_CHARACTER_SET) >> 1);
- info->delete = !!(infobits[3] & ROW25_COLUMN3_DELETE_PAGE);
- info->headline = !!(infobits[5] & ROW25_COLUMN5_INSERT_HEADLINE);
- info->subtitle = !!(infobits[5] & ROW25_COLUMN5_INSERT_SUBTITLE);
- info->supp_header = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_HEADER);
- info->update = !!(infobits[6] & ROW25_COLUMN6_UPDATE_PAGE);
- info->inter_seq = !!(infobits[6] & ROW25_COLUMN6_INTERRUPTED_SEQUENCE);
- info->dis_disp = !!(infobits[6] & ROW25_COLUMN6_SUPPRESS_DISPLAY);
- info->serial = !!(infobits[7] & ROW25_COLUMN7_SERIAL_MODE);
- info->notfound = !!(infobits[8] & ROW25_COLUMN8_PAGE_NOT_FOUND);
- info->pblf = !!(infobits[9] & ROW25_COLUMN9_PAGE_BEING_LOOKED_FOR);
- info->hamming = 0;
- for (column = 0; column <= 7; column++) {
- if (infobits[column] & ROW25_COLUMN0_TO_7_HAMMING_ERROR) {
- info->hamming = 1;
- break;
- }
- }
- if (!info->hamming && !info->notfound)
- t->is_searching[dau_no] = false;
- return 0;
-}
-
-/* Reads 1 videotext page buffer of the SAA5246A.
- *
- * req is used both as input and as output. It contains information which part
- * must be read. The videotext page is copied into req->buffer.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_get_page(struct saa5246a_device *t,
- vtx_pagereq_t *req)
-{
- int start, end, size;
- char *buf;
- int err;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS ||
- req->start < 0 || req->start > req->end || req->end >= VTX_PAGESIZE)
- return -EINVAL;
-
- buf = kmalloc(VTX_PAGESIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Read "normal" part of page */
- err = -EIO;
-
- end = min(req->end, VTX_PAGESIZE - 1);
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- req->pgbuf | R8_DO_NOT_CLEAR_MEMORY,
- ROW(req->start), COLUMN(req->start), COMMAND_END))
- goto out;
- if (i2c_getdata(t, end - req->start + 1, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer, buf, end - req->start + 1))
- goto out;
-
- /* Always get the time from buffer 4, since this stupid SAA5246A only
- * updates the currently displayed buffer...
- */
- if (REQ_CONTAINS_TIME(req)) {
- start = max(req->start, POS_TIME_START);
- end = min(req->end, POS_TIME_END);
- size = end - start + 1;
- err = -EINVAL;
- if (size < 0)
- goto out;
- err = -EIO;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
- R9_CURSER_ROW_0, start, COMMAND_END))
- goto out;
- if (i2c_getdata(t, size, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer + start - req->start, buf, size))
- goto out;
- }
- /* Insert the header from buffer 4 only, if acquisition circuit is still searching for a page */
- if (REQ_CONTAINS_HEADER(req) && t->is_searching[req->pgbuf]) {
- start = max(req->start, POS_HEADER_START);
- end = min(req->end, POS_HEADER_END);
- size = end - start + 1;
- err = -EINVAL;
- if (size < 0)
- goto out;
- err = -EIO;
- if (i2c_senddata(t, SAA5246A_REGISTER_R8,
- R8_ACTIVE_CHAPTER_4 | R8_DO_NOT_CLEAR_MEMORY,
- R9_CURSER_ROW_0, start, COMMAND_END))
- goto out;
- if (i2c_getdata(t, end - start + 1, buf))
- goto out;
- err = -EFAULT;
- if (copy_to_user(req->buffer + start - req->start, buf, size))
- goto out;
- }
- err = 0;
-out:
- kfree(buf);
- return err;
-}
-
-/* Stops the acquisition circuit given in dau_no. The page buffer associated
- * with this acquisition circuit will no more be updated. The other daus are
- * not affected.
- *
- * Return value: 0 if successful
- */
-static inline int saa5246a_stop_dau(struct saa5246a_device *t,
- unsigned char dau_no)
-{
- if (dau_no >= NUM_DAUS)
- return -EINVAL;
- if (i2c_senddata(t, SAA5246A_REGISTER_R2,
-
- R2_IN_R3_SELECT_PAGE_HUNDREDS |
- dau_no << 4 |
- R2_BANK_0 |
- R2_HAMMING_CHECK_OFF,
-
- R3_PAGE_HUNDREDS_0 |
- R3_HOLD_PAGE |
- R3_PAGE_HUNDREDS_DO_NOT_CARE,
-
- COMMAND_END))
- {
- return -EIO;
- }
- t->is_searching[dau_no] = false;
- return 0;
-}
-
-/* Handles ioctls defined in videotext.h
- *
- * Returns 0 if successful
- */
-static long do_saa5246a_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- switch(cmd)
- {
- case VTXIOCGETINFO:
- {
- vtx_info_t *info = arg;
-
- info->version_major = MAJOR_VERSION;
- info->version_minor = MINOR_VERSION;
- info->numpages = NUM_DAUS;
- return 0;
- }
-
- case VTXIOCCLRPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- memset(t->pgbuf[req->pgbuf], ' ', sizeof(t->pgbuf[0]));
- return 0;
- }
-
- case VTXIOCCLRFOUND:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- return(saa5246a_clear_found_bit(t, req->pgbuf));
- }
-
- case VTXIOCPAGEREQ:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_request_page(t, req));
- }
-
- case VTXIOCGETSTAT:
- {
- vtx_pagereq_t *req = arg;
- vtx_pageinfo_t info;
- int rval;
-
- if ((rval = saa5246a_get_status(t, &info, req->pgbuf)))
- return rval;
- if(copy_to_user(req->buffer, &info,
- sizeof(vtx_pageinfo_t)))
- return -EFAULT;
- return 0;
- }
-
- case VTXIOCGETPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_get_page(t, req));
- }
-
- case VTXIOCSTOPDAU:
- {
- vtx_pagereq_t *req = arg;
-
- return(saa5246a_stop_dau(t, req->pgbuf));
- }
-
- case VTXIOCPUTPAGE:
- case VTXIOCSETDISP:
- case VTXIOCPUTSTAT:
- return 0;
-
- case VTXIOCCLRCACHE:
- {
- return 0;
- }
-
- case VTXIOCSETVIRT:
- {
- /* I do not know what "virtual mode" means */
- return 0;
- }
- }
- return -EINVAL;
-}
-
-/*
- * Translates old vtx IOCTLs to new ones
- *
- * This keeps new kernel versions compatible with old userspace programs.
- */
-static inline unsigned int vtx_fix_command(unsigned int cmd)
-{
- switch (cmd) {
- case VTXIOCGETINFO_OLD:
- cmd = VTXIOCGETINFO;
- break;
- case VTXIOCCLRPAGE_OLD:
- cmd = VTXIOCCLRPAGE;
- break;
- case VTXIOCCLRFOUND_OLD:
- cmd = VTXIOCCLRFOUND;
- break;
- case VTXIOCPAGEREQ_OLD:
- cmd = VTXIOCPAGEREQ;
- break;
- case VTXIOCGETSTAT_OLD:
- cmd = VTXIOCGETSTAT;
- break;
- case VTXIOCGETPAGE_OLD:
- cmd = VTXIOCGETPAGE;
- break;
- case VTXIOCSTOPDAU_OLD:
- cmd = VTXIOCSTOPDAU;
- break;
- case VTXIOCPUTPAGE_OLD:
- cmd = VTXIOCPUTPAGE;
- break;
- case VTXIOCSETDISP_OLD:
- cmd = VTXIOCSETDISP;
- break;
- case VTXIOCPUTSTAT_OLD:
- cmd = VTXIOCPUTSTAT;
- break;
- case VTXIOCCLRCACHE_OLD:
- cmd = VTXIOCCLRCACHE;
- break;
- case VTXIOCSETVIRT_OLD:
- cmd = VTXIOCSETVIRT;
- break;
- }
- return cmd;
-}
-
-/*
- * Handle the locking
- */
-static long saa5246a_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct saa5246a_device *t = video_drvdata(file);
- long err;
-
- cmd = vtx_fix_command(cmd);
- mutex_lock(&t->lock);
- err = video_usercopy(file, cmd, arg, do_saa5246a_ioctl);
- mutex_unlock(&t->lock);
- return err;
-}
-
-static int saa5246a_open(struct file *file)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- if (test_and_set_bit(0, &t->in_use))
- return -EBUSY;
-
- if (i2c_senddata(t, SAA5246A_REGISTER_R0,
- R0_SELECT_R11 |
- R0_PLL_TIME_CONSTANT_LONG |
- R0_ENABLE_nODD_EVEN_OUTPUT |
- R0_ENABLE_HDR_POLL |
- R0_DO_NOT_FORCE_nODD_EVEN_LOW_IF_PICTURE_DISPLAYED |
- R0_NO_FREE_RUN_PLL |
- R0_NO_AUTOMATIC_FASTEXT_PROMPT,
-
- R1_NON_INTERLACED_312_312_LINES |
- R1_DEW |
- R1_EXTENDED_PACKET_DISABLE |
- R1_DAUS_ALL_ON |
- R1_8_BITS_NO_PARITY |
- R1_VCS_TO_SCS,
-
- COMMAND_END) ||
- i2c_senddata(t, SAA5246A_REGISTER_R4,
-
- /* We do not care much for the TV display but nevertheless we
- * need the currently displayed page later because only on that
- * page the time is updated. */
- R4_DISPLAY_PAGE_4,
-
- COMMAND_END))
- {
- clear_bit(0, &t->in_use);
- return -EIO;
- }
- return 0;
-}
-
-static int saa5246a_release(struct file *file)
-{
- struct saa5246a_device *t = video_drvdata(file);
-
- /* Stop all acquisition circuits. */
- i2c_senddata(t, SAA5246A_REGISTER_R1,
-
- R1_INTERLACED_312_AND_HALF_312_AND_HALF_LINES |
- R1_DEW |
- R1_EXTENDED_PACKET_DISABLE |
- R1_DAUS_ALL_OFF |
- R1_8_BITS_NO_PARITY |
- R1_VCS_TO_SCS,
-
- COMMAND_END);
- clear_bit(0, &t->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations saa_fops = {
- .owner = THIS_MODULE,
- .open = saa5246a_open,
- .release = saa5246a_release,
- .ioctl = saa5246a_ioctl,
-};
-
-static struct video_device saa_template =
-{
- .name = "saa5246a",
- .fops = &saa_fops,
- .release = video_device_release,
-};
-
-static int saa5246a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA5246A, 0);
-}
-
-static const struct v4l2_subdev_core_ops saa5246a_core_ops = {
- .g_chip_ident = saa5246a_g_chip_ident,
-};
-
-static const struct v4l2_subdev_ops saa5246a_ops = {
- .core = &saa5246a_core_ops,
-};
-
-
-static int saa5246a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int pgbuf;
- int err;
- struct saa5246a_device *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
- v4l_info(client, "VideoText version %d.%d\n",
- MAJOR_VERSION, MINOR_VERSION);
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &saa5246a_ops);
- mutex_init(&t->lock);
-
- /* Now create a video4linux device */
- t->vdev = video_device_alloc();
- if (t->vdev == NULL) {
- kfree(t);
- return -ENOMEM;
- }
- memcpy(t->vdev, &saa_template, sizeof(*t->vdev));
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->pgbuf[pgbuf], ' ', sizeof(t->pgbuf[0]));
- t->is_searching[pgbuf] = false;
- }
- video_set_drvdata(t->vdev, t);
-
- /* Register it */
- err = video_register_device(t->vdev, VFL_TYPE_VTX, -1);
- if (err < 0) {
- video_device_release(t->vdev);
- kfree(t);
- return err;
- }
- return 0;
-}
-
-static int saa5246a_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct saa5246a_device *t = to_dev(sd);
-
- video_unregister_device(t->vdev);
- v4l2_device_unregister_subdev(sd);
- kfree(t);
- return 0;
-}
-
-static const struct i2c_device_id saa5246a_id[] = {
- { "saa5246a", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, saa5246a_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa5246a",
- .probe = saa5246a_probe,
- .remove = saa5246a_remove,
- .id_table = saa5246a_id,
-};
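/*
 * A small worked example (not part of the patch) of the BCD-in-hex page
 * encoding handled by the digit-extraction macros in the removed driver
 * above: page 123 is stored as 0x123, and a hundreds digit of 0 denotes a
 * page in the 8xx range.
 */
#include <assert.h>

#define HUNDREDS_OF_PAGE(page)	(((page) / 0x100) & 0x7)
#define TENS_OF_PAGE(page)	(((page) / 0x10) & 0xF)
#define UNITS_OF_PAGE(page)	((page) & 0xF)

int main(void)
{
	assert(HUNDREDS_OF_PAGE(0x123) == 1);
	assert(TENS_OF_PAGE(0x123) == 2);
	assert(UNITS_OF_PAGE(0x123) == 3);
	return 0;
}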
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
deleted file mode 100644
index 31ff27df4cbf..000000000000
--- a/drivers/media/video/saa5249.c
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- * Modified in order to keep it compatible both with new and old videotext IOCTLs by
- * Michael Geng <linux@MichaelGeng.de>
- *
- * Cleaned up to use existing videodev interface and allow the idea
- * of multiple teletext decoders on the video4linux iface. Changed i2c
- * to cover addressing clashes on device busses. It's also rebuilt so
- * you can add arbitary multiple teletext devices to Linux video4linux
- * now (well 32 anyway).
- *
- * Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * The original driver was heavily modified to match the i2c interface
- * It was truncated to use the WinTV boards, too.
- *
- * Copyright (c) 1998 Richard Guenther <richard.guenther@student.uni-tuebingen.de>
- *
- * Derived From
- *
- * vtx.c:
- * This is a loadable character-device-driver for videotext-interfaces
- * (aka teletext). Please check the Makefile/README for a list of supported
- * interfaces.
- *
- * Copyright (c) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/videotext.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
-
-MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
-MODULE_DESCRIPTION("Philips SAA5249 Teletext decoder driver");
-MODULE_LICENSE("GPL");
-
-
-#define VTX_VER_MAJ 1
-#define VTX_VER_MIN 8
-
-
-#define NUM_DAUS 4
-#define NUM_BUFS 8
-
-static const int disp_modes[8][3] =
-{
- { 0x46, 0x03, 0x03 }, /* DISPOFF */
- { 0x46, 0xcc, 0xcc }, /* DISPNORM */
- { 0x44, 0x0f, 0x0f }, /* DISPTRANS */
- { 0x46, 0xcc, 0x46 }, /* DISPINS */
- { 0x44, 0x03, 0x03 }, /* DISPOFF, interlaced */
- { 0x44, 0xcc, 0xcc }, /* DISPNORM, interlaced */
- { 0x44, 0x0f, 0x0f }, /* DISPTRANS, interlaced */
- { 0x44, 0xcc, 0x46 } /* DISPINS, interlaced */
-};
-
-
-
-#define PAGE_WAIT msecs_to_jiffies(300) /* Time between requesting page and */
- /* checking status bits */
-#define PGBUF_EXPIRE msecs_to_jiffies(15000) /* Time to wait before retransmitting */
- /* page regardless of infobits */
-typedef struct {
- u8 pgbuf[VTX_VIRTUALSIZE]; /* Page-buffer */
- u8 laststat[10]; /* Last value of infobits for DAU */
- u8 sregs[7]; /* Page-request registers */
- unsigned long expire; /* Time when page will be expired */
- unsigned clrfound : 1; /* VTXIOCCLRFOUND has been called */
- unsigned stopped : 1; /* VTXIOCSTOPDAU has been called */
-} vdau_t;
-
-struct saa5249_device
-{
- struct v4l2_subdev sd;
- struct video_device *vdev;
- vdau_t vdau[NUM_DAUS]; /* Data for virtual DAUs (the 5249 only has one */
- /* real DAU, so we have to simulate some more) */
- int vtx_use_count;
- int is_searching[NUM_DAUS];
- int disp_mode;
- int virtual_mode;
- unsigned long in_use;
- struct mutex lock;
-};
-
-static inline struct saa5249_device *to_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct saa5249_device, sd);
-}
-
-
-#define CCTWR 34 /* I²C write/read-address of vtx-chip */
-#define CCTRD 35
-#define NOACK_REPEAT 10 /* Retry access this many times on failure */
-#define CLEAR_DELAY msecs_to_jiffies(50) /* Time required to clear a page */
-#define READY_TIMEOUT msecs_to_jiffies(30) /* Time to wait for ready signal of I2C-bus interface */
-#define INIT_DELAY 500 /* Time in usec to wait at initialization of CEA interface */
-#define START_DELAY 10 /* Time in usec to wait before starting write-cycle (CEA) */
-
-#define VTX_DEV_MINOR 0
-
-static struct video_device saa_template; /* Declared near bottom */
-
-/*
- * Wait the given number of jiffies (10ms). This calls the scheduler, so the actual
- * delay may be longer.
- */
-
-static void jdelay(unsigned long delay)
-{
- sigset_t oldblocked = current->blocked;
-
- spin_lock_irq(&current->sighand->siglock);
- sigfillset(&current->blocked);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- msleep_interruptible(jiffies_to_msecs(delay));
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = oldblocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-}
-
-
-/*
- * I2C interfaces
- */
-
-static int i2c_sendbuf(struct saa5249_device *t, int reg, int count, u8 *data)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
- char buf[64];
-
- buf[0] = reg;
- memcpy(buf+1, data, count);
-
- if (i2c_master_send(client, buf, count + 1) == count + 1)
- return 0;
- return -1;
-}
-
-static int i2c_senddata(struct saa5249_device *t, ...)
-{
- unsigned char buf[64];
- int v;
- int ct = 0;
- va_list argp;
- va_start(argp,t);
-
- while ((v = va_arg(argp, int)) != -1)
- buf[ct++] = v;
-
- va_end(argp);
- return i2c_sendbuf(t, buf[0], ct-1, buf+1);
-}
-
-/* Get count number of bytes from I²C-device at address adr, store them in buf. Start & stop
- * handshaking is done by this routine, ack will be sent after the last byte to inhibit further
- * sending of data. If uaccess is 'true', data is written to user-space with put_user.
- * Returns -1 if I²C-device didn't send acknowledge, 0 otherwise
- */
-
-static int i2c_getdata(struct saa5249_device *t, int count, u8 *buf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
-
- if (i2c_master_recv(client, buf, count) != count)
- return -1;
- return 0;
-}
-
-
-/*
- * Standard character-device-driver functions
- */
-
-static long do_saa5249_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- static int virtual_mode = false;
- struct saa5249_device *t = video_drvdata(file);
-
- switch (cmd) {
- case VTXIOCGETINFO:
- {
- vtx_info_t *info = arg;
- info->version_major = VTX_VER_MAJ;
- info->version_minor = VTX_VER_MIN;
- info->numpages = NUM_DAUS;
- /*info->cct_type = CCT_TYPE;*/
- return 0;
- }
-
- case VTXIOCCLRPAGE:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- memset(t->vdau[req->pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- t->vdau[req->pgbuf].clrfound = true;
- return 0;
- }
-
- case VTXIOCCLRFOUND:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].clrfound = true;
- return 0;
- }
-
- case VTXIOCPAGEREQ:
- {
- vtx_pagereq_t *req = arg;
- if (!(req->pagemask & PGMASK_PAGE))
- req->page = 0;
- if (!(req->pagemask & PGMASK_HOUR))
- req->hour = 0;
- if (!(req->pagemask & PGMASK_MINUTE))
- req->minute = 0;
- if (req->page < 0 || req->page > 0x8ff) /* 7FF ?? */
- return -EINVAL;
- req->page &= 0x7ff;
- if (req->hour < 0 || req->hour > 0x3f || req->minute < 0 || req->minute > 0x7f ||
- req->pagemask < 0 || req->pagemask >= PGMASK_MAX || req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].sregs[0] = (req->pagemask & PG_HUND ? 0x10 : 0) | (req->page / 0x100);
- t->vdau[req->pgbuf].sregs[1] = (req->pagemask & PG_TEN ? 0x10 : 0) | ((req->page / 0x10) & 0xf);
- t->vdau[req->pgbuf].sregs[2] = (req->pagemask & PG_UNIT ? 0x10 : 0) | (req->page & 0xf);
- t->vdau[req->pgbuf].sregs[3] = (req->pagemask & HR_TEN ? 0x10 : 0) | (req->hour / 0x10);
- t->vdau[req->pgbuf].sregs[4] = (req->pagemask & HR_UNIT ? 0x10 : 0) | (req->hour & 0xf);
- t->vdau[req->pgbuf].sregs[5] = (req->pagemask & MIN_TEN ? 0x10 : 0) | (req->minute / 0x10);
- t->vdau[req->pgbuf].sregs[6] = (req->pagemask & MIN_UNIT ? 0x10 : 0) | (req->minute & 0xf);
- t->vdau[req->pgbuf].stopped = false;
- t->vdau[req->pgbuf].clrfound = true;
- t->is_searching[req->pgbuf] = true;
- return 0;
- }
-
- case VTXIOCGETSTAT:
- {
- vtx_pagereq_t *req = arg;
- u8 infobits[10];
- vtx_pageinfo_t info;
- int a;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- if (!t->vdau[req->pgbuf].stopped) {
- if (i2c_senddata(t, 2, 0, -1) ||
- i2c_sendbuf(t, 3, sizeof(t->vdau[0].sregs), t->vdau[req->pgbuf].sregs) ||
- i2c_senddata(t, 8, 0, 25, 0, ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', -1) ||
- i2c_senddata(t, 2, 0, t->vdau[req->pgbuf].sregs[0] | 8, -1) ||
- i2c_senddata(t, 8, 0, 25, 0, -1))
- return -EIO;
- jdelay(PAGE_WAIT);
- if (i2c_getdata(t, 10, infobits))
- return -EIO;
-
- if (!(infobits[8] & 0x10) && !(infobits[7] & 0xf0) && /* check FOUND-bit */
- (memcmp(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits)) ||
- time_after_eq(jiffies, t->vdau[req->pgbuf].expire)))
- { /* check if new page arrived */
- if (i2c_senddata(t, 8, 0, 0, 0, -1) ||
- i2c_getdata(t, VTX_PAGESIZE, t->vdau[req->pgbuf].pgbuf))
- return -EIO;
- t->vdau[req->pgbuf].expire = jiffies + PGBUF_EXPIRE;
- memset(t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE, ' ', VTX_VIRTUALSIZE - VTX_PAGESIZE);
- if (t->virtual_mode) {
- /* Packet X/24 */
- if (i2c_senddata(t, 8, 0, 0x20, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 20 * 40))
- return -EIO;
- /* Packet X/27/0 */
- if (i2c_senddata(t, 8, 0, 0x21, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 16 * 40))
- return -EIO;
- /* Packet 8/30/0...8/30/15
- * FIXME: AFAIK, the 5249 does hamming-decoding for some bytes in packet 8/30,
- * so we should undo this here.
- */
- if (i2c_senddata(t, 8, 0, 0x22, 0, -1) ||
- i2c_getdata(t, 40, t->vdau[req->pgbuf].pgbuf + VTX_PAGESIZE + 23 * 40))
- return -EIO;
- }
- t->vdau[req->pgbuf].clrfound = false;
- memcpy(t->vdau[req->pgbuf].laststat, infobits, sizeof(infobits));
- } else {
- memcpy(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits));
- }
- } else {
- memcpy(infobits, t->vdau[req->pgbuf].laststat, sizeof(infobits));
- }
-
- info.pagenum = ((infobits[8] << 8) & 0x700) | ((infobits[1] << 4) & 0xf0) | (infobits[0] & 0x0f);
- if (info.pagenum < 0x100)
- info.pagenum += 0x800;
- info.hour = ((infobits[5] << 4) & 0x30) | (infobits[4] & 0x0f);
- info.minute = ((infobits[3] << 4) & 0x70) | (infobits[2] & 0x0f);
- info.charset = ((infobits[7] >> 1) & 7);
- info.delete = !!(infobits[3] & 8);
- info.headline = !!(infobits[5] & 4);
- info.subtitle = !!(infobits[5] & 8);
- info.supp_header = !!(infobits[6] & 1);
- info.update = !!(infobits[6] & 2);
- info.inter_seq = !!(infobits[6] & 4);
- info.dis_disp = !!(infobits[6] & 8);
- info.serial = !!(infobits[7] & 1);
- info.notfound = !!(infobits[8] & 0x10);
- info.pblf = !!(infobits[9] & 0x20);
- info.hamming = 0;
- for (a = 0; a <= 7; a++) {
- if (infobits[a] & 0xf0) {
- info.hamming = 1;
- break;
- }
- }
- if (t->vdau[req->pgbuf].clrfound)
- info.notfound = 1;
- if (copy_to_user(req->buffer, &info, sizeof(vtx_pageinfo_t)))
- return -EFAULT;
- if (!info.hamming && !info.notfound)
- t->is_searching[req->pgbuf] = false;
- return 0;
- }
-
- case VTXIOCGETPAGE:
- {
- vtx_pagereq_t *req = arg;
- int start, end;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS || req->start < 0 ||
- req->start > req->end || req->end >= (virtual_mode ? VTX_VIRTUALSIZE : VTX_PAGESIZE))
- return -EINVAL;
- if (copy_to_user(req->buffer, &t->vdau[req->pgbuf].pgbuf[req->start], req->end - req->start + 1))
- return -EFAULT;
-
- /*
- * Always read the time directly from SAA5249
- */
-
- if (req->start <= 39 && req->end >= 32) {
- int len;
- char buf[16];
- start = max(req->start, 32);
- end = min(req->end, 39);
- len = end - start + 1;
- if (i2c_senddata(t, 8, 0, 0, start, -1) ||
- i2c_getdata(t, len, buf))
- return -EIO;
- if (copy_to_user(req->buffer + start - req->start, buf, len))
- return -EFAULT;
- }
- /* Insert the current header if DAU is still searching for a page */
- if (req->start <= 31 && req->end >= 7 && t->is_searching[req->pgbuf]) {
- char buf[32];
- int len;
-
- start = max(req->start, 7);
- end = min(req->end, 31);
- len = end - start + 1;
- if (i2c_senddata(t, 8, 0, 0, start, -1) ||
- i2c_getdata(t, len, buf))
- return -EIO;
- if (copy_to_user(req->buffer + start - req->start, buf, len))
- return -EFAULT;
- }
- return 0;
- }
-
- case VTXIOCSTOPDAU:
- {
- vtx_pagereq_t *req = arg;
-
- if (req->pgbuf < 0 || req->pgbuf >= NUM_DAUS)
- return -EINVAL;
- t->vdau[req->pgbuf].stopped = true;
- t->is_searching[req->pgbuf] = false;
- return 0;
- }
-
- case VTXIOCPUTPAGE:
- case VTXIOCSETDISP:
- case VTXIOCPUTSTAT:
- return 0;
-
- case VTXIOCCLRCACHE:
- {
- if (i2c_senddata(t, 0, NUM_DAUS, 0, 8, -1) || i2c_senddata(t, 11,
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- ' ', ' ', ' ', ' ', ' ', ' ',
- -1))
- return -EIO;
- if (i2c_senddata(t, 3, 0x20, -1))
- return -EIO;
- jdelay(10 * CLEAR_DELAY); /* I have no idea how long we have to wait here */
- return 0;
- }
-
- case VTXIOCSETVIRT:
- {
- /* The SAA5249 has virtual-row reception turned on always */
- t->virtual_mode = (int)(long)arg;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-/*
- * Translates old vtx IOCTLs to new ones
- *
- * This keeps new kernel versions compatible with old userspace programs.
- */
-static inline unsigned int vtx_fix_command(unsigned int cmd)
-{
- switch (cmd) {
- case VTXIOCGETINFO_OLD:
- cmd = VTXIOCGETINFO;
- break;
- case VTXIOCCLRPAGE_OLD:
- cmd = VTXIOCCLRPAGE;
- break;
- case VTXIOCCLRFOUND_OLD:
- cmd = VTXIOCCLRFOUND;
- break;
- case VTXIOCPAGEREQ_OLD:
- cmd = VTXIOCPAGEREQ;
- break;
- case VTXIOCGETSTAT_OLD:
- cmd = VTXIOCGETSTAT;
- break;
- case VTXIOCGETPAGE_OLD:
- cmd = VTXIOCGETPAGE;
- break;
- case VTXIOCSTOPDAU_OLD:
- cmd = VTXIOCSTOPDAU;
- break;
- case VTXIOCPUTPAGE_OLD:
- cmd = VTXIOCPUTPAGE;
- break;
- case VTXIOCSETDISP_OLD:
- cmd = VTXIOCSETDISP;
- break;
- case VTXIOCPUTSTAT_OLD:
- cmd = VTXIOCPUTSTAT;
- break;
- case VTXIOCCLRCACHE_OLD:
- cmd = VTXIOCCLRCACHE;
- break;
- case VTXIOCSETVIRT_OLD:
- cmd = VTXIOCSETVIRT;
- break;
- }
- return cmd;
-}
-
-/*
- * Handle the locking
- */
-
-static long saa5249_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct saa5249_device *t = video_drvdata(file);
- long err;
-
- cmd = vtx_fix_command(cmd);
- mutex_lock(&t->lock);
- err = video_usercopy(file, cmd, arg, do_saa5249_ioctl);
- mutex_unlock(&t->lock);
- return err;
-}
-
-static int saa5249_open(struct file *file)
-{
- struct saa5249_device *t = video_drvdata(file);
- int pgbuf;
-
- if (test_and_set_bit(0, &t->in_use))
- return -EBUSY;
-
- if (i2c_senddata(t, 0, 0, -1) || /* Select R11 */
- /* Turn off parity checks (we do this ourselves) */
- i2c_senddata(t, 1, disp_modes[t->disp_mode][0], 0, -1) ||
- /* Display TV-picture, no virtual rows */
- i2c_senddata(t, 4, NUM_DAUS, disp_modes[t->disp_mode][1], disp_modes[t->disp_mode][2], 7, -1))
- /* Set display to page 4 */
- {
- clear_bit(0, &t->in_use);
- return -EIO;
- }
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->vdau[pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- memset(t->vdau[pgbuf].sregs, 0, sizeof(t->vdau[0].sregs));
- memset(t->vdau[pgbuf].laststat, 0, sizeof(t->vdau[0].laststat));
- t->vdau[pgbuf].expire = 0;
- t->vdau[pgbuf].clrfound = true;
- t->vdau[pgbuf].stopped = true;
- t->is_searching[pgbuf] = false;
- }
- t->virtual_mode = false;
- return 0;
-}
-
-
-
-static int saa5249_release(struct file *file)
-{
- struct saa5249_device *t = video_drvdata(file);
-
- i2c_senddata(t, 1, 0x20, -1); /* Turn off CCT */
- i2c_senddata(t, 5, 3, 3, -1); /* Turn off TV-display */
- clear_bit(0, &t->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations saa_fops = {
- .owner = THIS_MODULE,
- .open = saa5249_open,
- .release = saa5249_release,
- .ioctl = saa5249_ioctl,
-};
-
-static struct video_device saa_template =
-{
- .name = "saa5249",
- .fops = &saa_fops,
- .release = video_device_release,
-};
-
-static int saa5249_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA5249, 0);
-}
-
-static const struct v4l2_subdev_core_ops saa5249_core_ops = {
- .g_chip_ident = saa5249_g_chip_ident,
-};
-
-static const struct v4l2_subdev_ops saa5249_ops = {
- .core = &saa5249_core_ops,
-};
-
-static int saa5249_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int pgbuf;
- int err;
- struct saa5249_device *t;
- struct v4l2_subdev *sd;
-
- v4l_info(client, "chip found @ 0x%x (%s)\n",
- client->addr << 1, client->adapter->name);
- v4l_info(client, "VideoText version %d.%d\n",
- VTX_VER_MAJ, VTX_VER_MIN);
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL)
- return -ENOMEM;
- sd = &t->sd;
- v4l2_i2c_subdev_init(sd, client, &saa5249_ops);
- mutex_init(&t->lock);
-
- /* Now create a video4linux device */
- t->vdev = video_device_alloc();
- if (t->vdev == NULL) {
- kfree(t);
- kfree(client);
- return -ENOMEM;
- }
- memcpy(t->vdev, &saa_template, sizeof(*t->vdev));
-
- for (pgbuf = 0; pgbuf < NUM_DAUS; pgbuf++) {
- memset(t->vdau[pgbuf].pgbuf, ' ', sizeof(t->vdau[0].pgbuf));
- memset(t->vdau[pgbuf].sregs, 0, sizeof(t->vdau[0].sregs));
- memset(t->vdau[pgbuf].laststat, 0, sizeof(t->vdau[0].laststat));
- t->vdau[pgbuf].expire = 0;
- t->vdau[pgbuf].clrfound = true;
- t->vdau[pgbuf].stopped = true;
- t->is_searching[pgbuf] = false;
- }
- video_set_drvdata(t->vdev, t);
-
- /* Register it */
- err = video_register_device(t->vdev, VFL_TYPE_VTX, -1);
- if (err < 0) {
- video_device_release(t->vdev);
- kfree(t);
- return err;
- }
- return 0;
-}
-
-static int saa5249_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct saa5249_device *t = to_dev(sd);
-
- video_unregister_device(t->vdev);
- v4l2_device_unregister_subdev(sd);
- kfree(t);
- return 0;
-}
-
-static const struct i2c_device_id saa5249_id[] = {
- { "saa5249", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, saa5249_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa5249",
- .probe = saa5249_probe,
- .remove = saa5249_remove,
- .id_table = saa5249_id,
-};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index c3e96f070973..984c0feb2a4e 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -34,7 +34,6 @@
#include <media/rds.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
/* insmod options */
@@ -430,7 +429,7 @@ static int saa6588_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct saa6588 *s = to_saa6588(sd);
- vt->capability |= V4L2_TUNER_CAP_RDS;
+ vt->capability |= V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
if (s->sync)
vt->rxsubchans |= V4L2_TUNER_SUB_RDS;
return 0;
@@ -530,9 +529,25 @@ static const struct i2c_device_id saa6588_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa6588_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa6588",
- .probe = saa6588_probe,
- .remove = saa6588_remove,
- .id_table = saa6588_id,
+static struct i2c_driver saa6588_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa6588",
+ },
+ .probe = saa6588_probe,
+ .remove = saa6588_remove,
+ .id_table = saa6588_id,
};
+
+static __init int init_saa6588(void)
+{
+ return i2c_add_driver(&saa6588_driver);
+}
+
+static __exit void exit_saa6588(void)
+{
+ i2c_del_driver(&saa6588_driver);
+}
+
+module_init(init_saa6588);
+module_exit(exit_saa6588);
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 3bca744e43af..7913f93979b8 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -36,7 +36,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA7110 video decoder driver");
MODULE_AUTHOR("Pauline Middelink");
@@ -505,9 +504,25 @@ static const struct i2c_device_id saa7110_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7110_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7110",
- .probe = saa7110_probe,
- .remove = saa7110_remove,
- .id_table = saa7110_id,
+static struct i2c_driver saa7110_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7110",
+ },
+ .probe = saa7110_probe,
+ .remove = saa7110_remove,
+ .id_table = saa7110_id,
};
+
+static __init int init_saa7110(void)
+{
+ return i2c_add_driver(&saa7110_driver);
+}
+
+static __exit void exit_saa7110(void)
+{
+ i2c_del_driver(&saa7110_driver);
+}
+
+module_init(init_saa7110);
+module_exit(exit_saa7110);
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index ee963f4d01bc..301c62b88cad 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -47,7 +47,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/saa7115.h>
#include <asm/div64.h>
@@ -1676,7 +1675,7 @@ static int saa711x_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id saa7115_id[] = {
+static const struct i2c_device_id saa711x_id[] = {
{ "saa7115_auto", 1 }, /* autodetect */
{ "saa7111", 0 },
{ "saa7113", 0 },
@@ -1685,11 +1684,27 @@ static const struct i2c_device_id saa7115_id[] = {
{ "saa7118", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, saa7115_id);
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7115",
- .probe = saa711x_probe,
- .remove = saa711x_remove,
- .id_table = saa7115_id,
+MODULE_DEVICE_TABLE(i2c, saa711x_id);
+
+static struct i2c_driver saa711x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7115",
+ },
+ .probe = saa711x_probe,
+ .remove = saa711x_remove,
+ .id_table = saa711x_id,
};
+
+static __init int init_saa711x(void)
+{
+ return i2c_add_driver(&saa711x_driver);
+}
+
+static __exit void exit_saa711x(void)
+{
+ i2c_del_driver(&saa711x_driver);
+}
+
+module_init(init_saa711x);
+module_exit(exit_saa711x);
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 79fffcf39ba8..ad964616c9d2 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -55,7 +55,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/saa7127.h>
static int debug;
@@ -843,9 +842,25 @@ static struct i2c_device_id saa7127_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7127_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7127",
- .probe = saa7127_probe,
- .remove = saa7127_remove,
- .id_table = saa7127_id,
+static struct i2c_driver saa7127_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7127",
+ },
+ .probe = saa7127_probe,
+ .remove = saa7127_remove,
+ .id_table = saa7127_id,
};
+
+static __init int init_saa7127(void)
+{
+ return i2c_add_driver(&saa7127_driver);
+}
+
+static __exit void exit_saa7127(void)
+{
+ i2c_del_driver(&saa7127_driver);
+}
+
+module_init(init_saa7127);
+module_exit(exit_saa7127);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index fda005e01670..3fe71be41a1f 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -1,8 +1,7 @@
config VIDEO_SAA7134
tristate "Philips SAA7134 support"
- depends on VIDEO_DEV && PCI && I2C && INPUT
+ depends on VIDEO_DEV && PCI && I2C
select VIDEOBUF_DMA_SG
- depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select CRC32
@@ -25,6 +24,14 @@ config VIDEO_SAA7134_ALSA
To compile this driver as a module, choose M here: the
module will be called saa7134-alsa.
+config VIDEO_SAA7134_RC
+ bool "Philips SAA7134 Remote Controller support"
+ depends on VIDEO_IR
+ depends on VIDEO_SAA7134
+ default y
+ ---help---
+	  Enables Remote Controller support for the saa7134 driver.
+
config VIDEO_SAA7134_DVB
tristate "DVB/ATSC Support for saa7134 based TV cards"
depends on VIDEO_SAA7134 && DVB_CORE
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 604158a8c235..8a5ff4d3cf15 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -1,7 +1,8 @@
-saa7134-objs := saa7134-cards.o saa7134-core.o saa7134-i2c.o \
- saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o \
- saa7134-video.o saa7134-input.o
+saa7134-y := saa7134-cards.o saa7134-core.o saa7134-i2c.o
+saa7134-y += saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o
+saa7134-y += saa7134-video.o
+saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index 40fd31ca7716..f9f29cc93a8a 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -36,7 +36,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -992,13 +991,29 @@ static const struct i2c_device_id saa6752hs_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa6752hs_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa6752hs",
- .probe = saa6752hs_probe,
- .remove = saa6752hs_remove,
- .id_table = saa6752hs_id,
+static struct i2c_driver saa6752hs_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa6752hs",
+ },
+ .probe = saa6752hs_probe,
+ .remove = saa6752hs_remove,
+ .id_table = saa6752hs_id,
};
+static __init int init_saa6752hs(void)
+{
+ return i2c_add_driver(&saa6752hs_driver);
+}
+
+static __exit void exit_saa6752hs(void)
+{
+ i2c_del_driver(&saa6752hs_driver);
+}
+
+module_init(init_saa6752hs);
+module_exit(exit_saa6752hs);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index bb8d83d8ddaf..10a6cbf6a790 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -7551,22 +7551,22 @@ int saa7134_board_init2(struct saa7134_dev *dev)
so we do not need to probe for a radio tuner device. */
if (dev->radio_type != UNSET)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
dev->radio_addr, NULL);
if (has_demod)
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
if (dev->tuner_addr == ADDR_UNSET) {
enum v4l2_i2c_tuner_type type =
has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
0, v4l2_i2c_tuner_addrs(type));
} else {
v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "tuner", "tuner",
+ &dev->i2c_adap, NULL, "tuner",
dev->tuner_addr, NULL);
}
}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 40bc635e8a3f..764d7d219fed 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -255,7 +255,7 @@ void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf)
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb,0,0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
@@ -991,7 +991,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (card_is_empress(dev)) {
struct v4l2_subdev *sd =
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "saa6752hs", "saa6752hs",
+ NULL, "saa6752hs",
saa7134_boards[dev->board].empress_addr, NULL);
if (sd)
@@ -1002,7 +1002,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
struct v4l2_subdev *sd;
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
- &dev->i2c_adap, "saa6588", "saa6588",
+ &dev->i2c_adap, NULL, "saa6588",
0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr));
if (sd) {
printk(KERN_INFO "%s: found RDS decoder\n", dev->name);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index f26fe7661a1d..beb95e21d109 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1111,7 +1111,7 @@ static int dvb_init(struct saa7134_dev *dev)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_ALTERNATE,
sizeof(struct saa7134_buf),
- dev);
+ dev, NULL);
switch (dev->board) {
case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index e763f9fd0133..1467a30a434f 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -542,7 +542,7 @@ static int empress_init(struct saa7134_dev *dev)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_ALTERNATE,
sizeof(struct saa7134_buf),
- dev);
+ dev, NULL);
empress_signal_update(&dev->empress_workqueue);
return 0;
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index da41b6b1e64a..2d3f6d265bbf 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -328,7 +328,6 @@ static struct i2c_algorithm saa7134_algo = {
static struct i2c_adapter saa7134_adap_template = {
.owner = THIS_MODULE,
.name = "saa7134",
- .id = I2C_HW_SAA7134,
.algo = &saa7134_algo,
};
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 0b336ca6d55b..46d31dfca7a3 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -429,7 +429,7 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-void ir_raw_decode_timer_end(unsigned long data)
+static void ir_raw_decode_timer_end(unsigned long data)
{
struct saa7134_dev *dev = (struct saa7134_dev *)data;
struct card_ir *ir = dev->remote;
@@ -550,7 +550,7 @@ static void saa7134_ir_close(void *priv)
}
-int saa7134_ir_change_protocol(void *priv, u64 ir_type)
+static int saa7134_ir_change_protocol(void *priv, u64 ir_type)
{
struct saa7134_dev *dev = priv;
struct card_ir *ir = dev->remote;
@@ -772,8 +772,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
ir_codes = RC_MAP_ASUS_PC39;
- mask_keydown = 0x0040000;
- rc5_gpio = 1;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = 1;
break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
@@ -959,6 +961,11 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "MSI TV@nywhere Plus";
dev->init_data.get_key = get_key_msi_tvanywhere_plus;
dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
+ /*
+	 * MSI TV@nywhere Plus requires more frequent polling,
+	 * otherwise it will miss some keypresses
+ */
+ dev->init_data.polling_interval = 50;
info.addr = 0x30;
/* MSI TV@nywhere Plus controller doesn't seem to
respond to probes unless we read something from
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 45f0ac8f3c0f..f0b1573137f4 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1366,13 +1366,13 @@ static int video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct saa7134_buf),
- fh);
+ fh, NULL);
videobuf_queue_sg_init(&fh->vbi, &saa7134_vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct saa7134_buf),
- fh);
+ fh, NULL);
saa7134_pgtable_alloc(dev->pci,&fh->pt_cap);
saa7134_pgtable_alloc(dev->pci,&fh->pt_vbi);
@@ -1825,7 +1825,7 @@ static int saa7134_querycap(struct file *file, void *priv,
if ((tuner_type == TUNER_ABSENT) || (tuner_type == UNSET))
cap->capabilities &= ~V4L2_CAP_TUNER;
- return 0;
+ return 0;
}
int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_std_id *id)
@@ -1871,9 +1871,12 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
else
fixup = V4L2_STD_SECAM;
}
- for (i = 0; i < TVNORMS; i++)
+ for (i = 0; i < TVNORMS; i++) {
if (fixup == tvnorms[i].id)
break;
+ }
+ if (i == TVNORMS)
+ return -EINVAL;
}
*id = tvnorms[i].id;
@@ -1997,9 +2000,12 @@ static int saa7134_g_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
memset(t, 0, sizeof(*t));
- for (n = 0; n < SAA7134_INPUT_MAX; n++)
+ for (n = 0; n < SAA7134_INPUT_MAX; n++) {
if (card_in(dev, n).tv)
break;
+ }
+ if (n == SAA7134_INPUT_MAX)
+ return -EINVAL;
if (NULL != card_in(dev, n).name) {
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index c040a1808542..d3b6a196e5dc 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -810,16 +810,18 @@ void saa7134_irq_oss_done(struct saa7134_dev *dev, unsigned long status);
/* ----------------------------------------------------------- */
/* saa7134-input.c */
+#if defined(CONFIG_VIDEO_SAA7134_RC)
int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
-
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+#else
+#define saa7134_input_init1(dev) (0)
+#define saa7134_input_fini(dev) (0)
+#define saa7134_input_irq(dev) (0)
+#define saa7134_probe_i2c_ir(dev) (0)
+#define saa7134_ir_start(dev) (0)
+#define saa7134_ir_stop(dev) (0)
+#endif
diff --git a/drivers/media/video/saa7164/Makefile b/drivers/media/video/saa7164/Makefile
index 4b329fd42add..6303a8e60eac 100644
--- a/drivers/media/video/saa7164/Makefile
+++ b/drivers/media/video/saa7164/Makefile
@@ -1,6 +1,6 @@
saa7164-objs := saa7164-cards.o saa7164-core.o saa7164-i2c.o saa7164-dvb.o \
saa7164-fw.o saa7164-bus.o saa7164-cmd.o saa7164-api.o \
- saa7164-buffer.o
+ saa7164-buffer.o saa7164-encoder.o saa7164-vbi.o
obj-$(CONFIG_VIDEO_SAA7164) += saa7164.o
diff --git a/drivers/media/video/saa7164/saa7164-api.c b/drivers/media/video/saa7164/saa7164-api.c
index 3f1262b00cc0..ad3bc4154176 100644
--- a/drivers/media/video/saa7164/saa7164-api.c
+++ b/drivers/media/video/saa7164/saa7164-api.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,14 +24,750 @@
#include "saa7164.h"
-int saa7164_api_transition_port(struct saa7164_tsport *port, u8 mode)
+int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i)
{
int ret;
+ if (!(saa_debug & DBGLVL_CPU))
+ return 0;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ i->deviceinst = 0;
+ i->devicespec = 0;
+ i->mode = 0;
+ i->status = 0;
+
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ GET_FW_STATUS_CONTROL, sizeof(struct tmFwInfoStruct), i);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ printk(KERN_INFO "saa7164[%d]-CPU: %d percent", dev->nr, i->CPULoad);
+
+ return ret;
+}
+
+int saa7164_api_collect_debug(struct saa7164_dev *dev)
+{
+ struct tmComResDebugGetData d;
+ u8 more = 255;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ while (more--) {
+
+ memset(&d, 0, sizeof(d));
+
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ GET_DEBUG_DATA_CONTROL, sizeof(d), &d);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ if (d.dwResult != SAA_OK)
+ break;
+
+ printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr, d.ucDebugData);
+ }
+
+ return 0;
+}
+
+int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level)
+{
+ struct tmComResDebugSetLevel lvl;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level);
+
+ /* Retrieve current state */
+ ret = saa7164_cmd_send(dev, 0, GET_CUR,
+ SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+ dprintk(DBGLVL_API, "%s() Was %d\n", __func__, lvl.dwDebugLevel);
+
+ lvl.dwDebugLevel = level;
+
+ /* set new state */
+ ret = saa7164_cmd_send(dev, 0, SET_CUR,
+ SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+int saa7164_api_set_vbi_format(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResProbeCommit fmt, rsp;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__,
+ port->nr, port->hwcfg.unitid);
+
+ fmt.bmHint = 0;
+ fmt.bFormatIndex = 1;
+ fmt.bFrameIndex = 1;
+
+ /* Probe, see if it can support this format */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret);
+
+	/* See if the format change was successful */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret);
+ } else {
+ /* Compare requested vs received, should be same */
+ if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) {
+ dprintk(DBGLVL_API, "SET/PROBE Verified\n");
+
+ /* Ask the device to select the negotiated format */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() commit error, ret = 0x%x\n",
+ __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
+ GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() GET commit error, ret = 0x%x\n",
+ __func__, ret);
+
+ if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) {
+ printk(KERN_ERR "%s() memcmp error, ret = 0x%x\n",
+ __func__, ret);
+ } else
+ dprintk(DBGLVL_API, "SET/COMMIT Verified\n");
+
+ dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint);
+ dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n", rsp.bFormatIndex);
+ dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n", rsp.bFrameIndex);
+ } else
+ printk(KERN_ERR "%s() compare failed\n", __func__);
+ }
+
+ if (ret == SAA_OK)
+ dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr);
+
+ return ret;
+}
+
+int saa7164_api_set_gop_size(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoGopStructure gs;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ gs.ucRefFrameDist = port->encoder_params.refdist;
+ gs.ucGOPSize = port->encoder_params.gop_size;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_GOP_STRUCTURE_CONTROL,
+ sizeof(gs), &gs);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_set_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoBitRate vb;
+ struct tmComResEncAudioBitRate ab;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
+ port->hwcfg.sourceid);
+
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
+ port->encoder_profile = EU_PROFILE_PS_DVD;
+ else
+ port->encoder_profile = EU_PROFILE_TS_HQ;
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Resolution */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Establish video bitrates */
+ if (port->encoder_params.bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT;
+ else
+ vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK;
+ vb.dwVideoBitRate = port->encoder_params.bitrate;
+ vb.dwVideoBitRatePeak = port->encoder_params.bitrate_peak;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_BIT_RATE_CONTROL, sizeof(struct tmComResEncVideoBitRate), &vb);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Establish audio bitrates */
+ ab.ucAudioBitRateMode = 0;
+ ab.dwAudioBitRate = 384000;
+ ab.dwAudioBitRatePeak = ab.dwAudioBitRate;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_AUDIO_BIT_RATE_CONTROL, sizeof(struct tmComResEncAudioBitRate), &ab);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ saa7164_api_set_aspect_ratio(port);
+ saa7164_api_set_gop_size(port);
+
+ return ret;
+}
+
+int saa7164_api_get_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoBitRate v;
+ struct tmComResEncAudioBitRate a;
+ struct tmComResEncVideoInputAspectRatio ar;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid);
+
+ port->encoder_profile = 0;
+ port->video_format = 0;
+ port->video_resolution = 0;
+ port->audio_format = 0;
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8), &port->video_resolution);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Aspect Ratio */
+ ar.width = 0;
+ ar.height = 0;
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
+ EU_VIDEO_INPUT_ASPECT_CONTROL,
+ sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile);
+ dprintk(DBGLVL_ENC, "video_format = %d\n", port->video_format);
+ dprintk(DBGLVL_ENC, "audio_format = %d\n", port->audio_format);
+ dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution);
+ dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n", v.ucVideoBitRateMode);
+ dprintk(DBGLVL_ENC, "v.dwVideoBitRate = %d\n", v.dwVideoBitRate);
+ dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n", v.dwVideoBitRatePeak);
+	dprintk(DBGLVL_ENC, "a.ucAudioBitRateMode = %d\n", a.ucAudioBitRateMode);
+	dprintk(DBGLVL_ENC, "a.dwAudioBitRate = %d\n", a.dwAudioBitRate);
+	dprintk(DBGLVL_ENC, "a.dwAudioBitRatePeak = %d\n", a.dwAudioBitRatePeak);
+ dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n", ar.width, ar.height);
+
+ return ret;
+}
+
+int saa7164_api_set_aspect_ratio(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResEncVideoInputAspectRatio ar;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s(%d)\n", __func__,
+ port->encoder_params.ctl_aspect);
+
+ switch (port->encoder_params.ctl_aspect) {
+ case V4L2_MPEG_VIDEO_ASPECT_1x1:
+ ar.width = 1;
+ ar.height = 1;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_4x3:
+ ar.width = 4;
+ ar.height = 3;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_16x9:
+ ar.width = 16;
+ ar.height = 9;
+ break;
+ case V4L2_MPEG_VIDEO_ASPECT_221x100:
+ ar.width = 221;
+ ar.height = 100;
+ break;
+ default:
+ BUG();
+ }
+
+ dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__,
+ port->encoder_params.ctl_aspect,
+ ar.width, ar.height);
+
+ /* Aspect Ratio */
+ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
+ EU_VIDEO_INPUT_ASPECT_CONTROL,
+ sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+ u16 val;
+
+ if (ctl == PU_BRIGHTNESS_CONTROL)
+ val = port->ctl_brightness;
+ else
+ if (ctl == PU_CONTRAST_CONTROL)
+ val = port->ctl_contrast;
+ else
+ if (ctl == PU_HUE_CONTROL)
+ val = port->ctl_hue;
+ else
+ if (ctl == PU_SATURATION_CONTROL)
+ val = port->ctl_saturation;
+ else
+ if (ctl == PU_SHARPNESS_CONTROL)
+ val = port->ctl_sharpness;
+ else
+ return -EINVAL;
+
+ dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n",
+ __func__, port->encunit.vsourceid, ctl, val);
+
+ ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR,
+ ctl, sizeof(u16), &val);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+ u16 val;
+
+ ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR,
+ ctl, sizeof(u16), &val);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ return ret;
+ }
+
+ dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n",
+ __func__, ctl, val);
+
+ if (ctl == PU_BRIGHTNESS_CONTROL)
+ port->ctl_brightness = val;
+ else
+ if (ctl == PU_CONTRAST_CONTROL)
+ port->ctl_contrast = val;
+ else
+ if (ctl == PU_HUE_CONTROL)
+ port->ctl_hue = val;
+ else
+ if (ctl == PU_SATURATION_CONTROL)
+ port->ctl_saturation = val;
+ else
+ if (ctl == PU_SHARPNESS_CONTROL)
+ port->ctl_sharpness = val;
+
+ return ret;
+}
+
+int saa7164_api_set_videomux(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 };
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n",
+ __func__, port->mux_input, inputs[port->mux_input - 1]);
+
+ /* Audio Mute */
+ ret = saa7164_api_audio_mute(port, 1);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Video Mux */
+ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Audio Mux */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &inputs[port->mux_input - 1]);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Audio UnMute */
+ ret = saa7164_api_audio_mute(port, 0);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_audio_mute(struct saa7164_port *port, int mute)
+{
+ struct saa7164_dev *dev = port->dev;
+ u8 v = mute;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ MUTE_CONTROL, sizeof(u8), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+/* 0 = silence, 0xff = full */
+int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level)
+{
+ struct saa7164_dev *dev = port->dev;
+ s16 v, min, max;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, level);
+
+ /* Obtain the min/max ranges */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN,
+ VOLUME_CONTROL, sizeof(u16), &min);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX,
+ VOLUME_CONTROL, sizeof(u16), &max);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v);
+
+ v = level;
+ if (v < min)
+ v = min;
+ if (v > max)
+ v = max;
+
+ /* Left */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Right */
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ (0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
+ (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v);
+
+ return ret;
+}
+
+int saa7164_api_set_audio_std(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResAudioDefaults lvl;
+ struct tmComResTunerStandard tvaudio;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s()\n", __func__);
+
+ /* Establish default levels */
+ lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT;
+ lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT;
+ lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT;
+ lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT;
+ lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT;
+ lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT;
+ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
+ AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults), &lvl);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ /* Manually select the appropriate TV audio standard */
+ if (port->encodernorm.id & V4L2_STD_NTSC) {
+ tvaudio.std = TU_STANDARD_NTSC_M;
+ tvaudio.country = 1;
+ } else {
+ tvaudio.std = TU_STANDARD_PAL_I;
+ tvaudio.country = 44;
+ }
+
+ ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
+ TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n", __func__, ret);
+ return ret;
+}
+
+int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct tmComResTunerStandardAuto p;
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect);
+
+ /* Disable TV Audio autodetect if not already set (buggy) */
+ if (autodetect)
+ p.mode = TU_STANDARD_AUTO;
+ else
+ p.mode = TU_STANDARD_MANUAL;
+ ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
+ TU_STANDARD_AUTO_CONTROL, sizeof(p), &p);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+int saa7164_api_get_videomux(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR,
+ SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+
+ dprintk(DBGLVL_ENC, "%s() v_mux=%d\n",
+ __func__, port->mux_input);
+
+ return ret;
+}
+
+int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ u16 len = 0;
+ u8 buf[256];
+ int ret;
+ u8 mas;
+
+ dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__,
+ port->nr, port->type, val);
+
+ if (port->nr == 0)
+ mas = 0xd0;
+ else
+ mas = 0xe0;
+
+ memset(buf, 0, sizeof(buf));
+
+ buf[0x00] = 0x04;
+ buf[0x01] = 0x00;
+ buf[0x02] = 0x00;
+ buf[0x03] = 0x00;
+
+ buf[0x04] = 0x04;
+ buf[0x05] = 0x00;
+ buf[0x06] = 0x00;
+ buf[0x07] = 0x00;
+
+ buf[0x08] = reg;
+ buf[0x09] = 0x26;
+ buf[0x0a] = mas;
+ buf[0x0b] = 0xb0;
+
+ buf[0x0c] = val;
+ buf[0x0d] = 0x00;
+ buf[0x0e] = 0x00;
+ buf[0x0f] = 0x00;
+
+ ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN,
+ EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
+ return -EIO;
+ }
+
+ ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR,
+ EXU_REGISTER_ACCESS_CONTROL, len, &buf);
+ if (ret != SAA_OK)
+ printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
+
+ //saa7164_dumphex16(dev, buf, 16);
+
+ return ret == SAA_OK ? 0 : -EIO;
+}
+
+/* Disable the IF block AGC controls */
+int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ u8 agc_disable;
+
+ dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);
+
+ if (std & V4L2_STD_NTSC) {
+ dprintk(DBGLVL_API, " NTSC\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_I) {
+ dprintk(DBGLVL_API, " PAL-I\n");
+ saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_M) {
+ dprintk(DBGLVL_API, " PAL-M\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_N) {
+ dprintk(DBGLVL_API, " PAL-N\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_Nc) {
+ dprintk(DBGLVL_API, " PAL-Nc\n");
+ saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_B) {
+ dprintk(DBGLVL_API, " PAL-B\n");
+ saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_PAL_DK) {
+ dprintk(DBGLVL_API, " PAL-DK\n");
+ saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */
+ agc_disable = 0;
+ } else if (std & V4L2_STD_SECAM_L) {
+ dprintk(DBGLVL_API, " SECAM-L\n");
+ saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */
+ agc_disable = 0;
+ } else {
+ /* Unknown standard, assume DTV */
+ dprintk(DBGLVL_API, " Unknown (assuming DTV)\n");
+ saa7164_api_set_dif(port, 0x00, 0x80); /* Undefined Video Standard */
+ agc_disable = 1;
+ }
+
+ saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */
+ saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */
+ saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */
+ saa7164_api_set_dif(port, 0x04, 0x01); /* Active */
+ msleep(100);
+ saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
+ msleep(100);
+
+ return ret;
+}
+
+/* Ensure the dif is in the correct state for the operating mode
+ * (analog / dtv). We only configure the dif through the analog encoder,
+ * so when we're in digital mode we need to find the appropriate encoder
+ * and use it to configure the DIF.
+ */
+int saa7164_api_initialize_dif(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *p = 0;
+ int ret = -EINVAL;
+ u32 std = 0;
+
+ dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__,
+ port->nr, port->type);
+
+ if (port->type == SAA7164_MPEG_ENCODER) {
+		/* Pick any analog standard to init the dif.
+		 * We'll come back during encoder_init
+		 * and set the correct standard if required.
+ */
+ std = V4L2_STD_NTSC;
+ } else
+ if (port->type == SAA7164_MPEG_DVB) {
+ if (port->nr == SAA7164_PORT_TS1)
+ p = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ p = &dev->ports[SAA7164_PORT_ENC2];
+ } else
+ if (port->type == SAA7164_MPEG_VBI) {
+ std = V4L2_STD_NTSC;
+ if (port->nr == SAA7164_PORT_VBI1)
+ p = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ p = &dev->ports[SAA7164_PORT_ENC2];
+ } else
+ BUG();
+
+ if (p)
+ ret = saa7164_api_configure_dif(p, std);
+
+ return ret;
+}
+
+int saa7164_api_transition_port(struct saa7164_port *port, u8 mode)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ int ret;
+
+ dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n",
+ __func__, port->nr, port->hwcfg.unitid, mode);
+
ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR,
SAA_STATE_CONTROL, sizeof(mode), &mode);
if (ret != SAA_OK)
- printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
+ printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n",
+ __func__, port->nr, port->hwcfg.unitid, ret);
return ret;
}
@@ -61,10 +797,45 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
&reg[0], 128, buf);
}
+int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
+ struct saa7164_port *port)
+{
+ struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;
+
+ dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
+ dprintk(DBGLVL_API, " VideoStandard = 0x%x\n", fmt->VideoStandard);
+ dprintk(DBGLVL_API, " StartLine = %d\n", fmt->StartLine);
+ dprintk(DBGLVL_API, " EndLine = %d\n", fmt->EndLine);
+ dprintk(DBGLVL_API, " FieldRate = %d\n", fmt->FieldRate);
+ dprintk(DBGLVL_API, " bNumLines = %d\n", fmt->bNumLines);
+
+ /* Cache the hardware configuration in the port */
+
+ port->bufcounter = port->hwcfg.BARLocation;
+ port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
+ port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
+ port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
+ port->bufptr32l = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
+ port->bufptr32h = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ port->bufptr64 = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
+ port->hwcfg.BARLocation);
+
+ dprintk(DBGLVL_API, " = VS_FORMAT_VBI (becomes dev->en[%d])\n",
+ port->nr);
+
+ return 0;
+}
int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
- struct saa7164_tsport *port,
- tmComResTSFormatDescrHeader_t *tsfmt)
+ struct saa7164_port *port,
+ struct tmComResTSFormatDescrHeader *tsfmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
@@ -96,27 +867,68 @@ int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
return 0;
}
+int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResPSFormatDescrHeader *fmt)
+{
+ dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
+ dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
+ dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength);
+ dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType);
+
+ /* Cache the hardware configuration in the port */
+ /* TODO: CHECK THIS in the port config */
+ port->bufcounter = port->hwcfg.BARLocation;
+ port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
+ port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
+ port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
+ port->bufptr32l = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
+ port->bufptr32h = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ port->bufptr64 = port->hwcfg.BARLocation +
+ (4 * sizeof(u32)) +
+ (sizeof(u32) * port->hwcfg.buffercount);
+ dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
+ port->hwcfg.BARLocation);
+
+ dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n",
+ port->nr);
+
+ return 0;
+}
+
int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
- struct saa7164_tsport *port = 0;
+ struct saa7164_port *tsport = 0;
+ struct saa7164_port *encport = 0;
+ struct saa7164_port *vbiport = 0;
u32 idx, next_offset;
int i;
- tmComResDescrHeader_t *hdr, *t;
- tmComResExtDevDescrHeader_t *exthdr;
- tmComResPathDescrHeader_t *pathhdr;
- tmComResAntTermDescrHeader_t *anttermhdr;
- tmComResTunerDescrHeader_t *tunerunithdr;
- tmComResDMATermDescrHeader_t *vcoutputtermhdr;
- tmComResTSFormatDescrHeader_t *tsfmt;
+ struct tmComResDescrHeader *hdr, *t;
+ struct tmComResExtDevDescrHeader *exthdr;
+ struct tmComResPathDescrHeader *pathhdr;
+ struct tmComResAntTermDescrHeader *anttermhdr;
+ struct tmComResTunerDescrHeader *tunerunithdr;
+ struct tmComResDMATermDescrHeader *vcoutputtermhdr;
+ struct tmComResTSFormatDescrHeader *tsfmt;
+ struct tmComResPSFormatDescrHeader *psfmt;
+ struct tmComResSelDescrHeader *psel;
+ struct tmComResProcDescrHeader *pdh;
+ struct tmComResAFeatureDescrHeader *afd;
+ struct tmComResEncoderDescrHeader *edh;
+ struct tmComResVBIFormatDescrHeader *vbifmt;
u32 currpath = 0;
dprintk(DBGLVL_API,
- "%s(?,?,%d) sizeof(tmComResDescrHeader_t) = %d bytes\n",
- __func__, len, (u32)sizeof(tmComResDescrHeader_t));
+ "%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n",
+ __func__, len, (u32)sizeof(struct tmComResDescrHeader));
- for (idx = 0; idx < (len - sizeof(tmComResDescrHeader_t)); ) {
+ for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) {
- hdr = (tmComResDescrHeader_t *)(buf + idx);
+ hdr = (struct tmComResDescrHeader *)(buf + idx);
if (hdr->type != CS_INTERFACE)
return SAA_ERR_NOT_SUPPORTED;
@@ -128,7 +940,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
break;
case VC_TUNER_PATH:
dprintk(DBGLVL_API, " VC_TUNER_PATH\n");
- pathhdr = (tmComResPathDescrHeader_t *)(buf + idx);
+ pathhdr = (struct tmComResPathDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " pathid = 0x%x\n",
pathhdr->pathid);
currpath = pathhdr->pathid;
@@ -136,7 +948,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case VC_INPUT_TERMINAL:
dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n");
anttermhdr =
- (tmComResAntTermDescrHeader_t *)(buf + idx);
+ (struct tmComResAntTermDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " terminalid = 0x%x\n",
anttermhdr->terminalid);
dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
@@ -179,7 +991,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case VC_OUTPUT_TERMINAL:
dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n");
vcoutputtermhdr =
- (tmComResDMATermDescrHeader_t *)(buf + idx);
+ (struct tmComResDMATermDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
vcoutputtermhdr->unitid);
dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
@@ -233,32 +1045,49 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
dprintk(DBGLVL_API, " numformats = 0x%x\n",
vcoutputtermhdr->numformats);
- t = (tmComResDescrHeader_t *)
- ((tmComResDMATermDescrHeader_t *)(buf + idx));
+ t = (struct tmComResDescrHeader *)
+ ((struct tmComResDMATermDescrHeader *)(buf + idx));
next_offset = idx + (vcoutputtermhdr->len);
for (i = 0; i < vcoutputtermhdr->numformats; i++) {
- t = (tmComResDescrHeader_t *)
+ t = (struct tmComResDescrHeader *)
(buf + next_offset);
switch (t->subtype) {
case VS_FORMAT_MPEG2TS:
tsfmt =
- (tmComResTSFormatDescrHeader_t *)t;
+ (struct tmComResTSFormatDescrHeader *)t;
if (currpath == 1)
- port = &dev->ts1;
+ tsport = &dev->ports[SAA7164_PORT_TS1];
else
- port = &dev->ts2;
- memcpy(&port->hwcfg, vcoutputtermhdr,
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ memcpy(&tsport->hwcfg, vcoutputtermhdr,
sizeof(*vcoutputtermhdr));
saa7164_api_configure_port_mpeg2ts(dev,
- port, tsfmt);
+ tsport, tsfmt);
break;
case VS_FORMAT_MPEG2PS:
- dprintk(DBGLVL_API,
- " = VS_FORMAT_MPEG2PS\n");
+ psfmt =
+ (struct tmComResPSFormatDescrHeader *)t;
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->hwcfg, vcoutputtermhdr,
+ sizeof(*vcoutputtermhdr));
+ saa7164_api_configure_port_mpeg2ps(dev,
+ encport, psfmt);
break;
case VS_FORMAT_VBI:
- dprintk(DBGLVL_API,
- " = VS_FORMAT_VBI\n");
+ vbifmt =
+ (struct tmComResVBIFormatDescrHeader *)t;
+ if (currpath == 1)
+ vbiport = &dev->ports[SAA7164_PORT_VBI1];
+ else
+ vbiport = &dev->ports[SAA7164_PORT_VBI2];
+ memcpy(&vbiport->hwcfg, vcoutputtermhdr,
+ sizeof(*vcoutputtermhdr));
+ memcpy(&vbiport->vbi_fmt_ntsc, vbifmt, sizeof(*vbifmt));
+ saa7164_api_configure_port_vbi(dev,
+ vbiport);
break;
case VS_FORMAT_RDS:
dprintk(DBGLVL_API,
@@ -284,7 +1113,7 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
case TUNER_UNIT:
dprintk(DBGLVL_API, " TUNER_UNIT\n");
tunerunithdr =
- (tmComResTunerDescrHeader_t *)(buf + idx);
+ (struct tmComResTunerDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
tunerunithdr->unitid);
dprintk(DBGLVL_API, " sourceid = 0x%x\n",
@@ -297,22 +1126,84 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
tunerunithdr->controlsize);
dprintk(DBGLVL_API, " controls = 0x%x\n",
tunerunithdr->controls);
+
+ if (tunerunithdr->unitid == tunerunithdr->iunit) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->tunerunit, tunerunithdr,
+ sizeof(struct tmComResTunerDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d] tuner)\n", encport->nr);
+ }
break;
case VC_SELECTOR_UNIT:
+ psel = (struct tmComResSelDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ psel->unitid);
+ dprintk(DBGLVL_API, " nrinpins = 0x%x\n",
+ psel->nrinpins);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ psel->sourceid);
break;
case VC_PROCESSING_UNIT:
+ pdh = (struct tmComResProcDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ pdh->unitid);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ pdh->sourceid);
+ dprintk(DBGLVL_API, " controlsize = 0x%x\n",
+ pdh->controlsize);
+ if (pdh->controlsize == 0x04) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->vidproc, pdh,
+ sizeof(struct tmComResProcDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case FEATURE_UNIT:
+ afd = (struct tmComResAFeatureDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " FEATURE_UNIT\n");
+ dprintk(DBGLVL_API, " unitid = 0x%x\n",
+ afd->unitid);
+ dprintk(DBGLVL_API, " sourceid = 0x%x\n",
+ afd->sourceid);
+ dprintk(DBGLVL_API, " controlsize = 0x%x\n",
+ afd->controlsize);
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->audfeat, afd,
+ sizeof(struct tmComResAFeatureDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
break;
case ENCODER_UNIT:
+ edh = (struct tmComResEncoderDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " ENCODER_UNIT\n");
+ dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype);
+ dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid);
+ dprintk(DBGLVL_API, " vsourceid = 0x%x\n", edh->vsourceid);
+ dprintk(DBGLVL_API, " asourceid = 0x%x\n", edh->asourceid);
+ dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit);
+ if (edh->iunit == edh->unitid) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->encunit, edh,
+ sizeof(struct tmComResEncoderDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case EXTENSION_UNIT:
dprintk(DBGLVL_API, " EXTENSION_UNIT\n");
- exthdr = (tmComResExtDevDescrHeader_t *)(buf + idx);
+ exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx);
dprintk(DBGLVL_API, " unitid = 0x%x\n",
exthdr->unitid);
dprintk(DBGLVL_API, " deviceid = 0x%x\n",
@@ -364,6 +1255,15 @@ int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
exthdr->numgpiogroups);
dprintk(DBGLVL_API, " controlsize = 0x%x\n",
exthdr->controlsize);
+ if (exthdr->devicetype & 0x80) {
+ if (currpath == 1)
+ encport = &dev->ports[SAA7164_PORT_ENC1];
+ else
+ encport = &dev->ports[SAA7164_PORT_ENC2];
+ memcpy(&encport->ifunit, exthdr,
+ sizeof(struct tmComResExtDevDescrHeader));
+ dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr);
+ }
break;
case PVC_INFRARED_UNIT:
dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n");
@@ -560,12 +1460,11 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
return ret == SAA_OK ? 0 : -EIO;
}
-
int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
u8 pin, u8 state)
{
int ret;
- tmComResGPIO_t t;
+ struct tmComResGPIO t;
dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n",
__func__, unitid, pin, state);
@@ -597,5 +1496,3 @@ int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid,
return saa7164_api_modify_gpio(dev, unitid, pin, 0);
}
-
-
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c
index ddd25d32723d..7230912acc7d 100644
--- a/drivers/media/video/saa7164/saa7164-buffer.c
+++ b/drivers/media/video/saa7164/saa7164-buffer.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -66,12 +66,33 @@
| etc
*/
+void saa7164_buffer_display(struct saa7164_buffer *buf)
+{
+ struct saa7164_dev *dev = buf->port->dev;
+ int i;
+
+ dprintk(DBGLVL_BUF, "%s() buffer @ 0x%p nr=%d\n",
+ __func__, buf, buf->idx);
+ dprintk(DBGLVL_BUF, " pci_cpu @ 0x%p dma @ 0x%08llx len = 0x%x\n",
+ buf->cpu, (long long)buf->dma, buf->pci_size);
+ dprintk(DBGLVL_BUF, " pt_cpu @ 0x%p pt_dma @ 0x%08llx len = 0x%x\n",
+ buf->pt_cpu, (long long)buf->pt_dma, buf->pt_size);
+
+ /* Format the Page Table Entries to point into the data buffer */
+ for (i = 0 ; i < SAA7164_PT_ENTRIES; i++) {
+
+ dprintk(DBGLVL_BUF, " pt[%02d] = 0x%p -> 0x%llx\n",
+ i, buf->pt_cpu + i, (u64)*(buf->pt_cpu + i));
+
+ }
+}
/* Allocate a new buffer structure and associated PCI space in bytes.
* len must be a multiple of sizeof(u64)
*/
-struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
+struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
u32 len)
{
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
struct saa7164_buffer *buf = 0;
struct saa7164_dev *dev = port->dev;
int i;
@@ -87,8 +108,12 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
goto ret;
}
+ buf->idx = -1;
buf->port = port;
buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ buf->actual_size = params->pitch * params->numberoflines;
+ buf->crc = 0;
/* TODO: arg len is being ignored */
buf->pci_size = SAA7164_PT_ENTRIES * 0x1000;
buf->pt_size = (SAA7164_PT_ENTRIES * sizeof(u64)) + 0x1000;
@@ -105,19 +130,23 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
goto fail2;
/* init the buffers to a known pattern, easier during debugging */
- memset(buf->cpu, 0xff, buf->pci_size);
- memset(buf->pt_cpu, 0xff, buf->pt_size);
+ memset_io(buf->cpu, 0xff, buf->pci_size);
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ memset_io(buf->pt_cpu, 0xff, buf->pt_size);
- dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p\n", __func__, buf);
+ dprintk(DBGLVL_BUF, "%s() allocated buffer @ 0x%p (%d pageptrs)\n",
+ __func__, buf, params->numpagetables);
dprintk(DBGLVL_BUF, " pci_cpu @ 0x%p dma @ 0x%08lx len = 0x%x\n",
buf->cpu, (long)buf->dma, buf->pci_size);
dprintk(DBGLVL_BUF, " pt_cpu @ 0x%p pt_dma @ 0x%08lx len = 0x%x\n",
buf->pt_cpu, (long)buf->pt_dma, buf->pt_size);
/* Format the Page Table Entries to point into the data buffer */
- for (i = 0 ; i < SAA7164_PT_ENTRIES; i++) {
+ for (i = 0 ; i < params->numpagetables; i++) {
*(buf->pt_cpu + i) = buf->dma + (i * 0x1000); /* TODO */
+ dprintk(DBGLVL_BUF, " pt[%02d] = 0x%p -> 0x%llx\n",
+ i, buf->pt_cpu + i, (u64)*(buf->pt_cpu + i));
}
@@ -133,26 +162,163 @@ ret:
return buf;
}
-int saa7164_buffer_dealloc(struct saa7164_tsport *port,
- struct saa7164_buffer *buf)
+int saa7164_buffer_dealloc(struct saa7164_buffer *buf)
{
struct saa7164_dev *dev;
- if (!buf || !port)
+ if (!buf || !buf->port)
return SAA_ERR_BAD_PARAMETER;
- dev = port->dev;
+ dev = buf->port->dev;
- dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
+ dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n",
+ __func__, buf);
if (buf->flags != SAA7164_BUFFER_FREE)
log_warn(" freeing a non-free buffer\n");
- pci_free_consistent(port->dev->pci, buf->pci_size, buf->cpu, buf->dma);
- pci_free_consistent(port->dev->pci, buf->pt_size, buf->pt_cpu,
- buf->pt_dma);
+ pci_free_consistent(dev->pci, buf->pci_size, buf->cpu, buf->dma);
+ pci_free_consistent(dev->pci, buf->pt_size, buf->pt_cpu, buf->pt_dma);
kfree(buf);
return SAA_OK;
}
+int saa7164_buffer_zero_offsets(struct saa7164_port *port, int i)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ if ((i < 0) || (i >= port->hwcfg.buffercount))
+ return -EINVAL;
+
+ dprintk(DBGLVL_BUF, "%s(idx = %d)\n", __func__, i);
+
+ saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
+
+ return 0;
+}
+
+/* Write a buffer into the hardware */
+int saa7164_buffer_activate(struct saa7164_buffer *buf, int i)
+{
+ struct saa7164_port *port = buf->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if ((i < 0) || (i >= port->hwcfg.buffercount))
+ return -EINVAL;
+
+ dprintk(DBGLVL_BUF, "%s(idx = %d)\n", __func__, i);
+
+ buf->idx = i; /* Note which buffer list index position we occupy */
+ buf->flags = SAA7164_BUFFER_BUSY;
+ buf->pos = 0;
+
+ /* TODO: Review this in light of 32v64 assignments */
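+ /* Each buffer slot appears to occupy one u32 in the offset table and
+  * a pair of u32s (high word then low word of the page table DMA
+  * address) in the pointer table, hence the (sizeof(u32) * 2) stride. */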
+ saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
+ saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i), buf->pt_dma);
+ saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
+
+ dprintk(DBGLVL_BUF, " buf[%d] offset 0x%llx (0x%x) "
+ "buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
+ buf->idx,
+ (u64)port->bufoffset + (i * sizeof(u32)),
+ saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
+ (u64)port->bufptr32h + ((sizeof(u32) * 2) * i),
+ (u64)port->bufptr32l + ((sizeof(u32) * 2) * i),
+ saa7164_readl(port->bufptr32h + ((sizeof(u32) * i) * 2)),
+ saa7164_readl(port->bufptr32l + ((sizeof(u32) * i) * 2)),
+ buf->idx);
+
+ return 0;
+}
+
+int saa7164_buffer_cfg_port(struct saa7164_port *port)
+{
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct list_head *c, *n;
+ int i = 0;
+
+ dprintk(DBGLVL_BUF, "%s(port=%d)\n", __func__, port->nr);
+
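+ /* A single DMA buffer carries 'numberoflines' lines of 'pitch' bytes,
+  * so bufsize is the usable payload size of each hardware buffer. */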
+ saa7164_writel(port->bufcounter, 0);
+ saa7164_writel(port->pitch, params->pitch);
+ saa7164_writel(port->bufsize, params->pitch * params->numberoflines);
+
+ dprintk(DBGLVL_BUF, " configured:\n");
+ dprintk(DBGLVL_BUF, " lmmio 0x%p\n", dev->lmmio);
+ dprintk(DBGLVL_BUF, " bufcounter 0x%x = 0x%x\n", port->bufcounter,
+ saa7164_readl(port->bufcounter));
+
+ dprintk(DBGLVL_BUF, " pitch 0x%x = %d\n", port->pitch,
+ saa7164_readl(port->pitch));
+
+ dprintk(DBGLVL_BUF, " bufsize 0x%x = %d\n", port->bufsize,
+ saa7164_readl(port->bufsize));
+
+ dprintk(DBGLVL_BUF, " buffercount = %d\n", port->hwcfg.buffercount);
+ dprintk(DBGLVL_BUF, " bufoffset = 0x%x\n", port->bufoffset);
+ dprintk(DBGLVL_BUF, " bufptr32h = 0x%x\n", port->bufptr32h);
+ dprintk(DBGLVL_BUF, " bufptr32l = 0x%x\n", port->bufptr32l);
+
+ /* Poke the buffers and offsets into PCI space */
+ mutex_lock(&port->dmaqueue_lock);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+
+ if (buf->flags != SAA7164_BUFFER_FREE)
+ BUG();
+
+ /* Place the buffer in the h/w queue */
+ saa7164_buffer_activate(buf, i);
+
+ /* Don't exceed the device maximum # bufs */
+ if (i++ > port->hwcfg.buffercount)
+ BUG();
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ return 0;
+}
+
+struct saa7164_user_buffer *saa7164_buffer_alloc_user(struct saa7164_dev *dev, u32 len)
+{
+ struct saa7164_user_buffer *buf;
+
+ buf = kzalloc(sizeof(struct saa7164_user_buffer), GFP_KERNEL);
+ if (buf == 0)
+ return 0;
+
+ buf->data = kzalloc(len, GFP_KERNEL);
+
+ if (buf->data == 0) {
+ kfree(buf);
+ return 0;
+ }
+
+ buf->actual_size = len;
+ buf->pos = 0;
+ buf->crc = 0;
+
+ dprintk(DBGLVL_BUF, "%s() allocated user buffer @ 0x%p\n",
+ __func__, buf);
+
+ return buf;
+}
+
+void saa7164_buffer_dealloc_user(struct saa7164_user_buffer *buf)
+{
+ if (!buf)
+ return;
+
+ if (buf->data) {
+ kfree(buf->data);
+ buf->data = 0;
+ }
+
+ if (buf)
+ kfree(buf);
+}
+
diff --git a/drivers/media/video/saa7164/saa7164-bus.c b/drivers/media/video/saa7164/saa7164-bus.c
index 83a04640a25a..30d5283da41e 100644
--- a/drivers/media/video/saa7164/saa7164-bus.c
+++ b/drivers/media/video/saa7164/saa7164-bus.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,7 +26,7 @@
*/
int saa7164_bus_setup(struct saa7164_dev *dev)
{
- tmComResBusInfo_t *b = &dev->bus;
+ struct tmComResBusInfo *b = &dev->bus;
mutex_init(&b->lock);
@@ -43,24 +43,18 @@ int saa7164_bus_setup(struct saa7164_dev *dev)
b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
- b->m_pdwSetWritePos = (u32 *)((u8 *)(dev->bmmio +
- ((u32)dev->intfdesc.BARLocation) + (2 * sizeof(u64))));
+ b->m_dwSetWritePos = ((u32)dev->intfdesc.BARLocation) + (2 * sizeof(u64));
+ b->m_dwSetReadPos = b->m_dwSetWritePos + (1 * sizeof(u32));
- b->m_pdwSetReadPos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 1 * sizeof(u32));
-
- b->m_pdwGetWritePos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 2 * sizeof(u32));
-
- b->m_pdwGetReadPos = (u32 *)((u8 *)b->m_pdwSetWritePos +
- 3 * sizeof(u32));
+ b->m_dwGetWritePos = b->m_dwSetWritePos + (2 * sizeof(u32));
+ b->m_dwGetReadPos = b->m_dwSetWritePos + (3 * sizeof(u32));
return 0;
}
void saa7164_bus_dump(struct saa7164_dev *dev)
{
- tmComResBusInfo_t *b = &dev->bus;
+ struct tmComResBusInfo *b = &dev->bus;
dprintk(DBGLVL_BUS, "Dumping the bus structure:\n");
dprintk(DBGLVL_BUS, " .type = %d\n", b->Type);
@@ -71,20 +65,47 @@ void saa7164_bus_dump(struct saa7164_dev *dev)
dprintk(DBGLVL_BUS, " .m_pdwGetRing = 0x%p\n", b->m_pdwGetRing);
dprintk(DBGLVL_BUS, " .m_dwSizeGetRing = 0x%x\n", b->m_dwSizeGetRing);
- dprintk(DBGLVL_BUS, " .m_pdwSetWritePos = 0x%p (0x%08x)\n",
- b->m_pdwSetWritePos, *b->m_pdwSetWritePos);
+ dprintk(DBGLVL_BUS, " .m_dwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+
+ dprintk(DBGLVL_BUS, " .m_dwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ dprintk(DBGLVL_BUS, " .m_dwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ dprintk(DBGLVL_BUS, " .m_dwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+
+}
+
+/* Intentionally throw a BUG() if the state of the message bus looks corrupt */
+void saa7164_bus_verify(struct saa7164_dev *dev)
+{
+ struct tmComResBusInfo *b = &dev->bus;
+ int bug = 0;
- dprintk(DBGLVL_BUS, " .m_pdwSetReadPos = 0x%p (0x%08x)\n",
- b->m_pdwSetReadPos, *b->m_pdwSetReadPos);
+ if (saa7164_readl(b->m_dwSetReadPos) > b->m_dwSizeSetRing)
+ bug++;
- dprintk(DBGLVL_BUS, " .m_pdwGetWritePos = 0x%p (0x%08x)\n",
- b->m_pdwGetWritePos, *b->m_pdwGetWritePos);
+ if (saa7164_readl(b->m_dwSetWritePos) > b->m_dwSizeSetRing)
+ bug++;
- dprintk(DBGLVL_BUS, " .m_pdwGetReadPos = 0x%p (0x%08x)\n",
- b->m_pdwGetReadPos, *b->m_pdwGetReadPos);
+ if (saa7164_readl(b->m_dwGetReadPos) > b->m_dwSizeGetRing)
+ bug++;
+
+ if (saa7164_readl(b->m_dwGetWritePos) > b->m_dwSizeGetRing)
+ bug++;
+
+ if (bug) {
+ saa_debug = 0xffff; /* Ensure we get the bus dump */
+ saa7164_bus_dump(dev);
+ saa_debug = 1024; /* Restore a lower debug level */
+ BUG();
+ }
}
-void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
+void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m, void *buf)
{
dprintk(DBGLVL_BUS, "Dumping msg structure:\n");
dprintk(DBGLVL_BUS, " .id = %d\n", m->id);
@@ -100,7 +121,7 @@ void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
/*
* Places a command or a response on the bus. The implementation does not
* know if it is a command or a response it just places the data on the
- * bus depending on the bus information given in the tmComResBusInfo_t
+ * bus depending on the bus information given in the struct tmComResBusInfo
* structure. If the command or response does not fit into the bus ring
* buffer it will be refused.
*
@@ -108,10 +129,10 @@ void saa7164_bus_dumpmsg(struct saa7164_dev *dev, tmComResInfo_t* m, void *buf)
* SAA_OK The function executed successfully.
* < 0 One or more members are not initialized.
*/
-int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
+int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf)
{
- tmComResBusInfo_t *bus = &dev->bus;
- u32 bytes_to_write, read_distance, timeout, curr_srp, curr_swp;
+ struct tmComResBusInfo *bus = &dev->bus;
+ u32 bytes_to_write, free_write_space, timeout, curr_srp, curr_swp;
u32 new_swp, space_rem;
int ret = SAA_ERR_BAD_PARAMETER;
@@ -122,6 +143,8 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
dprintk(DBGLVL_BUS, "%s()\n", __func__);
+ saa7164_bus_verify(dev);
+
msg->size = cpu_to_le16(msg->size);
msg->command = cpu_to_le16(msg->command);
msg->controlselector = cpu_to_le16(msg->controlselector);
@@ -141,30 +164,30 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
mutex_lock(&bus->lock);
bytes_to_write = sizeof(*msg) + msg->size;
- read_distance = 0;
+ free_write_space = 0;
timeout = SAA_BUS_TIMEOUT;
- curr_srp = le32_to_cpu(*bus->m_pdwSetReadPos);
- curr_swp = le32_to_cpu(*bus->m_pdwSetWritePos);
+ curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
+ curr_swp = le32_to_cpu(saa7164_readl(bus->m_dwSetWritePos));
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
- /* The ring has not wrapped yet */
- read_distance = curr_srp - curr_swp;
- else
/* Deal with the wrapped ring */
- read_distance = (curr_srp + bus->m_dwSizeSetRing) - curr_swp;
+ free_write_space = curr_srp - curr_swp;
+ else
+ /* The ring has not wrapped yet */
+ free_write_space = (curr_srp + bus->m_dwSizeSetRing) - curr_swp;
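+ /* For example, with a hypothetical 8KB (0x2000) set ring, srp = 0x0400
+  * and swp = 0x1000 would give (0x0400 + 0x2000) - 0x1000 = 0x1400
+  * bytes of free write space. */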
dprintk(DBGLVL_BUS, "%s() bytes_to_write = %d\n", __func__,
bytes_to_write);
- dprintk(DBGLVL_BUS, "%s() read_distance = %d\n", __func__,
- read_distance);
+ dprintk(DBGLVL_BUS, "%s() free_write_space = %d\n", __func__,
+ free_write_space);
dprintk(DBGLVL_BUS, "%s() curr_srp = %x\n", __func__, curr_srp);
dprintk(DBGLVL_BUS, "%s() curr_swp = %x\n", __func__, curr_swp);
/* Process the msg and write the content onto the bus */
- while (bytes_to_write >= read_distance) {
+ while (bytes_to_write >= free_write_space) {
if (timeout-- == 0) {
printk(KERN_ERR "%s() bus timeout\n", __func__);
@@ -177,15 +200,15 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
mdelay(1);
/* Check the space usage again */
- curr_srp = le32_to_cpu(*bus->m_pdwSetReadPos);
+ curr_srp = le32_to_cpu(saa7164_readl(bus->m_dwSetReadPos));
/* Deal with ring wrapping issues */
if (curr_srp > curr_swp)
- /* Read didn't wrap around the buffer */
- read_distance = curr_srp - curr_swp;
- else
/* Deal with the wrapped ring */
- read_distance = (curr_srp + bus->m_dwSizeSetRing) -
+ free_write_space = curr_srp - curr_swp;
+ else
+ /* Read didn't wrap around the buffer */
+ free_write_space = (curr_srp + bus->m_dwSizeSetRing) -
curr_swp;
}
@@ -257,37 +280,37 @@ int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
dprintk(DBGLVL_BUS, "%s() new_swp = %x\n", __func__, new_swp);
- /* TODO: Convert all of the direct PCI writes into
- * saa7164_writel/b calls for consistency.
- */
-
/* Update the bus write position */
- *bus->m_pdwSetWritePos = cpu_to_le32(new_swp);
+ saa7164_writel(bus->m_dwSetWritePos, cpu_to_le32(new_swp));
ret = SAA_OK;
out:
+ saa7164_bus_dump(dev);
mutex_unlock(&bus->lock);
+ saa7164_bus_verify(dev);
return ret;
}
/*
* Receive a command or a response from the bus. The implementation does not
* know if it is a command or a response it simply dequeues the data,
- * depending on the bus information given in the tmComResBusInfo_t structure.
+ * depending on the bus information given in the struct tmComResBusInfo structure.
*
* Return Value:
* 0 The function executed successfully.
* < 0 One or more members are not initialized.
*/
-int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
+int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf,
int peekonly)
{
- tmComResBusInfo_t *bus = &dev->bus;
+ struct tmComResBusInfo *bus = &dev->bus;
u32 bytes_to_read, write_distance, curr_grp, curr_gwp,
new_grp, buf_size, space_rem;
- tmComResInfo_t msg_tmp;
+ struct tmComResInfo msg_tmp;
int ret = SAA_ERR_BAD_PARAMETER;
+ saa7164_bus_verify(dev);
+
if (msg == 0)
return ret;
@@ -309,11 +332,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
/* Peek the bus to see if a msg exists, if it's not what we're expecting
* then return cleanly else read the message from the bus.
*/
- curr_gwp = le32_to_cpu(*bus->m_pdwGetWritePos);
- curr_grp = le32_to_cpu(*bus->m_pdwGetReadPos);
+ curr_gwp = le32_to_cpu(saa7164_readl(bus->m_dwGetWritePos));
+ curr_grp = le32_to_cpu(saa7164_readl(bus->m_dwGetReadPos));
if (curr_gwp == curr_grp) {
- dprintk(DBGLVL_BUS, "%s() No message on the bus\n", __func__);
ret = SAA_ERR_EMPTY;
goto out;
}
@@ -434,7 +456,7 @@ int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf,
}
/* Update the read positions, adjusting the ring */
- *bus->m_pdwGetReadPos = cpu_to_le32(new_grp);
+ saa7164_writel(bus->m_dwGetReadPos, cpu_to_le32(new_grp));
peekout:
msg->size = le16_to_cpu(msg->size);
@@ -443,6 +465,7 @@ peekout:
ret = SAA_OK;
out:
mutex_unlock(&bus->lock);
+ saa7164_bus_verify(dev);
return ret;
}
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index a3c299405f46..4cb634e952a6 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -55,6 +55,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x1d,
@@ -97,6 +101,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x06,
@@ -139,6 +147,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x1d,
@@ -195,6 +207,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x22,
@@ -251,6 +267,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
@@ -307,6 +327,10 @@ struct saa7164_board saa7164_boards[] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
+ .portc = SAA7164_MPEG_ENCODER,
+ .portd = SAA7164_MPEG_ENCODER,
+ .porte = SAA7164_MPEG_VBI,
+ .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x26,
@@ -437,8 +461,6 @@ void saa7164_card_list(struct saa7164_dev *dev)
void saa7164_gpio_setup(struct saa7164_dev *dev)
{
-
-
switch (dev->board) {
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
@@ -462,7 +484,6 @@ void saa7164_gpio_setup(struct saa7164_dev *dev)
saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 3);
break;
}
-
}
static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data)
diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
index 9c1d3ac43869..301a9e302f45 100644
--- a/drivers/media/video/saa7164/saa7164-cmd.c
+++ b/drivers/media/video/saa7164/saa7164-cmd.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,16 +82,17 @@ u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
* -bus/c running buffer. */
int saa7164_irq_dequeue(struct saa7164_dev *dev)
{
- int ret = SAA_OK;
+ int ret = SAA_OK, i = 0;
u32 timeout;
wait_queue_head_t *q = 0;
+ u8 tmp[512];
dprintk(DBGLVL_CMD, "%s()\n", __func__);
/* While any outstand message on the bus exists... */
do {
/* Peek the msg bus */
- tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
+ struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
ret = saa7164_bus_get(dev, &tRsp, NULL, 1);
if (ret != SAA_OK)
break;
@@ -109,8 +110,22 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
printk(KERN_ERR
"%s() found timed out command on the bus\n",
__func__);
+
+ /* Clean the bus */
+ ret = saa7164_bus_get(dev, &tRsp, &tmp, 0);
+ printk(KERN_ERR "%s() ret = %x\n", __func__, ret);
+ if (ret == SAA_ERR_EMPTY)
+ /* Someone else already fetched the response */
+ return SAA_OK;
+
+ if (ret != SAA_OK)
+ return ret;
}
- } while (0);
+
+ /* It's unlikely to have more than 4 or 5 pending messages; ensure we exit
+ * at some point regardless.
+ */
+ } while (i++ < 32);
return ret;
}
@@ -128,7 +143,7 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
while (loop) {
- tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
+ struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
ret = saa7164_bus_get(dev, &tRsp, NULL, 1);
if (ret == SAA_ERR_EMPTY)
return SAA_OK;
@@ -171,9 +186,9 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
return SAA_OK;
}
-int saa7164_cmd_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf)
+int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf)
{
- tmComResBusInfo_t *bus = &dev->bus;
+ struct tmComResBusInfo *bus = &dev->bus;
u8 cmd_sent;
u16 size, idx;
u32 cmds;
@@ -324,11 +339,11 @@ void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, tmComResCmd_t command,
+int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
u16 controlselector, u16 size, void *buf)
{
- tmComResInfo_t command_t, *pcommand_t;
- tmComResInfo_t response_t, *presponse_t;
+ struct tmComResInfo command_t, *pcommand_t;
+ struct tmComResInfo response_t, *presponse_t;
u8 errdata[256];
u16 resp_dsize;
u16 data_recd;
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index e6aa0fbd1e91..e1bac5051460 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,6 +30,9 @@
#include <linux/delay.h>
#include <asm/div64.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -49,14 +52,38 @@ unsigned int saa_debug;
module_param_named(debug, saa_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
+unsigned int fw_debug;
+module_param(fw_debug, int, 0644);
+MODULE_PARM_DESC(fw_debug, "Firmware debug level def:2");
+
+unsigned int encoder_buffers = SAA7164_MAX_ENCODER_BUFFERS;
+module_param(encoder_buffers, int, 0644);
+MODULE_PARM_DESC(encoder_buffers, "Total buffers in read queue 16-512 def:64");
+
+unsigned int vbi_buffers = SAA7164_MAX_VBI_BUFFERS;
+module_param(vbi_buffers, int, 0644);
+MODULE_PARM_DESC(vbi_buffers, "Total buffers in read queue 16-512 def:64");
+
unsigned int waitsecs = 10;
module_param(waitsecs, int, 0644);
-MODULE_PARM_DESC(debug, "timeout on firmware messages");
+MODULE_PARM_DESC(waitsecs, "timeout on firmware messages");
static unsigned int card[] = {[0 ... (SAA7164_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
+unsigned int print_histogram = 64;
+module_param(print_histogram, int, 0644);
+MODULE_PARM_DESC(print_histogram, "print histogram values once");
+
+unsigned int crc_checking = 1;
+module_param(crc_checking, int, 0644);
+MODULE_PARM_DESC(crc_checking, "enable crc sanity checking on buffers");
+
+unsigned int guard_checking = 1;
+module_param(guard_checking, int, 0644);
+MODULE_PARM_DESC(guard_checking, "enable dma sanity checking for buffer overruns");
+
static unsigned int saa7164_devcount;
static DEFINE_MUTEX(devlist);
@@ -64,6 +91,444 @@ LIST_HEAD(saa7164_devlist);
#define INT_SIZE 16
+void saa7164_dumphex16FF(struct saa7164_dev *dev, u8 *buf, int len)
+{
+ int i;
+ u8 tmp[16];
+ memset(&tmp[0], 0xff, sizeof(tmp));
+
+ printk(KERN_INFO "--------------------> "
+ "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+
+ for (i = 0; i < len; i += 16) {
+ if (memcmp(&tmp, buf + i, sizeof(tmp)) != 0) {
+ printk(KERN_INFO " [0x%08x] "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ *(buf+i+0), *(buf+i+1), *(buf+i+2), *(buf+i+3),
+ *(buf+i+4), *(buf+i+5), *(buf+i+6), *(buf+i+7),
+ *(buf+i+8), *(buf+i+9), *(buf+i+10), *(buf+i+11),
+ *(buf+i+12), *(buf+i+13), *(buf+i+14), *(buf+i+15));
+ }
+ }
+}
+
+static void saa7164_pack_verifier(struct saa7164_buffer *buf)
+{
+ u8 *p = (u8 *)buf->cpu;
+ int i;
+
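+ /* The encoder appears to emit fixed 2048 byte packs, each expected to
+  * begin with the MPEG-2 program stream pack_start_code
+  * 0x00 0x00 0x01 0xBA. */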
+ for (i = 0; i < buf->actual_size; i += 2048) {
+
+ if ((*(p + i + 0) != 0x00) || (*(p + i + 1) != 0x00) ||
+ (*(p + i + 2) != 0x01) || (*(p + i + 3) != 0xBA)) {
+ printk(KERN_ERR "No pack at 0x%x\n", i);
+// saa7164_dumphex16FF(buf->port->dev, (p + i), 32);
+ }
+ }
+}
+
+#define FIXED_VIDEO_PID 0xf1
+#define FIXED_AUDIO_PID 0xf2
+
+static void saa7164_ts_verifier(struct saa7164_buffer *buf)
+{
+ struct saa7164_port *port = buf->port;
+ u32 i;
+ u8 cc, a;
+ u16 pid;
+ u8 __iomem *bufcpu = (u8 *)buf->cpu;
+
+ port->sync_errors = 0;
+ port->v_cc_errors = 0;
+ port->a_cc_errors = 0;
+
+ for (i = 0; i < buf->actual_size; i += 188) {
+ if (*(bufcpu + i) != 0x47)
+ port->sync_errors++;
+
+ /* TODO: Query pid lower 8 bits, ignoring upper bits intentionally */
+ pid = ((*(bufcpu + i + 1) & 0x1f) << 8) | *(bufcpu + i + 2);
+ cc = *(bufcpu + i + 3) & 0x0f;
+
+ if (pid == FIXED_VIDEO_PID) {
+ a = ((port->last_v_cc + 1) & 0x0f);
+ if (a != cc) {
+ printk(KERN_ERR "video cc last = %x current = %x i = %d\n",
+ port->last_v_cc, cc, i);
+ port->v_cc_errors++;
+ }
+
+ port->last_v_cc = cc;
+ } else
+ if (pid == FIXED_AUDIO_PID) {
+ a = ((port->last_a_cc + 1) & 0x0f);
+ if (a != cc) {
+ printk(KERN_ERR "audio cc last = %x current = %x i = %d\n",
+ port->last_a_cc, cc, i);
+ port->a_cc_errors++;
+ }
+
+ port->last_a_cc = cc;
+ }
+
+ }
+
+ /* Only report errors if we've been through this function at least
+ * once already and the cached cc values are primed. First time through
+ * always generates errors.
+ */
+ if (port->v_cc_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "video pid cc, %d errors\n", port->v_cc_errors);
+
+ if (port->a_cc_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "audio pid cc, %d errors\n", port->a_cc_errors);
+
+ if (port->sync_errors && (port->done_first_interrupt > 1))
+ printk(KERN_ERR "sync_errors = %d\n", port->sync_errors);
+
+ if (port->done_first_interrupt == 1)
+ port->done_first_interrupt++;
+}
+
+static void saa7164_histogram_reset(struct saa7164_histogram *hg, char *name)
+{
+ int i;
+
+ memset(hg, 0, sizeof(struct saa7164_histogram));
+ strcpy(hg->name, name);
+
+ /* First 30ms x 1ms */
+ for (i = 0; i < 30; i++) {
+ hg->counter1[0 + i].val = i;
+ }
+
+ /* 30 - 200ms x 10ms */
+ for (i = 0; i < 18; i++) {
+ hg->counter1[30 + i].val = 30 + (i * 10);
+ }
+
+ /* 200 - 3000ms x 200ms */
+ for (i = 0; i < 15; i++) {
+ hg->counter1[48 + i].val = 200 + (i * 200);
+ }
+
+ /* Catch all massive value (2secs) */
+ hg->counter1[55].val = 2000;
+
+ /* Catch all massive value (4secs) */
+ hg->counter1[56].val = 4000;
+
+ /* Catch all massive value (8secs) */
+ hg->counter1[57].val = 8000;
+
+ /* Catch all massive value (15secs) */
+ hg->counter1[58].val = 15000;
+
+ /* Catch all massive value (30secs) */
+ hg->counter1[59].val = 30000;
+
+ /* Catch all massive value (60secs) */
+ hg->counter1[60].val = 60000;
+
+ /* Catch all massive value (5mins) */
+ hg->counter1[61].val = 300000;
+
+ /* Catch all massive value (15mins) */
+ hg->counter1[62].val = 900000;
+
+ /* Catch all massive values (1hr) */
+ hg->counter1[63].val = 3600000;
+}
+
+void saa7164_histogram_update(struct saa7164_histogram *hg, u32 val)
+{
+ int i;
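+ /* Count 'val' against the first bucket whose threshold covers it,
+  * e.g. a 7ms interval lands in the 7ms bucket and a 45ms interval
+  * in the 50ms bucket. */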
+ for (i = 0; i < 64; i++) {
+ if (val <= hg->counter1[i].val) {
+ hg->counter1[i].count++;
+ hg->counter1[i].update_time = jiffies;
+ break;
+ }
+ }
+}
+
+static void saa7164_histogram_print(struct saa7164_port *port,
+ struct saa7164_histogram *hg)
+{
+ u32 entries = 0;
+ int i;
+
+ printk(KERN_ERR "Histogram named %s (ms, count, last_update_jiffy)\n", hg->name);
+ for (i = 0; i < 64; i++) {
+ if (hg->counter1[i].count == 0)
+ continue;
+
+ printk(KERN_ERR " %4d %12d %Ld\n",
+ hg->counter1[i].val,
+ hg->counter1[i].count,
+ hg->counter1[i].update_time);
+
+ entries++;
+ }
+ printk(KERN_ERR "Total: %d\n", entries);
+}
+
+static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf = 0;
+ struct saa7164_user_buffer *ubuf = 0;
+ struct list_head *c, *n;
+ int i = 0;
+ u8 __iomem *p;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+
+ buf = list_entry(c, struct saa7164_buffer, list);
+ if (i++ > port->hwcfg.buffercount) {
+ printk(KERN_ERR "%s() illegal i count %d\n",
+ __func__, i);
+ break;
+ }
+
+ if (buf->idx == bufnr) {
+
+ /* Found the buffer, deal with it */
+ dprintk(DBGLVL_IRQ, "%s() bufnr: %d\n", __func__, bufnr);
+
+ if (crc_checking) {
+ /* Throw a new checksum on the dma buffer */
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ }
+
+ if (guard_checking) {
+ p = (u8 *)buf->cpu;
+ if ((*(p + buf->actual_size + 0) != 0xff) ||
+ (*(p + buf->actual_size + 1) != 0xff) ||
+ (*(p + buf->actual_size + 2) != 0xff) ||
+ (*(p + buf->actual_size + 3) != 0xff) ||
+ (*(p + buf->actual_size + 0x10) != 0xff) ||
+ (*(p + buf->actual_size + 0x11) != 0xff) ||
+ (*(p + buf->actual_size + 0x12) != 0xff) ||
+ (*(p + buf->actual_size + 0x13) != 0xff)) {
+ printk(KERN_ERR "%s() buf %p guard buffer breach\n",
+ __func__, buf);
+// saa7164_dumphex16FF(dev, (p + buf->actual_size) - 32 , 64);
+ }
+ }
+
+ if ((port->nr != SAA7164_PORT_VBI1) && (port->nr != SAA7164_PORT_VBI2)) {
+ /* Validate the incoming buffer content */
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_TS)
+ saa7164_ts_verifier(buf);
+ else if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
+ saa7164_pack_verifier(buf);
+ }
+
+ /* find a free user buffer and clone to it */
+ if (!list_empty(&port->list_buf_free.list)) {
+
+ /* Pull the first buffer from the free list */
+ ubuf = list_first_entry(&port->list_buf_free.list,
+ struct saa7164_user_buffer, list);
+
+ if (buf->actual_size <= ubuf->actual_size) {
+
+ memcpy_fromio(ubuf->data, buf->cpu,
+ ubuf->actual_size);
+
+ if (crc_checking) {
+ /* Throw a new checksum on the read buffer */
+ ubuf->crc = crc32(0, ubuf->data, ubuf->actual_size);
+ }
+
+ /* Requeue the buffer on the used list */
+ ubuf->pos = 0;
+
+ list_move_tail(&ubuf->list,
+ &port->list_buf_used.list);
+
+ /* Flag any userland waiters */
+ wake_up_interruptible(&port->wait_read);
+
+ } else {
+ printk(KERN_ERR "buf %p bufsize fails match\n", buf);
+ }
+
+ } else
+ printk(KERN_ERR "encirq no free buffers, increase param encoder_buffers\n");
+
+ /* Ensure offset into buffer remains 0, fill buffer
+ * with known bad data. We check for this data at a later point
+ * in time. */
+ saa7164_buffer_zero_offsets(port, bufnr);
+ memset_io(buf->cpu, 0xff, buf->pci_size);
+ if (crc_checking) {
+ /* Throw yet another checksum on the dma buffer */
+ buf->crc = crc32(0, buf->cpu, buf->actual_size);
+ }
+
+ break;
+ }
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+}
+
+static void saa7164_work_enchandler(struct work_struct *w)
+{
+ struct saa7164_port *port =
+ container_of(w, struct saa7164_port, workenc);
+ struct saa7164_dev *dev = port->dev;
+
+ u32 wp, mcb, rp, cnt = 0;
+
+ port->last_svc_msecs_diff = port->last_svc_msecs;
+ port->last_svc_msecs = jiffies_to_msecs(jiffies);
+
+ port->last_svc_msecs_diff = port->last_svc_msecs -
+ port->last_svc_msecs_diff;
+
+ saa7164_histogram_update(&port->svc_interval,
+ port->last_svc_msecs_diff);
+
+ port->last_irq_svc_msecs_diff = port->last_svc_msecs -
+ port->last_irq_msecs;
+
+ saa7164_histogram_update(&port->irq_svc_interval,
+ port->last_irq_svc_msecs_diff);
+
+ dprintk(DBGLVL_IRQ,
+ "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n",
+ __func__,
+ port->last_svc_msecs_diff,
+ port->last_irq_svc_msecs_diff,
+ port->last_svc_wp,
+ port->last_svc_rp
+ );
+
+ /* Current write position */
+ wp = saa7164_readl(port->bufcounter);
+ if (wp > (port->hwcfg.buffercount - 1)) {
+ printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp);
+ return;
+ }
+
+ /* Most current complete buffer */
+ if (wp == 0)
+ mcb = (port->hwcfg.buffercount - 1);
+ else
+ mcb = wp - 1;
+
+ while (1) {
+ if (port->done_first_interrupt == 0) {
+ port->done_first_interrupt++;
+ rp = mcb;
+ } else
+ rp = (port->last_svc_rp + 1) % 8;
+
+ if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
+ break;
+ }
+
+ saa7164_work_enchandler_helper(port, rp);
+ port->last_svc_rp = rp;
+ cnt++;
+
+ if (rp == mcb)
+ break;
+ }
+
+ /* TODO: Convert this into a /proc/saa7164 style readable file */
+ if (print_histogram == port->nr) {
+ saa7164_histogram_print(port, &port->irq_interval);
+ saa7164_histogram_print(port, &port->svc_interval);
+ saa7164_histogram_print(port, &port->irq_svc_interval);
+ saa7164_histogram_print(port, &port->read_interval);
+ saa7164_histogram_print(port, &port->poll_interval);
+ /* TODO: fix this to preserve any previous state */
+ print_histogram = 64 + port->nr;
+ }
+}
+
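+/* Deferred servicing for the VBI ports; the logic mirrors
+ * saa7164_work_enchandler() above but is scheduled from the VBI
+ * interrupt paths. */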
+static void saa7164_work_vbihandler(struct work_struct *w)
+{
+ struct saa7164_port *port =
+ container_of(w, struct saa7164_port, workenc);
+ struct saa7164_dev *dev = port->dev;
+
+ u32 wp, mcb, rp, cnt = 0;
+
+ port->last_svc_msecs_diff = port->last_svc_msecs;
+ port->last_svc_msecs = jiffies_to_msecs(jiffies);
+ port->last_svc_msecs_diff = port->last_svc_msecs -
+ port->last_svc_msecs_diff;
+
+ saa7164_histogram_update(&port->svc_interval,
+ port->last_svc_msecs_diff);
+
+ port->last_irq_svc_msecs_diff = port->last_svc_msecs -
+ port->last_irq_msecs;
+
+ saa7164_histogram_update(&port->irq_svc_interval,
+ port->last_irq_svc_msecs_diff);
+
+ dprintk(DBGLVL_IRQ,
+ "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n",
+ __func__,
+ port->last_svc_msecs_diff,
+ port->last_irq_svc_msecs_diff,
+ port->last_svc_wp,
+ port->last_svc_rp
+ );
+
+ /* Current write position */
+ wp = saa7164_readl(port->bufcounter);
+ if (wp > (port->hwcfg.buffercount - 1)) {
+ printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp);
+ return;
+ }
+
+ /* Most current complete buffer */
+ if (wp == 0)
+ mcb = (port->hwcfg.buffercount - 1);
+ else
+ mcb = wp - 1;
+
+ while (1) {
+ if (port->done_first_interrupt == 0) {
+ port->done_first_interrupt++;
+ rp = mcb;
+ } else
+ rp = (port->last_svc_rp + 1) % 8;
+
+ if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
+ break;
+ }
+
+ saa7164_work_enchandler_helper(port, rp);
+ port->last_svc_rp = rp;
+ cnt++;
+
+ if (rp == mcb)
+ break;
+ }
+
+ /* TODO: Convert this into a /proc/saa7164 style readable file */
+ if (print_histogram == port->nr) {
+ saa7164_histogram_print(port, &port->irq_interval);
+ saa7164_histogram_print(port, &port->svc_interval);
+ saa7164_histogram_print(port, &port->irq_svc_interval);
+ saa7164_histogram_print(port, &port->read_interval);
+ saa7164_histogram_print(port, &port->poll_interval);
+ /* TODO: fix this to preserve any previous state */
+ print_histogram = 64 + port->nr;
+ }
+}
+
static void saa7164_work_cmdhandler(struct work_struct *w)
{
struct saa7164_dev *dev = container_of(w, struct saa7164_dev, workcmd);
@@ -74,7 +539,7 @@ static void saa7164_work_cmdhandler(struct work_struct *w)
static void saa7164_buffer_deliver(struct saa7164_buffer *buf)
{
- struct saa7164_tsport *port = buf->port;
+ struct saa7164_port *port = buf->port;
/* Feed the transport payload into the kernel demux */
dvb_dmx_swfilter_packets(&port->dvb.demux, (u8 *)buf->cpu,
@@ -82,7 +547,56 @@ static void saa7164_buffer_deliver(struct saa7164_buffer *buf)
}
-static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
+static irqreturn_t saa7164_irq_vbi(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ /* Store old time */
+ port->last_irq_msecs_diff = port->last_irq_msecs;
+
+ /* Collect new stats */
+ port->last_irq_msecs = jiffies_to_msecs(jiffies);
+
+ /* Calculate stats */
+ port->last_irq_msecs_diff = port->last_irq_msecs -
+ port->last_irq_msecs_diff;
+
+ saa7164_histogram_update(&port->irq_interval,
+ port->last_irq_msecs_diff);
+
+ dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__,
+ port->last_irq_msecs_diff);
+
+ /* This schedules the deferred VBI work handler */
+ schedule_work(&port->workenc);
+ return 0;
+}
+
+static irqreturn_t saa7164_irq_encoder(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ /* Store old time */
+ port->last_irq_msecs_diff = port->last_irq_msecs;
+
+ /* Collect new stats */
+ port->last_irq_msecs = jiffies_to_msecs(jiffies);
+
+ /* Calculate stats */
+ port->last_irq_msecs_diff = port->last_irq_msecs -
+ port->last_irq_msecs_diff;
+
+ saa7164_histogram_update(&port->irq_interval,
+ port->last_irq_msecs_diff);
+
+ dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__,
+ port->last_irq_msecs_diff);
+
+ schedule_work(&port->workenc);
+ return 0;
+}
+
+static irqreturn_t saa7164_irq_ts(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct saa7164_buffer *buf;
@@ -96,7 +610,7 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
/* Find the previous buffer to the current write point */
if (wp == 0)
- rp = 7;
+ rp = (port->hwcfg.buffercount - 1);
else
rp = wp - 1;
@@ -107,7 +621,7 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
if (i++ > port->hwcfg.buffercount)
BUG();
- if (buf->nr == rp) {
+ if (buf->idx == rp) {
/* Found the buffer, deal with it */
dprintk(DBGLVL_IRQ, "%s() wp: %d processing: %d\n",
__func__, wp, rp);
@@ -123,6 +637,13 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_tsport *port)
static irqreturn_t saa7164_irq(int irq, void *dev_id)
{
struct saa7164_dev *dev = dev_id;
+ struct saa7164_port *porta = &dev->ports[SAA7164_PORT_TS1];
+ struct saa7164_port *portb = &dev->ports[SAA7164_PORT_TS2];
+ struct saa7164_port *portc = &dev->ports[SAA7164_PORT_ENC1];
+ struct saa7164_port *portd = &dev->ports[SAA7164_PORT_ENC2];
+ struct saa7164_port *porte = &dev->ports[SAA7164_PORT_VBI1];
+ struct saa7164_port *portf = &dev->ports[SAA7164_PORT_VBI2];
+
u32 intid, intstat[INT_SIZE/4];
int i, handled = 0, bit;
@@ -168,17 +689,35 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
if (intid == dev->intfdesc.bInterruptId) {
/* A response to an cmd/api call */
schedule_work(&dev->workcmd);
- } else if (intid ==
- dev->ts1.hwcfg.interruptid) {
+ } else if (intid == porta->hwcfg.interruptid) {
/* Transport path 1 */
- saa7164_irq_ts(&dev->ts1);
+ saa7164_irq_ts(porta);
- } else if (intid ==
- dev->ts2.hwcfg.interruptid) {
+ } else if (intid == portb->hwcfg.interruptid) {
/* Transport path 2 */
- saa7164_irq_ts(&dev->ts2);
+ saa7164_irq_ts(portb);
+
+ } else if (intid == portc->hwcfg.interruptid) {
+
+ /* Encoder path 1 */
+ saa7164_irq_encoder(portc);
+
+ } else if (intid == portd->hwcfg.interruptid) {
+
+ /* Encoder path 2 */
+ saa7164_irq_encoder(portd);
+
+ } else if (intid == porte->hwcfg.interruptid) {
+
+ /* VBI path 1 */
+ saa7164_irq_vbi(porte);
+
+ } else if (intid == portf->hwcfg.interruptid) {
+
+ /* VBI path 2 */
+ saa7164_irq_vbi(portf);
} else {
/* Find the function */
@@ -286,8 +825,8 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr)
static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p hwdesc sizeof(tmComResHWDescr_t) = %d bytes\n",
- &dev->hwdesc, (u32)sizeof(tmComResHWDescr_t));
+ dprintk(1, "@0x%p hwdesc sizeof(struct tmComResHWDescr) = %d bytes\n",
+ &dev->hwdesc, (u32)sizeof(struct tmComResHWDescr));
dprintk(1, " .bLength = 0x%x\n", dev->hwdesc.bLength);
dprintk(1, " .bDescriptorType = 0x%x\n", dev->hwdesc.bDescriptorType);
@@ -317,8 +856,8 @@ static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
{
dprintk(1, "@0x%p intfdesc "
- "sizeof(tmComResInterfaceDescr_t) = %d bytes\n",
- &dev->intfdesc, (u32)sizeof(tmComResInterfaceDescr_t));
+ "sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
+ &dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr));
dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength);
dprintk(1, " .bDescriptorType = 0x%x\n", dev->intfdesc.bDescriptorType);
@@ -338,8 +877,8 @@ static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
static void saa7164_dump_busdesc(struct saa7164_dev *dev)
{
- dprintk(1, "@0x%p busdesc sizeof(tmComResBusDescr_t) = %d bytes\n",
- &dev->busdesc, (u32)sizeof(tmComResBusDescr_t));
+ dprintk(1, "@0x%p busdesc sizeof(struct tmComResBusDescr) = %d bytes\n",
+ &dev->busdesc, (u32)sizeof(struct tmComResBusDescr));
dprintk(1, " .CommandRing = 0x%016Lx\n", dev->busdesc.CommandRing);
dprintk(1, " .ResponseRing = 0x%016Lx\n", dev->busdesc.ResponseRing);
@@ -356,23 +895,23 @@ static void saa7164_dump_busdesc(struct saa7164_dev *dev)
*/
static void saa7164_get_descriptors(struct saa7164_dev *dev)
{
- memcpy(&dev->hwdesc, dev->bmmio, sizeof(tmComResHWDescr_t));
- memcpy(&dev->intfdesc, dev->bmmio + sizeof(tmComResHWDescr_t),
- sizeof(tmComResInterfaceDescr_t));
- memcpy(&dev->busdesc, dev->bmmio + dev->intfdesc.BARLocation,
- sizeof(tmComResBusDescr_t));
-
- if (dev->hwdesc.bLength != sizeof(tmComResHWDescr_t)) {
- printk(KERN_ERR "Structure tmComResHWDescr_t is mangled\n");
+ memcpy_fromio(&dev->hwdesc, dev->bmmio, sizeof(struct tmComResHWDescr));
+ memcpy_fromio(&dev->intfdesc, dev->bmmio + sizeof(struct tmComResHWDescr),
+ sizeof(struct tmComResInterfaceDescr));
+ memcpy_fromio(&dev->busdesc, dev->bmmio + dev->intfdesc.BARLocation,
+ sizeof(struct tmComResBusDescr));
+
+ if (dev->hwdesc.bLength != sizeof(struct tmComResHWDescr)) {
+ printk(KERN_ERR "Structure struct tmComResHWDescr is mangled\n");
printk(KERN_ERR "Need %x got %d\n", dev->hwdesc.bLength,
- (u32)sizeof(tmComResHWDescr_t));
+ (u32)sizeof(struct tmComResHWDescr));
} else
saa7164_dump_hwdesc(dev);
- if (dev->intfdesc.bLength != sizeof(tmComResInterfaceDescr_t)) {
- printk(KERN_ERR "struct tmComResInterfaceDescr_t is mangled\n");
+ if (dev->intfdesc.bLength != sizeof(struct tmComResInterfaceDescr)) {
+ printk(KERN_ERR "struct struct tmComResInterfaceDescr is mangled\n");
printk(KERN_ERR "Need %x got %d\n", dev->intfdesc.bLength,
- (u32)sizeof(tmComResInterfaceDescr_t));
+ (u32)sizeof(struct tmComResInterfaceDescr));
} else
saa7164_dump_intfdesc(dev);
@@ -402,6 +941,58 @@ static int get_resources(struct saa7164_dev *dev)
return -EBUSY;
}
+static int saa7164_port_init(struct saa7164_dev *dev, int portnr)
+{
+ struct saa7164_port *port = 0;
+
+ if ((portnr < 0) || (portnr >= SAA7164_MAX_PORTS))
+ BUG();
+
+ port = &dev->ports[portnr];
+
+ port->dev = dev;
+ port->nr = portnr;
+
+ if ((portnr == SAA7164_PORT_TS1) || (portnr == SAA7164_PORT_TS2))
+ port->type = SAA7164_MPEG_DVB;
+ else
+ if ((portnr == SAA7164_PORT_ENC1) || (portnr == SAA7164_PORT_ENC2)) {
+ port->type = SAA7164_MPEG_ENCODER;
+
+ /* We need a deferred interrupt handler for cmd handling */
+ INIT_WORK(&port->workenc, saa7164_work_enchandler);
+ }
+ else
+ if ((portnr == SAA7164_PORT_VBI1) || (portnr == SAA7164_PORT_VBI2)) {
+ port->type = SAA7164_MPEG_VBI;
+
+ /* We need a deferred interrupt handler for cmd handling */
+ INIT_WORK(&port->workenc, saa7164_work_vbihandler);
+ } else
+ BUG();
+
+ /* Init all the critical resources */
+ mutex_init(&port->dvb.lock);
+ INIT_LIST_HEAD(&port->dmaqueue.list);
+ mutex_init(&port->dmaqueue_lock);
+
+ INIT_LIST_HEAD(&port->list_buf_used.list);
+ INIT_LIST_HEAD(&port->list_buf_free.list);
+ init_waitqueue_head(&port->wait_read);
+
+
+ saa7164_histogram_reset(&port->irq_interval, "irq intervals");
+ saa7164_histogram_reset(&port->svc_interval, "deferred intervals");
+ saa7164_histogram_reset(&port->irq_svc_interval,
+ "irq to deferred intervals");
+ saa7164_histogram_reset(&port->read_interval,
+ "encoder/vbi read() intervals");
+ saa7164_histogram_reset(&port->poll_interval,
+ "encoder/vbi poll() intervals");
+
+ return 0;
+}
+
static int saa7164_dev_setup(struct saa7164_dev *dev)
{
int i;
@@ -443,23 +1034,13 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
dev->i2c_bus[2].dev = dev;
dev->i2c_bus[2].nr = 2;
- /* Transport port A Defaults / setup */
- dev->ts1.dev = dev;
- dev->ts1.nr = 0;
- mutex_init(&dev->ts1.dvb.lock);
- INIT_LIST_HEAD(&dev->ts1.dmaqueue.list);
- INIT_LIST_HEAD(&dev->ts1.dummy_dmaqueue.list);
- mutex_init(&dev->ts1.dmaqueue_lock);
- mutex_init(&dev->ts1.dummy_dmaqueue_lock);
-
- /* Transport port B Defaults / setup */
- dev->ts2.dev = dev;
- dev->ts2.nr = 1;
- mutex_init(&dev->ts2.dvb.lock);
- INIT_LIST_HEAD(&dev->ts2.dmaqueue.list);
- INIT_LIST_HEAD(&dev->ts2.dummy_dmaqueue.list);
- mutex_init(&dev->ts2.dmaqueue_lock);
- mutex_init(&dev->ts2.dummy_dmaqueue_lock);
+ /* Transport + Encoder ports 1, 2, 3, 4 - Defaults / setup */
+ saa7164_port_init(dev, SAA7164_PORT_TS1);
+ saa7164_port_init(dev, SAA7164_PORT_TS2);
+ saa7164_port_init(dev, SAA7164_PORT_ENC1);
+ saa7164_port_init(dev, SAA7164_PORT_ENC2);
+ saa7164_port_init(dev, SAA7164_PORT_VBI1);
+ saa7164_port_init(dev, SAA7164_PORT_VBI2);
if (get_resources(dev) < 0) {
printk(KERN_ERR "CORE %s No more PCIe resources for "
@@ -516,6 +1097,132 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
+#ifdef CONFIG_PROC_FS
+static int saa7164_proc_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev;
+ struct tmComResBusInfo *b;
+ struct list_head *list;
+ int i, c;
+
+ if (saa7164_devcount == 0)
+ return 0;
+
+ list_for_each(list, &saa7164_devlist) {
+ dev = list_entry(list, struct saa7164_dev, devlist);
+ seq_printf(m, "%s = %p\n", dev->name, dev);
+
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
+
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_printf(m, "\n Set Ring:\n");
+ seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", *(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_printf(m, "\n");
+ c = 0;
+ }
+ }
+
+ c = 0;
+ seq_printf(m, "\n Get Ring:\n");
+ seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", *(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_printf(m, "\n");
+ c = 0;
+ }
+ }
+
+ mutex_unlock(&b->lock);
+
+ }
+
+ return 0;
+}
+
+static int saa7164_proc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, saa7164_proc_show, NULL);
+}
+
+static const struct file_operations saa7164_proc_fops = {
+ .open = saa7164_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int saa7164_proc_create(void)
+{
+ struct proc_dir_entry *pe;
+
+ pe = proc_create("saa7164", S_IRUGO, NULL, &saa7164_proc_fops);
+ if (!pe)
+ return -ENOMEM;
+
+ return 0;
+}
+#endif
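
The /proc/saa7164 node created above reports the Set/Get ring positions and then hex-dumps both command rings, 16 bytes per row. A minimal userspace sketch for reading it back, assuming the module is loaded and the entry was created:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Stream the ring-buffer dump produced by saa7164_proc_show() */
        FILE *f = fopen("/proc/saa7164", "r");
        char line[256];

        if (!f) {
                perror("fopen /proc/saa7164");
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return EXIT_SUCCESS;
}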
+
+static int saa7164_thread_function(void *data)
+{
+ struct saa7164_dev *dev = data;
+ struct tmFwInfoStruct fwinfo;
+ u64 last_poll_time = 0;
+
+ dprintk(DBGLVL_THR, "thread started\n");
+
+ set_freezable();
+
+ while (1) {
+ msleep_interruptible(100);
+ if (kthread_should_stop())
+ break;
+ try_to_freeze();
+
+ dprintk(DBGLVL_THR, "thread running\n");
+
+ /* Dump the firmware debug message to console */
+ /* Polling this costs us 1-2% of the ARM CPU */
+ /* convert this into a response to interrupt 0x7a */
+ saa7164_api_collect_debug(dev);
+
+ /* Monitor CPU load every 1 second */
+ if ((last_poll_time + 1000 /* ms */) < jiffies_to_msecs(jiffies)) {
+ saa7164_api_get_load_info(dev, &fwinfo);
+ last_poll_time = jiffies_to_msecs(jiffies);
+ }
+
+ }
+
+ dprintk(DBGLVL_THR, "thread exiting\n");
+ return 0;
+}
+
static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
@@ -622,7 +1329,6 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
saa7164_gpio_setup(dev);
saa7164_card_setup(dev);
-
/* Parse the dynamic device configuration, find various
* media endpoints (MPEG, WMV, PS, TS) and cache their
* configuration details into the driver, so we can
@@ -633,7 +1339,7 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
/* Begin to create the video sub-systems and register funcs */
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) {
- if (saa7164_dvb_register(&dev->ts1) < 0) {
+ if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) {
printk(KERN_ERR "%s() Failed to register "
"dvb adapters on porta\n",
__func__);
@@ -641,13 +1347,50 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
}
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) {
- if (saa7164_dvb_register(&dev->ts2) < 0) {
+ if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) {
printk(KERN_ERR"%s() Failed to register "
"dvb adapters on portb\n",
__func__);
}
}
+ if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) {
+ if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "mpeg encoder\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) {
+ if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "mpeg encoder\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) {
+ if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "vbi device\n", __func__);
+ }
+ }
+
+ if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) {
+ if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) {
+ printk(KERN_ERR"%s() Failed to register "
+ "vbi device\n", __func__);
+ }
+ }
+ saa7164_api_set_debug(dev, fw_debug);
+
+ if (fw_debug) {
+ dev->kthread = kthread_run(saa7164_thread_function, dev,
+ "saa7164 debug");
+ /* kthread_run() returns ERR_PTR() on failure, not NULL */
+ if (IS_ERR(dev->kthread)) {
+ dev->kthread = NULL;
+ printk(KERN_ERR "%s() Failed to create "
+ "debug kernel thread\n", __func__);
+ }
+ }
+
} /* != BOARD_UNKNOWN */
else
printk(KERN_ERR "%s() Unsupported board detected, "
@@ -675,13 +1418,49 @@ static void __devexit saa7164_finidev(struct pci_dev *pci_dev)
{
struct saa7164_dev *dev = pci_get_drvdata(pci_dev);
+ if (dev->board != SAA7164_BOARD_UNKNOWN) {
+ if (fw_debug && dev->kthread) {
+ kthread_stop(dev->kthread);
+ dev->kthread = NULL;
+ }
+ if (dev->firmwareloaded)
+ saa7164_api_set_debug(dev, 0x00);
+ }
+
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].irq_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].svc_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].irq_svc_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].read_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1],
+ &dev->ports[SAA7164_PORT_ENC1].poll_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI1],
+ &dev->ports[SAA7164_PORT_VBI1].read_interval);
+ saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI2],
+ &dev->ports[SAA7164_PORT_VBI2].poll_interval);
+
saa7164_shutdown(dev);
if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB)
- saa7164_dvb_unregister(&dev->ts1);
+ saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS1]);
if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB)
- saa7164_dvb_unregister(&dev->ts2);
+ saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS2]);
+
+ if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER)
+ saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC1]);
+
+ if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER)
+ saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC2]);
+
+ if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI)
+ saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI1]);
+
+ if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI)
+ saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI2]);
saa7164_i2c_unregister(&dev->i2c_bus[0]);
saa7164_i2c_unregister(&dev->i2c_bus[1]);
@@ -727,11 +1506,18 @@ static struct pci_driver saa7164_pci_driver = {
static int __init saa7164_init(void)
{
printk(KERN_INFO "saa7164 driver loaded\n");
+
+#ifdef CONFIG_PROC_FS
+ saa7164_proc_create();
+#endif
return pci_register_driver(&saa7164_pci_driver);
}
static void __exit saa7164_fini(void)
{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("saa7164", NULL);
+#endif
pci_unregister_driver(&saa7164_pci_driver);
}
diff --git a/drivers/media/video/saa7164/saa7164-dvb.c b/drivers/media/video/saa7164/saa7164-dvb.c
index cf099c59b38e..b305a01b3bde 100644
--- a/drivers/media/video/saa7164/saa7164-dvb.c
+++ b/drivers/media/video/saa7164/saa7164-dvb.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,7 +82,7 @@ static struct s5h1411_config hauppauge_s5h1411_config = {
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
-static int saa7164_dvb_stop_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_stop_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -100,7 +100,7 @@ static int saa7164_dvb_stop_tsport(struct saa7164_tsport *port)
return ret;
}
-static int saa7164_dvb_acquire_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_acquire_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -118,7 +118,7 @@ static int saa7164_dvb_acquire_tsport(struct saa7164_tsport *port)
return ret;
}
-static int saa7164_dvb_pause_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_pause_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret;
@@ -140,90 +140,38 @@ static int saa7164_dvb_pause_tsport(struct saa7164_tsport *port)
* the part through AVStream / KS Windows stages, forwards or backwards.
* States are: stopped, acquired (h/w), paused, started.
*/
-static int saa7164_dvb_stop_streaming(struct saa7164_tsport *port)
+static int saa7164_dvb_stop_streaming(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
- int ret;
-
- dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
-
- ret = saa7164_dvb_pause_tsport(port);
- ret = saa7164_dvb_acquire_tsport(port);
- ret = saa7164_dvb_stop_tsport(port);
-
- return ret;
-}
-
-static int saa7164_dvb_cfg_tsport(struct saa7164_tsport *port)
-{
- tmHWStreamParameters_t *params = &port->hw_streamingparams;
- struct saa7164_dev *dev = port->dev;
struct saa7164_buffer *buf;
- struct list_head *c, *n;
- int i = 0;
+ struct list_head *p, *q;
+ int ret;
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
- saa7164_writel(port->pitch, params->pitch);
- saa7164_writel(port->bufsize, params->pitch * params->numberoflines);
+ ret = saa7164_dvb_pause_port(port);
+ ret = saa7164_dvb_acquire_port(port);
+ ret = saa7164_dvb_stop_port(port);
- dprintk(DBGLVL_DVB, " configured:\n");
- dprintk(DBGLVL_DVB, " lmmio 0x%p\n", dev->lmmio);
- dprintk(DBGLVL_DVB, " bufcounter 0x%x = 0x%x\n", port->bufcounter,
- saa7164_readl(port->bufcounter));
-
- dprintk(DBGLVL_DVB, " pitch 0x%x = %d\n", port->pitch,
- saa7164_readl(port->pitch));
-
- dprintk(DBGLVL_DVB, " bufsize 0x%x = %d\n", port->bufsize,
- saa7164_readl(port->bufsize));
-
- dprintk(DBGLVL_DVB, " buffercount = %d\n", port->hwcfg.buffercount);
- dprintk(DBGLVL_DVB, " bufoffset = 0x%x\n", port->bufoffset);
- dprintk(DBGLVL_DVB, " bufptr32h = 0x%x\n", port->bufptr32h);
- dprintk(DBGLVL_DVB, " bufptr32l = 0x%x\n", port->bufptr32l);
-
- /* Poke the buffers and offsets into PCI space */
+ /* Mark the hardware buffers as free */
mutex_lock(&port->dmaqueue_lock);
- list_for_each_safe(c, n, &port->dmaqueue.list) {
- buf = list_entry(c, struct saa7164_buffer, list);
-
- /* TODO: Review this in light of 32v64 assignments */
- saa7164_writel(port->bufoffset + (sizeof(u32) * i), 0);
- saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i),
- buf->pt_dma);
- saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
-
- dprintk(DBGLVL_DVB,
- " buf[%d] offset 0x%llx (0x%x) "
- "buf 0x%llx/%llx (0x%x/%x)\n",
- i,
- (u64)port->bufoffset + (i * sizeof(u32)),
- saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
- (u64)port->bufptr32h + ((sizeof(u32) * 2) * i),
- (u64)port->bufptr32l + ((sizeof(u32) * 2) * i),
- saa7164_readl(port->bufptr32h + ((sizeof(u32) * i)
- * 2)),
- saa7164_readl(port->bufptr32l + ((sizeof(u32) * i)
- * 2)));
-
- if (i++ > port->hwcfg.buffercount)
- BUG();
-
+ list_for_each_safe(p, q, &port->dmaqueue.list) {
+ buf = list_entry(p, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
}
mutex_unlock(&port->dmaqueue_lock);
- return 0;
+ return ret;
}
-static int saa7164_dvb_start_tsport(struct saa7164_tsport *port)
+static int saa7164_dvb_start_port(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
int ret = 0, result;
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
- saa7164_dvb_cfg_tsport(port);
+ saa7164_buffer_cfg_port(port);
/* Acquire the hardware */
result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
@@ -284,7 +232,7 @@ out:
static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_tsport *port = (struct saa7164_tsport *) demux->priv;
+ struct saa7164_port *port = (struct saa7164_port *) demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
@@ -298,7 +246,7 @@ static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
mutex_lock(&dvb->lock);
if (dvb->feeding++ == 0) {
/* Start transport */
- ret = saa7164_dvb_start_tsport(port);
+ ret = saa7164_dvb_start_port(port);
}
mutex_unlock(&dvb->lock);
dprintk(DBGLVL_DVB, "%s(port=%d) now feeding = %d\n",
@@ -311,7 +259,7 @@ static int saa7164_dvb_start_feed(struct dvb_demux_feed *feed)
static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
- struct saa7164_tsport *port = (struct saa7164_tsport *) demux->priv;
+ struct saa7164_port *port = (struct saa7164_port *) demux->priv;
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
int ret = 0;
@@ -332,7 +280,7 @@ static int saa7164_dvb_stop_feed(struct dvb_demux_feed *feed)
return ret;
}
-static int dvb_register(struct saa7164_tsport *port)
+static int dvb_register(struct saa7164_port *port)
{
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
@@ -341,6 +289,9 @@ static int dvb_register(struct saa7164_tsport *port)
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
+ if (port->type != SAA7164_MPEG_DVB)
+ BUG();
+
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
result = -ENOMEM;
@@ -378,7 +329,6 @@ static int dvb_register(struct saa7164_tsport *port)
DRIVER_NAME, result);
goto fail_adapter;
}
- buf->nr = i;
mutex_lock(&port->dmaqueue_lock);
list_add_tail(&buf->list, &port->dmaqueue.list);
@@ -473,7 +423,7 @@ fail_adapter:
return result;
}
-int saa7164_dvb_unregister(struct saa7164_tsport *port)
+int saa7164_dvb_unregister(struct saa7164_port *port)
{
struct saa7164_dvb *dvb = &port->dvb;
struct saa7164_dev *dev = port->dev;
@@ -482,12 +432,15 @@ int saa7164_dvb_unregister(struct saa7164_tsport *port)
dprintk(DBGLVL_DVB, "%s()\n", __func__);
+ if (port->type != SAA7164_MPEG_DVB)
+ BUG();
+
/* Remove any allocated buffers */
mutex_lock(&port->dmaqueue_lock);
list_for_each_safe(c, n, &port->dmaqueue.list) {
b = list_entry(c, struct saa7164_buffer, list);
list_del(c);
- saa7164_buffer_dealloc(port, b);
+ saa7164_buffer_dealloc(b);
}
mutex_unlock(&port->dmaqueue_lock);
@@ -508,7 +461,7 @@ int saa7164_dvb_unregister(struct saa7164_tsport *port)
/* All the DVB attach calls go here, this function gets modified
* for each new card.
*/
-int saa7164_dvb_register(struct saa7164_tsport *port)
+int saa7164_dvb_register(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct saa7164_dvb *dvb = &port->dvb;
@@ -588,8 +541,6 @@ int saa7164_dvb_register(struct saa7164_tsport *port)
return -1;
}
- /* Put the analog decoder in standby to keep it quiet */
-
/* register everything */
ret = dvb_register(port);
if (ret < 0) {
diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
new file mode 100644
index 000000000000..cbb53d0ee979
--- /dev/null
+++ b/drivers/media/video/saa7164/saa7164-encoder.c
@@ -0,0 +1,1503 @@
+/*
+ * Driver for the NXP SAA7164 PCIe bridge
+ *
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "saa7164.h"
+
+#define ENCODER_MAX_BITRATE 6500000
+#define ENCODER_MIN_BITRATE 1000000
+#define ENCODER_DEF_BITRATE 5000000
+
+static struct saa7164_tvnorm saa7164_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }
+};
+
+static const u32 saa7164_v4l2_ctrls[] = {
+ V4L2_CID_BRIGHTNESS,
+ V4L2_CID_CONTRAST,
+ V4L2_CID_SATURATION,
+ V4L2_CID_HUE,
+ V4L2_CID_AUDIO_VOLUME,
+ V4L2_CID_SHARPNESS,
+ V4L2_CID_MPEG_STREAM_TYPE,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ V4L2_CID_MPEG_AUDIO_MUTE,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ 0
+};
+
+/* Take the encoder configuration from the port struct and
+ * flush it to the hardware.
+ */
+static void saa7164_encoder_configure(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ port->encoder_params.width = port->width;
+ port->encoder_params.height = port->height;
+ port->encoder_params.is_50hz =
+ (port->encodernorm.id & V4L2_STD_625_50) != 0;
+
+ /* Set up the DIF (enable it) for analog mode by default */
+ saa7164_api_initialize_dif(port);
+
+ /* Configure the correct video standard */
+ saa7164_api_configure_dif(port, port->encodernorm.id);
+
+ /* Ensure the audio decoder is correctly configured */
+ saa7164_api_set_audio_std(port);
+}
+
+static int saa7164_encoder_buffers_dealloc(struct saa7164_port *port)
+{
+ struct list_head *c, *n, *p, *q, *l, *v;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+
+ /* Remove any allocated buffers */
+ mutex_lock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) dmaqueue\n", __func__, port->nr);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ list_del(c);
+ saa7164_buffer_dealloc(buf);
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) used\n", __func__, port->nr);
+ list_for_each_safe(p, q, &port->list_buf_used.list) {
+ ubuf = list_entry(p, struct saa7164_user_buffer, list);
+ list_del(p);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) free\n", __func__, port->nr);
+ list_for_each_safe(l, v, &port->list_buf_free.list) {
+ ubuf = list_entry(l, struct saa7164_user_buffer, list);
+ list_del(l);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+ dprintk(DBGLVL_ENC, "%s(port=%d) done\n", __func__, port->nr);
+
+ return 0;
+}
+
+/* Dynamic buffer switch at encoder start time */
+static int saa7164_encoder_buffers_alloc(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ int result = -ENODEV, i;
+ int len = 0;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) {
+ dprintk(DBGLVL_ENC, "%s() type=V4L2_MPEG_STREAM_TYPE_MPEG2_PS\n", __func__);
+ params->samplesperline = 128;
+ params->numberoflines = 256;
+ params->pitch = 128;
+ params->numpagetables = 2 +
+ ((SAA7164_PS_NUMBER_OF_LINES * 128) / PAGE_SIZE);
+ } else
+ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_TS) {
+ dprintk(DBGLVL_ENC, "%s() type=V4L2_MPEG_STREAM_TYPE_MPEG2_TS\n", __func__);
+ params->samplesperline = 188;
+ params->numberoflines = 312;
+ params->pitch = 188;
+ params->numpagetables = 2 +
+ ((SAA7164_TS_NUMBER_OF_LINES * 188) / PAGE_SIZE);
+ } else
+ BUG();
+
+ /* Init and establish defaults */
+ params->bitspersample = 8;
+ params->linethreshold = 0;
+ params->pagetablelistvirt = 0;
+ params->pagetablelistphys = 0;
+ params->numpagetableentries = port->hwcfg.buffercount;
+
+ /* Allocate the PCI resources, buffers (hard) */
+ for (i = 0; i < port->hwcfg.buffercount; i++) {
+ buf = saa7164_buffer_alloc(port,
+ params->numberoflines *
+ params->pitch);
+
+ if (!buf) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), unable to allocate buffer\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ } else {
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&buf->list, &port->dmaqueue.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ }
+ }
+
+ /* Allocate some kernel buffers for copying
+ * to userspace.
+ */
+ len = params->numberoflines * params->pitch;
+
+ if (encoder_buffers < 16)
+ encoder_buffers = 16;
+ if (encoder_buffers > 512)
+ encoder_buffers = 512;
+
+ for (i = 0; i < encoder_buffers; i++) {
+
+ ubuf = saa7164_buffer_alloc_user(dev, len);
+ if (ubuf) {
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+ }
+
+ }
+
+ result = 0;
+
+failed:
+ return result;
+}
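
For reference, the page-table counts programmed above follow directly from the payload geometry. A standalone sketch of the arithmetic, assuming a 4 KiB PAGE_SIZE and that SAA7164_PS_NUMBER_OF_LINES / SAA7164_TS_NUMBER_OF_LINES equal the 256 / 312 lines configured here (both are assumptions; those constants are defined elsewhere in the driver):

#include <stdio.h>

int main(void)
{
        const unsigned int page_size = 4096;    /* assumed PAGE_SIZE */
        const unsigned int ts_lines = 312, ts_pitch = 188;
        const unsigned int ps_lines = 256, ps_pitch = 128;

        /* One DMA buffer is lines * pitch bytes; per the expression above,
         * the firmware is given one page-table entry per page of payload
         * plus two spare entries. */
        printf("TS buffer %u bytes -> %u page tables\n",
               ts_lines * ts_pitch, 2 + (ts_lines * ts_pitch) / page_size);
        printf("PS buffer %u bytes -> %u page tables\n",
               ps_lines * ps_pitch, 2 + (ps_lines * ps_pitch) / page_size);
        return 0;
}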
+
+static int saa7164_encoder_initialize(struct saa7164_port *port)
+{
+ saa7164_encoder_configure(port);
+ return 0;
+}
+
+/* -- V4L2 --------------------------------------------------------- */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ unsigned int i;
+
+ dprintk(DBGLVL_ENC, "%s(id=0x%x)\n", __func__, (u32)*id);
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_tvnorms); i++) {
+ if (*id & saa7164_tvnorms[i].id)
+ break;
+ }
+ if (i == ARRAY_SIZE(saa7164_tvnorms))
+ return -EINVAL;
+
+ port->encodernorm = saa7164_tvnorms[i];
+
+ /* Update the audio decoder while it is not running in
+ * auto detect mode.
+ */
+ saa7164_api_set_audio_std(port);
+
+ dprintk(DBGLVL_ENC, "%s(id=0x%x) OK\n", __func__, (u32)*id);
+
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ int n;
+
+ char *inputs[] = { "tuner", "composite", "svideo", "aux",
+ "composite 2", "svideo 2", "aux 2" };
+
+ if (i->index >= 7)
+ return -EINVAL;
+
+ strcpy(i->name, inputs[i->index]);
+
+ if (i->index == 0)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++)
+ i->std |= saa7164_tvnorms[n].id;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (saa7164_api_get_videomux(port) != SAA_OK)
+ return -EIO;
+
+ *i = (port->mux_input - 1);
+
+ dprintk(DBGLVL_ENC, "%s() input=%d\n", __func__, *i);
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s() input=%d\n", __func__, i);
+
+ if (i >= 7)
+ return -EINVAL;
+
+ port->mux_input = i + 1;
+
+ if (saa7164_api_set_videomux(port) != SAA_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ /* Update the A/V core */
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = port->freq;
+
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *tsport;
+ struct dvb_frontend *fe;
+
+ /* TODO: Pull this for the std */
+ struct analog_parameters params = {
+ .mode = V4L2_TUNER_ANALOG_TV,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .std = port->encodernorm.id,
+ .frequency = f->frequency
+ };
+
+ /* Stop the encoder */
+ dprintk(DBGLVL_ENC, "%s() frequency=%d tuner=%d\n", __func__,
+ f->frequency, f->tuner);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ if (f->type != V4L2_TUNER_ANALOG_TV)
+ return -EINVAL;
+
+ port->freq = f->frequency;
+
+ /* Update the hardware */
+ if (port->nr == SAA7164_PORT_ENC1)
+ tsport = &dev->ports[SAA7164_PORT_TS1];
+ else
+ if (port->nr == SAA7164_PORT_ENC2)
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ else
+ BUG();
+
+ fe = tsport->dvb.frontend;
+
+ if (fe && fe->ops.tuner_ops.set_analog_params)
+ fe->ops.tuner_ops.set_analog_params(fe, &params);
+ else
+ printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+
+ saa7164_encoder_initialize(port);
+
+ return 0;
+}
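
Userspace reaches the handler above through VIDIOC_S_FREQUENCY. A minimal tuning sketch; /dev/video0 and the carrier value are placeholders, and analog tuner frequencies are expressed in 62.5 kHz units here:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_frequency f;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&f, 0, sizeof(f));
        f.tuner = 0;
        f.type = V4L2_TUNER_ANALOG_TV;
        f.frequency = 980;      /* 61.25 MHz / 62.5 kHz, US channel 3 */
        if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
                perror("VIDIOC_S_FREQUENCY");
        close(fd);
        return 0;
}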
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctl->value = port->ctl_brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctl->value = port->ctl_contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctl->value = port->ctl_saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctl->value = port->ctl_hue;
+ break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = port->ctl_sharpness;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ ctl->value = port->ctl_volume;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+
+ dprintk(DBGLVL_ENC, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_brightness = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_BRIGHTNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_contrast = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_saturation = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_SATURATION_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_HUE:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_hue = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SHARPNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_sharpness = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ if ((ctl->value >= -83) && (ctl->value <= 24)) {
+ port->ctl_volume = ctl->value;
+ saa7164_api_set_audio_volume(port, port->ctl_volume);
+ } else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
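
The classic control path above can be driven with VIDIOC_S_CTRL; values must fall within the ranges validated in vidioc_s_ctrl(). A minimal sketch setting brightness, with /dev/video0 as a placeholder node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_control ctl = {
                .id    = V4L2_CID_BRIGHTNESS,
                .value = 127,   /* driver accepts 0..255; 127 is the default */
        };
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, VIDIOC_S_CTRL, &ctl) < 0)
                perror("VIDIOC_S_CTRL");
        close(fd);
        return 0;
}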
+
+static int saa7164_get_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_encoder_params *params = &port->encoder_params;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctrl->value = params->bitrate;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ ctrl->value = params->stream_type;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ ctrl->value = params->ctl_mute;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctrl->value = params->ctl_aspect;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctrl->value = params->bitrate_mode;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ctrl->value = params->refdist;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ ctrl->value = params->bitrate_peak;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrl->value = params->gop_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_get_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
+{
+ int ret = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
+ (ctrl->value <= ENCODER_MAX_BITRATE))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
+ (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 1))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
+ (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 255))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ if ((ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) ||
+ (ctrl->value == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ if ((ctrl->value >= 1) &&
+ (ctrl->value <= 3))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ if ((ctrl->value >= ENCODER_MIN_BITRATE) &&
+ (ctrl->value <= ENCODER_MAX_BITRATE))
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_set_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_encoder_params *params = &port->encoder_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ params->bitrate = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ params->stream_type = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ params->ctl_mute = ctrl->value;
+ ret = saa7164_api_audio_mute(port, params->ctl_mute);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ params->ctl_aspect = ctrl->value;
+ ret = saa7164_api_set_aspect_ratio(port);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ params->bitrate_mode = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ params->refdist = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ params->bitrate_peak = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ params->gop_size = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* TODO: Update the hardware */
+
+ return ret;
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ err = saa7164_set_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
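
Encoder parameters travel through the extended-control path above; each value must pass saa7164_try_ctrl() before saa7164_set_ctrl() caches it. A minimal userspace sketch that switches to TS muxing at 5 Mbit/s, with /dev/video0 as a placeholder node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_ext_control ctrl[2];
        struct v4l2_ext_controls ctrls;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(ctrl, 0, sizeof(ctrl));
        ctrl[0].id = V4L2_CID_MPEG_VIDEO_BITRATE;
        ctrl[0].value = 5000000;        /* within 1000000..6500000 */
        ctrl[1].id = V4L2_CID_MPEG_STREAM_TYPE;
        ctrl[1].value = V4L2_MPEG_STREAM_TYPE_MPEG2_TS;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 2;
        ctrls.controls = ctrl;

        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
                perror("VIDIOC_S_EXT_CTRLS");
        close(fd);
        return 0;
}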
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ strcpy(cap->driver, dev->name);
+ strlcpy(cap->card, saa7164_boards[dev->board].name,
+ sizeof(cap->card));
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+
+ cap->capabilities =
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ 0;
+
+ cap->capabilities |= V4L2_CAP_TUNER;
+ cap->version = 0;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "MPEG", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = port->width;
+ f->fmt.pix.height = port->height;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_G_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(DBGLVL_ENC, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+
+ dprintk(DBGLVL_ENC, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+
+ return 0;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ return 0;
+}
+
+static int fill_queryctrl(struct saa7164_encoder_params *params,
+ struct v4l2_queryctrl *c)
+{
+ switch (c->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
+ case V4L2_CID_SHARPNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ return v4l2_ctrl_query_fill(c,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
+ 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ V4L2_MPEG_VIDEO_ASPECT_221x100,
+ 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 1, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ return v4l2_ctrl_query_fill(c,
+ 1, 3, 1, 1);
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ return v4l2_ctrl_query_fill(c,
+ ENCODER_MIN_BITRATE, ENCODER_MAX_BITRATE,
+ 100000, ENCODER_DEF_BITRATE);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct saa7164_encoder_fh *fh = priv;
+ struct saa7164_port *port = fh->port;
+ int i, next;
+ u32 id = c->id;
+
+ memset(c, 0, sizeof(*c));
+
+ next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
+ c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
+ if (next) {
+ if (c->id < saa7164_v4l2_ctrls[i])
+ c->id = saa7164_v4l2_ctrls[i];
+ else
+ continue;
+ }
+
+ if (c->id == saa7164_v4l2_ctrls[i])
+ return fill_queryctrl(&port->encoder_params, c);
+
+ if (c->id < saa7164_v4l2_ctrls[i])
+ break;
+ }
+
+ return -EINVAL;
+}
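
Applications walk the table above with VIDIOC_QUERYCTRL and the V4L2_CTRL_FLAG_NEXT_CTRL convention the handler implements. A minimal enumeration sketch, with /dev/video0 as a placeholder node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_queryctrl qc;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;       /* start from the first control */
        while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
                printf("0x%08x %-32s min=%d max=%d def=%d\n",
                       qc.id, qc.name, qc.minimum, qc.maximum,
                       qc.default_value);
                qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;      /* ask for the next one */
        }
        close(fd);
        return 0;
}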
+
+static int saa7164_encoder_stop_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Stopped\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_encoder_acquire_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Acquired\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_encoder_pause_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_ENC, "%s() Paused\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Firmware is very windows centric, meaning you have to transition
+ * the part through AVStream / KS Windows stages, forwards or backwards.
+ * States are: stopped, acquired (h/w), paused, started.
+ * We have to leave here with all of the soft buffers on the free list,
+ * else the cfg_post() func won't have soft buffers to correctly configure.
+ */
+static int saa7164_encoder_stop_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct list_head *c, *n;
+ int ret;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ ret = saa7164_encoder_pause_port(port);
+ ret = saa7164_encoder_acquire_port(port);
+ ret = saa7164_encoder_stop_port(port);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) Hardware stopped\n", __func__,
+ port->nr);
+
+ /* Reset the state of any allocated buffer resources */
+ mutex_lock(&port->dmaqueue_lock);
+
+ /* Reset the hard and soft buffer state */
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ }
+
+ list_for_each_safe(c, n, &port->list_buf_used.list) {
+ ubuf = list_entry(c, struct saa7164_user_buffer, list);
+ ubuf->pos = 0;
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Free any allocated resources */
+ saa7164_encoder_buffers_dealloc(port);
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) Released\n", __func__, port->nr);
+
+ return ret;
+}
+
+static int saa7164_encoder_start_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result, ret = 0;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ port->done_first_interrupt = 0;
+
+ /* allocate all of the PCIe DMA buffer resources on the fly,
+ * allowing switching between TS and PS payloads without
+ * requiring a complete driver reload.
+ */
+ saa7164_encoder_buffers_alloc(port);
+
+ /* Configure the encoder with any cache values */
+ saa7164_api_set_encoder(port);
+ saa7164_api_get_encoder(port);
+
+ /* Place the empty buffers on the hardware */
+ saa7164_buffer_cfg_port(port);
+
+ /* Acquire the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Acquired\n", __func__);
+
+ /* Pause the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Paused\n", __func__);
+
+ /* Start the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run transition failed, result = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ } else
+ dprintk(DBGLVL_ENC, "%s() Running\n", __func__);
+
+out:
+ return ret;
+}
+
+static int fops_open(struct file *file)
+{
+ struct saa7164_dev *dev;
+ struct saa7164_port *port;
+ struct saa7164_encoder_fh *fh;
+
+ port = (struct saa7164_port *)video_get_drvdata(video_devdata(file));
+ if (!port)
+ return -ENODEV;
+
+ dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ file->private_data = fh;
+ fh->port = port;
+
+ return 0;
+}
+
+static int fops_release(struct file *file)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&port->v4l_reader_count) == 0) {
+ /* stop mpeg capture then cancel buffers */
+ saa7164_encoder_stop_streaming(port);
+ }
+ }
+
+ file->private_data = NULL;
+ kfree(fh);
+
+ return 0;
+}
+
+struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
+{
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ u32 crc;
+
+ mutex_lock(&port->dmaqueue_lock);
+ if (!list_empty(&port->list_buf_used.list)) {
+ ubuf = list_first_entry(&port->list_buf_used.list,
+ struct saa7164_user_buffer, list);
+
+ if (crc_checking) {
+ crc = crc32(0, ubuf->data, ubuf->actual_size);
+ if (crc != ubuf->crc) {
+ printk(KERN_ERR "%s() ubuf %p crc became invalid, was 0x%x became 0x%x\n", __func__,
+ ubuf, ubuf->crc, crc);
+ }
+ }
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_ENC, "%s() returns %p\n", __func__, ubuf);
+
+ return ubuf;
+}
+
+static ssize_t fops_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ int rem, cnt;
+ u8 *p;
+
+ port->last_read_msecs_diff = port->last_read_msecs;
+ port->last_read_msecs = jiffies_to_msecs(jiffies);
+ port->last_read_msecs_diff = port->last_read_msecs -
+ port->last_read_msecs_diff;
+
+ saa7164_histogram_update(&port->read_interval,
+ port->last_read_msecs_diff);
+
+ if (*pos) {
+ printk(KERN_ERR "%s() ESPIPE\n", __func__);
+ return -ESPIPE;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+
+ if (saa7164_encoder_initialize(port) < 0) {
+ printk(KERN_ERR "%s() EINVAL\n", __func__);
+ return -EINVAL;
+ }
+
+ saa7164_encoder_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ printk(KERN_ERR "%s() ERESTARTSYS\n", __func__);
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list */
+ ubuf = saa7164_enc_next_buf(port);
+
+ while ((count > 0) && ubuf) {
+
+ /* set remaining bytes to copy */
+ rem = ubuf->actual_size - ubuf->pos;
+ cnt = rem > count ? count : rem;
+
+ p = ubuf->data + ubuf->pos;
+
+ dprintk(DBGLVL_ENC,
+ "%s() count=%d cnt=%d rem=%d buf=%p buf->pos=%d\n",
+ __func__, (int)count, cnt, rem, ubuf, ubuf->pos);
+
+ if (copy_to_user(buffer, p, cnt)) {
+ printk(KERN_ERR "%s() copy_to_user failed\n", __func__);
+ if (!ret) {
+ printk(KERN_ERR "%s() EFAULT\n", __func__);
+ ret = -EFAULT;
+ }
+ goto err;
+ }
+
+ ubuf->pos += cnt;
+ count -= cnt;
+ buffer += cnt;
+ ret += cnt;
+
+ if (ubuf->pos > ubuf->actual_size) {
+ printk(KERN_ERR "read() pos > actual, huh?\n");
+ }
+
+ if (ubuf->pos == ubuf->actual_size) {
+
+ /* finished with current buffer, take next buffer */
+
+ /* Requeue the buffer on the free list */
+ ubuf->pos = 0;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Dequeue next */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ break;
+ }
+ }
+ ubuf = saa7164_enc_next_buf(port);
+ }
+ }
+err:
+ if (!ret && !ubuf) {
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static unsigned int fops_poll(struct file *file, poll_table *wait)
+{
+ struct saa7164_encoder_fh *fh = (struct saa7164_encoder_fh *)file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf;
+ unsigned int mask = 0;
+
+ port->last_poll_msecs_diff = port->last_poll_msecs;
+ port->last_poll_msecs = jiffies_to_msecs(jiffies);
+ port->last_poll_msecs_diff = port->last_poll_msecs -
+ port->last_poll_msecs_diff;
+
+ saa7164_histogram_update(&port->poll_interval,
+ port->last_poll_msecs_diff);
+
+ if (!video_is_registered(port->v4l_device)) {
+ return -EIO;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+ if (saa7164_encoder_initialize(port) < 0)
+ return -EINVAL;
+ saa7164_encoder_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_enc_next_buf(port))) {
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list, or NULL if the list
+ * is empty; list_first_entry() alone never returns NULL.
+ */
+ ubuf = saa7164_enc_next_buf(port);
+
+ if (ubuf)
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static const struct v4l2_file_operations mpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_open,
+ .release = fops_release,
+ .read = fops_read,
+ .poll = fops_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
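
Capture through the file operations above is plain read()/poll(): streaming is started on the first reader and torn down on last close. A minimal userspace capture loop, with the device and output paths as placeholders:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[188 * 312];   /* one TS-sized hardware buffer */
        struct pollfd pfd;
        ssize_t n;
        int fd = open("/dev/video0", O_RDONLY);
        int out = open("capture.mpg", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0 || out < 0) {
                perror("open");
                return 1;
        }
        pfd.fd = fd;
        pfd.events = POLLIN;
        /* Wait up to 5 seconds for encoded data, then append it to disk */
        while (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
                n = read(fd, buf, sizeof(buf));
                if (n <= 0)
                        break;
                if (write(out, buf, n) != n)
                        break;
        }
        close(out);
        close(fd);
        return 0;
}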
+
+int saa7164_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ return 0;
+}
+
+int saa7164_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+
+int saa7164_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_chip_ident = saa7164_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = saa7164_g_register,
+ .vidioc_s_register = saa7164_s_register,
+#endif
+};
+
+static struct video_device saa7164_mpeg_template = {
+ .name = "saa7164",
+ .fops = &mpeg_fops,
+ .ioctl_ops = &mpeg_ioctl_ops,
+ .minor = -1,
+ .tvnorms = SAA7164_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static struct video_device *saa7164_encoder_alloc(
+ struct saa7164_port *port,
+ struct pci_dev *pci,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, saa7164_boards[dev->board].name);
+
+ vfd->parent = &pci->dev;
+ vfd->release = video_device_release;
+ return vfd;
+}
+
+int saa7164_encoder_register(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result = -ENODEV;
+
+ dprintk(DBGLVL_ENC, "%s()\n", __func__);
+
+ if (port->type != SAA7164_MPEG_ENCODER)
+ BUG();
+
+ /* Sanity check that the PCI configuration space is active */
+ if (port->hwcfg.BARLocation == 0) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), NO PCI configuration\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ /* Establish encoder defaults here */
+ /* Set default TV standard */
+ port->encodernorm = saa7164_tvnorms[0];
+ port->width = 720;
+ port->mux_input = 1; /* Composite */
+ port->video_format = EU_VIDEO_FORMAT_MPEG_2;
+ port->audio_format = 0;
+ port->video_resolution = 0;
+ port->ctl_brightness = 127;
+ port->ctl_contrast = 66;
+ port->ctl_hue = 128;
+ port->ctl_saturation = 62;
+ port->ctl_sharpness = 8;
+ port->encoder_params.bitrate = ENCODER_DEF_BITRATE;
+ port->encoder_params.bitrate_peak = ENCODER_DEF_BITRATE;
+ port->encoder_params.bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ port->encoder_params.stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS;
+ port->encoder_params.ctl_mute = 0;
+ port->encoder_params.ctl_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3;
+ port->encoder_params.refdist = 1;
+ port->encoder_params.gop_size = SAA7164_ENCODER_DEFAULT_GOP_SIZE;
+
+ if (port->encodernorm.id & V4L2_STD_525_60)
+ port->height = 480;
+ else
+ port->height = 576;
+
+ /* Allocate and register the video device node */
+ port->v4l_device = saa7164_encoder_alloc(port,
+ dev->pci, &saa7164_mpeg_template, "mpeg");
+
+ if (port->v4l_device == NULL) {
+ printk(KERN_INFO "%s: can't allocate mpeg device\n",
+ dev->name);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ video_set_drvdata(port->v4l_device, port);
+ result = video_register_device(port->v4l_device,
+ VFL_TYPE_GRABBER, -1);
+ if (result < 0) {
+ printk(KERN_INFO "%s: can't register mpeg device\n",
+ dev->name);
+ /* TODO: We're going to leak here if we don't dealloc
+ the buffers above. The unreg function can't deal with it.
+ */
+ goto failed;
+ }
+
+ printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
+ dev->name, port->v4l_device->num);
+
+ /* Configure the hardware defaults */
+ saa7164_api_set_videomux(port);
+ saa7164_api_set_usercontrol(port, PU_BRIGHTNESS_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_SATURATION_CONTROL);
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ saa7164_api_audio_mute(port, 0);
+ saa7164_api_set_audio_volume(port, 20);
+ saa7164_api_set_aspect_ratio(port);
+
+ /* Disable audio standard detection, it's buggy */
+ saa7164_api_set_audio_detection(port, 0);
+
+ saa7164_api_set_encoder(port);
+ saa7164_api_get_encoder(port);
+
+ result = 0;
+failed:
+ return result;
+}
+
+void saa7164_encoder_unregister(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
+
+ if (port->type != SAA7164_MPEG_ENCODER)
+ BUG();
+
+ if (port->v4l_device) {
+ if (port->v4l_device->minor != -1)
+ video_unregister_device(port->v4l_device);
+ else
+ video_device_release(port->v4l_device);
+
+ port->v4l_device = NULL;
+ }
+
+ dprintk(DBGLVL_ENC, "%s(port=%d) done\n", __func__, port->nr);
+}
+
diff --git a/drivers/media/video/saa7164/saa7164-fw.c b/drivers/media/video/saa7164/saa7164-fw.c
index 270245d275ab..484533c32bb1 100644
--- a/drivers/media/video/saa7164/saa7164-fw.c
+++ b/drivers/media/video/saa7164/saa7164-fw.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,11 +24,11 @@
#include "saa7164.h"
-#define SAA7164_REV2_FIRMWARE "v4l-saa7164-1.0.2.fw"
-#define SAA7164_REV2_FIRMWARE_SIZE 3978608
+#define SAA7164_REV2_FIRMWARE "NXP7164-2010-03-10.1.fw"
+#define SAA7164_REV2_FIRMWARE_SIZE 4019072
-#define SAA7164_REV3_FIRMWARE "v4l-saa7164-1.0.3.fw"
-#define SAA7164_REV3_FIRMWARE_SIZE 3978608
+#define SAA7164_REV3_FIRMWARE "NXP7164-2010-03-10.1.fw"
+#define SAA7164_REV3_FIRMWARE_SIZE 4019072
struct fw_header {
u32 firmwaresize;
@@ -604,6 +604,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
}
}
+ dev->firmwareloaded = 1;
ret = 0;
out:
diff --git a/drivers/media/video/saa7164/saa7164-i2c.c b/drivers/media/video/saa7164/saa7164-i2c.c
index e1ae9b01bf0f..b5167d33650a 100644
--- a/drivers/media/video/saa7164/saa7164-i2c.c
+++ b/drivers/media/video/saa7164/saa7164-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/saa7164/saa7164-reg.h b/drivers/media/video/saa7164/saa7164-reg.h
index 06be4c13d5b1..2bbf81583d33 100644
--- a/drivers/media/video/saa7164/saa7164-reg.h
+++ b/drivers/media/video/saa7164/saa7164-reg.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,6 +60,7 @@
#define GET_STRING_CONTROL 0x03
#define GET_LANGUAGE_CONTROL 0x05
#define SET_POWER_CONTROL 0x07
+#define GET_FW_STATUS_CONTROL 0x08
#define GET_FW_VERSION_CONTROL 0x09
#define SET_DEBUG_LEVEL_CONTROL 0x0B
#define GET_DEBUG_DATA_CONTROL 0x0C
@@ -156,11 +157,63 @@
#define EXU_INTERRUPT_CONTROL 0x03
/* State Transition and args */
+#define SAA_PROBE_CONTROL 0x01
+#define SAA_COMMIT_CONTROL 0x02
#define SAA_STATE_CONTROL 0x03
#define SAA_DMASTATE_STOP 0x00
#define SAA_DMASTATE_ACQUIRE 0x01
#define SAA_DMASTATE_PAUSE 0x02
#define SAA_DMASTATE_RUN 0x03
-/* Hardware registers */
-
+/* A/V Mux Input Selector */
+#define SU_INPUT_SELECT_CONTROL 0x01
+
+/* Encoder Profiles */
+#define EU_PROFILE_PS_DVD 0x06
+#define EU_PROFILE_TS_HQ 0x09
+#define EU_VIDEO_FORMAT_MPEG_2 0x02
+
+/* Tuner */
+#define TU_AUDIO_MODE_CONTROL 0x17
+
+/* Video Formats */
+#define TU_STANDARD_CONTROL 0x00
+#define TU_STANDARD_AUTO_CONTROL 0x01
+#define TU_STANDARD_NONE 0x00
+#define TU_STANDARD_NTSC_M 0x01
+#define TU_STANDARD_PAL_I 0x08
+#define TU_STANDARD_MANUAL 0x00
+#define TU_STANDARD_AUTO 0x01
+
+/* Video Controls */
+#define PU_BRIGHTNESS_CONTROL 0x02
+#define PU_CONTRAST_CONTROL 0x03
+#define PU_HUE_CONTROL 0x06
+#define PU_SATURATION_CONTROL 0x07
+#define PU_SHARPNESS_CONTROL 0x08
+
+/* Audio Controls */
+#define MUTE_CONTROL 0x01
+#define VOLUME_CONTROL 0x02
+#define AUDIO_DEFAULT_CONTROL 0x0D
+
+/* Default Volume Levels */
+#define TMHW_LEV_ADJ_DECLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_MONOLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_NICLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_SAPLEV_DEFAULT 0x00
+#define TMHW_LEV_ADJ_ADCLEV_DEFAULT 0x00
+
+/* Encoder Related Commands */
+#define EU_PROFILE_CONTROL 0x00
+#define EU_VIDEO_FORMAT_CONTROL 0x01
+#define EU_VIDEO_BIT_RATE_CONTROL 0x02
+#define EU_VIDEO_RESOLUTION_CONTROL 0x03
+#define EU_VIDEO_GOP_STRUCTURE_CONTROL 0x04
+#define EU_VIDEO_INPUT_ASPECT_CONTROL 0x0A
+#define EU_AUDIO_FORMAT_CONTROL 0x0C
+#define EU_AUDIO_BIT_RATE_CONTROL 0x0D
+
+/* Firmware Debugging */
+#define SET_DEBUG_LEVEL_CONTROL 0x0B
+#define GET_DEBUG_DATA_CONTROL 0x0C
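The selectors added above are pushed to the firmware through the bus command layer rather than via memory-mapped registers. A small sketch of how one of them might be exercised, assuming the saa7164_cmd_send() prototype and the tmComResEncVideoGopStructure message declared elsewhere in this series (example_set_gop_size and its unitid argument are hypothetical, not part of the patch):

static int example_set_gop_size(struct saa7164_dev *dev, u8 unitid, u8 gop)
{
	struct tmComResEncVideoGopStructure gs = {
		.ucGOPSize      = gop,	/* e.g. 15 for NTSC */
		.ucRefFrameDist = SAA7164_ENCODER_DEFAULT_GOP_DIST,
	};

	/* SET_CUR writes the current value of the selected control */
	return saa7164_cmd_send(dev, unitid, SET_CUR,
				EU_VIDEO_GOP_STRUCTURE_CONTROL,
				sizeof(gs), &gs);
}
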
diff --git a/drivers/media/video/saa7164/saa7164-types.h b/drivers/media/video/saa7164/saa7164-types.h
index 99093f23aae5..df1d2997fa6c 100644
--- a/drivers/media/video/saa7164/saa7164-types.h
+++ b/drivers/media/video/saa7164/saa7164-types.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
/* Some structures are passed directly to/from the firmware and
* have strict alignment requirements. This is one of them.
*/
-typedef struct {
+struct tmComResHWDescr {
u8 bLength;
u8 bDescriptorType;
u8 bDescriptorSubtype;
@@ -37,14 +37,14 @@ typedef struct {
u32 dwHostMemoryRegionSize;
u32 dwHostHibernatMemRegion;
u32 dwHostHibernatMemRegionSize;
-} __attribute__((packed)) tmComResHWDescr_t;
+} __attribute__((packed));
/* This is DWORD aligned on windows but I can't find the right
* gcc syntax to match the binary data from the device.
* I've manually padded with Reserved[3] bytes to match the hardware,
* but this could break if GCC decides to pack in a different way.
*/
-typedef struct {
+struct tmComResInterfaceDescr {
u8 bLength;
u8 bDescriptorType;
u8 bDescriptorSubtype;
@@ -56,52 +56,52 @@ typedef struct {
u8 bDebugInterruptId;
u8 BARLocation;
u8 Reserved[3];
-} tmComResInterfaceDescr_t;
+};
-typedef struct {
+struct tmComResBusDescr {
u64 CommandRing;
u64 ResponseRing;
u32 CommandWrite;
u32 CommandRead;
u32 ResponseWrite;
u32 ResponseRead;
-} tmComResBusDescr_t;
+};
-typedef enum {
+enum tmBusType {
NONE = 0,
TYPE_BUS_PCI = 1,
TYPE_BUS_PCIe = 2,
TYPE_BUS_USB = 3,
TYPE_BUS_I2C = 4
-} tmBusType_t;
+};
-typedef struct {
- tmBusType_t Type;
+struct tmComResBusInfo {
+ enum tmBusType Type;
u16 m_wMaxReqSize;
u8 *m_pdwSetRing;
u32 m_dwSizeSetRing;
u8 *m_pdwGetRing;
u32 m_dwSizeGetRing;
- u32 *m_pdwSetWritePos;
- u32 *m_pdwSetReadPos;
- u32 *m_pdwGetWritePos;
- u32 *m_pdwGetReadPos;
+ u32 m_dwSetWritePos;
+ u32 m_dwSetReadPos;
+ u32 m_dwGetWritePos;
+ u32 m_dwGetReadPos;
/* All access is protected */
struct mutex lock;
-} tmComResBusInfo_t;
+};
-typedef struct {
+struct tmComResInfo {
u8 id;
u8 flags;
u16 size;
u32 command;
u16 controlselector;
u8 seqno;
-} __attribute__((packed)) tmComResInfo_t;
+} __attribute__((packed));
-typedef enum {
+enum tmComResCmd {
SET_CUR = 0x01,
GET_CUR = 0x81,
GET_MIN = 0x82,
@@ -110,7 +110,7 @@ typedef enum {
GET_LEN = 0x85,
GET_INFO = 0x86,
GET_DEF = 0x87
-} tmComResCmd_t;
+};
struct cmd {
u8 seqno;
@@ -121,20 +121,20 @@ struct cmd {
wait_queue_head_t wait;
};
-typedef struct {
+struct tmDescriptor {
u32 pathid;
u32 size;
void *descriptor;
-} tmDescriptor_t;
+};
-typedef struct {
+struct tmComResDescrHeader {
u8 len;
u8 type;
u8 subtype;
u8 unitid;
-} __attribute__((packed)) tmComResDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResExtDevDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -144,22 +144,22 @@ typedef struct {
u32 numgpiopins;
u8 numgpiogroups;
u8 controlsize;
-} __attribute__((packed)) tmComResExtDevDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResGPIO {
u32 pin;
u8 state;
-} __attribute__((packed)) tmComResGPIO_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResPathDescrHeader {
u8 len;
u8 type;
u8 subtype;
u8 pathid;
-} __attribute__((packed)) tmComResPathDescrHeader_t;
+} __attribute__((packed));
/* terminaltype */
-typedef enum {
+enum tmComResTermType {
ITT_ANTENNA = 0x0203,
LINE_CONNECTOR = 0x0603,
SPDIF_CONNECTOR = 0x0605,
@@ -167,9 +167,9 @@ typedef enum {
SVIDEO_CONNECTOR = 0x0402,
COMPONENT_CONNECTOR = 0x0403,
STANDARD_DMA = 0xF101
-} tmComResTermType_t;
+};
-typedef struct {
+struct tmComResAntTermDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -178,9 +178,9 @@ typedef struct {
u8 assocterminal;
u8 iterminal;
u8 controlsize;
-} __attribute__((packed)) tmComResAntTermDescrHeader_t;
+} __attribute__((packed));
-typedef struct {
+struct tmComResTunerDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -190,9 +190,9 @@ typedef struct {
u32 tuningstandards;
u8 controlsize;
u32 controls;
-} __attribute__((packed)) tmComResTunerDescrHeader_t;
+} __attribute__((packed));
-typedef enum {
+enum tmBufferFlag {
/* the buffer does not contain any valid data */
TM_BUFFER_FLAG_EMPTY,
@@ -201,23 +201,23 @@ typedef enum {
/* the buffer is the dummy buffer - TODO??? */
TM_BUFFER_FLAG_DUMMY_BUFFER
-} tmBufferFlag_t;
+};
-typedef struct {
+struct tmBuffer {
u64 *pagetablevirt;
u64 pagetablephys;
u16 offset;
u8 *context;
u64 timestamp;
- tmBufferFlag_t BufferFlag_t;
+ enum tmBufferFlag BufferFlag;
u32 lostbuffers;
u32 validbuffers;
u64 *dummypagevirt;
u64 dummypagephys;
u64 *addressvirt;
-} tmBuffer_t;
+};
-typedef struct {
+struct tmHWStreamParameters {
u32 bitspersample;
u32 samplesperline;
u32 numberoflines;
@@ -227,15 +227,15 @@ typedef struct {
u64 *pagetablelistphys;
u32 numpagetables;
u32 numpagetableentries;
-} tmHWStreamParameters_t;
+};
-typedef struct {
- tmHWStreamParameters_t HWStreamParameters_t;
+struct tmStreamParameters {
+ struct tmHWStreamParameters HWStreamParameters;
u64 qwDummyPageTablePhys;
u64 *pDummyPageTableVirt;
-} tmStreamParameters_t;
+};
-typedef struct {
+struct tmComResDMATermDescrHeader {
u8 len;
u8 type;
u8 subtyle;
@@ -251,7 +251,7 @@ typedef struct {
u8 metadatasize;
u8 numformats;
u8 controlsize;
-} __attribute__((packed)) tmComResDMATermDescrHeader_t;
+} __attribute__((packed));
/*
*
@@ -274,7 +274,7 @@ typedef struct {
* Data is to be ignored by the application.
*
*/
-typedef struct {
+struct tmComResTSFormatDescrHeader {
u8 len;
u8 type;
u8 subtype;
@@ -283,5 +283,160 @@ typedef struct {
u8 bPacketLength;
u8 bStrideLength;
u8 guidStrideFormat[16];
-} __attribute__((packed)) tmComResTSFormatDescrHeader_t;
+} __attribute__((packed));
+
+/* Encoder related structures */
+
+/* A/V Mux Selector */
+struct tmComResSelDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 nrinpins;
+ u8 sourceid;
+} __attribute__((packed));
+
+/* A/V Audio processor definitions */
+struct tmComResProcDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 sourceid;
+ u16 wreserved;
+ u8 controlsize;
+} __attribute__((packed));
+
+/* Video bitrate control message */
+#define EU_VIDEO_BIT_RATE_MODE_CONSTANT (0)
+#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_AVERAGE (1)
+#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK (2)
+struct tmComResEncVideoBitRate {
+ u8 ucVideoBitRateMode;
+ u32 dwVideoBitRate;
+ u32 dwVideoBitRatePeak;
+} __attribute__((packed));
+
+/* Video Encoder Aspect Ratio message */
+struct tmComResEncVideoInputAspectRatio {
+ u8 width;
+ u8 height;
+} __attribute__((packed));
+
+/* Video Encoder GOP IBP message */
+/* 1. IPPPPPPPPPPPPPP */
+/* 2. IBPBPBPBPBPBPBP */
+/* 3. IBBPBBPBBPBBP */
+#define SAA7164_ENCODER_DEFAULT_GOP_DIST (1)
+#define SAA7164_ENCODER_DEFAULT_GOP_SIZE (15)
+struct tmComResEncVideoGopStructure {
+ u8 ucGOPSize; /* GOP Size 12, 15 */
+ u8 ucRefFrameDist; /* Reference Frame Distance */
+} __attribute__((packed));
+
+/* Encoder processor definition */
+struct tmComResEncoderDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 vsourceid;
+ u8 asourceid;
+ u8 iunit;
+ u32 dwmControlCap;
+ u32 dwmProfileCap;
+ u32 dwmVidFormatCap;
+ u8 bmVidBitrateCap;
+ u16 wmVidResolutionsCap;
+ u16 wmVidFrmRateCap;
+ u32 dwmAudFormatCap;
+ u8 bmAudBitrateCap;
+} __attribute__((packed));
+
+/* Audio processor definition */
+struct tmComResAFeatureDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 unitid;
+ u8 sourceid;
+ u8 controlsize;
+} __attribute__((packed));
+
+/* Audio control messages */
+struct tmComResAudioDefaults {
+ u8 ucDecoderLevel;
+ u8 ucDecoderFM_Level;
+ u8 ucMonoLevel;
+ u8 ucNICAM_Level;
+ u8 ucSAP_Level;
+ u8 ucADC_Level;
+} __attribute__((packed));
+
+/* Audio bitrate control message */
+struct tmComResEncAudioBitRate {
+ u8 ucAudioBitRateMode;
+ u32 dwAudioBitRate;
+ u32 dwAudioBitRatePeak;
+} __attribute__((packed));
+
+/* Tuner / AV Decoder messages */
+struct tmComResTunerStandard {
+ u8 std;
+ u32 country;
+} __attribute__((packed));
+
+struct tmComResTunerStandardAuto {
+ u8 mode;
+} __attribute__((packed));
+
+/* EEPROM definition for PS stream types */
+struct tmComResPSFormatDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype;
+ u8 bFormatIndex;
+ u16 wPacketLength;
+ u16 wPackLength;
+ u8 bPackDataType;
+} __attribute__((packed));
+
+/* VBI control structure */
+struct tmComResVBIFormatDescrHeader {
+ u8 len;
+ u8 type;
+ u8 subtype; /* VS_FORMAT_VBI */
+ u8 bFormatIndex;
+ u32 VideoStandard; /* See KS_AnalogVideoStandard, NTSC = 1 */
+ u8 StartLine; /* NTSC Start = 10 */
+ u8 EndLine; /* NTSC = 21 */
+ u8 FieldRate; /* 60 for NTSC */
+ u8 bNumLines; /* Unused - scheduled for removal */
+} __attribute__((packed));
+
+struct tmComResProbeCommit {
+ u16 bmHint;
+ u8 bFormatIndex;
+ u8 bFrameIndex;
+} __attribute__((packed));
+
+struct tmComResDebugSetLevel {
+ u32 dwDebugLevel;
+} __attribute__((packed));
+
+struct tmComResDebugGetData {
+ u32 dwResult;
+ u8 ucDebugData[256];
+} __attribute__((packed));
+struct tmFwInfoStruct {
+ u32 status;
+ u32 mode;
+ u32 devicespec;
+ u32 deviceinst;
+ u32 CPULoad;
+ u32 RemainHeap;
+ u32 CPUClock;
+ u32 RAMSpeed;
+} __attribute__((packed));
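Because these descriptors are consumed verbatim by the firmware, their packed layout must not drift. A compile-time guard one could add (not in the patch), assuming BUILD_BUG_ON from <linux/kernel.h>; the expected sizes follow directly from the fields above:

static inline void saa7164_types_layout_check(void)
{
	/* 1 + 1 + 2 + 4 + 2 + 1 bytes when packed */
	BUILD_BUG_ON(sizeof(struct tmComResInfo) != 11);
	/* 4 + 1 bytes when packed */
	BUILD_BUG_ON(sizeof(struct tmComResGPIO) != 5);
	/* four single-byte fields */
	BUILD_BUG_ON(sizeof(struct tmComResDescrHeader) != 4);
}
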
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
new file mode 100644
index 000000000000..323c7cdca37b
--- /dev/null
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -0,0 +1,1375 @@
+/*
+ * Driver for the NXP SAA7164 PCIe bridge
+ *
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "saa7164.h"
+
+static struct saa7164_tvnorm saa7164_tvnorms[] = {
+ {
+ .name = "NTSC-M",
+ .id = V4L2_STD_NTSC_M,
+ }, {
+ .name = "NTSC-JP",
+ .id = V4L2_STD_NTSC_M_JP,
+ }
+};
+
+static const u32 saa7164_v4l2_ctrls[] = {
+ 0
+};
+
+/* Take the VBI configuration from the port struct and
+ * flush it to the hardware.
+ */
+static void saa7164_vbi_configure(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ port->vbi_params.width = port->width;
+ port->vbi_params.height = port->height;
+ port->vbi_params.is_50hz =
+ (port->encodernorm.id & V4L2_STD_625_50) != 0;
+
+ /* Set up the DIF (enable it) for analog mode by default */
+ saa7164_api_initialize_dif(port);
+
+// /* Configure the correct video standard */
+// saa7164_api_configure_dif(port, port->encodernorm.id);
+
+// /* Ensure the audio decoder is correctly configured */
+// saa7164_api_set_audio_std(port);
+ dprintk(DBGLVL_VBI, "%s() ends\n", __func__);
+}
+
+static int saa7164_vbi_buffers_dealloc(struct saa7164_port *port)
+{
+ struct list_head *c, *n, *p, *q, *l, *v;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+
+ /* Remove any allocated buffers */
+ mutex_lock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) dmaqueue\n", __func__, port->nr);
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ list_del(c);
+ saa7164_buffer_dealloc(buf);
+ }
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) used\n", __func__, port->nr);
+ list_for_each_safe(p, q, &port->list_buf_used.list) {
+ ubuf = list_entry(p, struct saa7164_user_buffer, list);
+ list_del(p);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) free\n", __func__, port->nr);
+ list_for_each_safe(l, v, &port->list_buf_free.list) {
+ ubuf = list_entry(l, struct saa7164_user_buffer, list);
+ list_del(l);
+ saa7164_buffer_dealloc_user(ubuf);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+ dprintk(DBGLVL_VBI, "%s(port=%d) done\n", __func__, port->nr);
+
+ return 0;
+}
+
+/* Dynamic buffer switch at vbi start time */
+static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct tmHWStreamParameters *params = &port->hw_streamingparams;
+ int result = -ENODEV, i;
+ int len = 0;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* TODO: NTSC SPECIFIC */
+ /* Init and establish defaults */
+ params->samplesperline = 1440;
+ params->numberoflines = 18;
+ params->pitch = 1440;
+ params->numpagetables = 2 +
+ ((params->numberoflines * params->pitch) / PAGE_SIZE);
+ params->bitspersample = 8;
+ params->linethreshold = 0;
+ params->pagetablelistvirt = 0;
+ params->pagetablelistphys = 0;
+ params->numpagetableentries = port->hwcfg.buffercount;
+
+ /* Allocate the PCI resources, buffers (hard) */
+ for (i = 0; i < port->hwcfg.buffercount; i++) {
+ buf = saa7164_buffer_alloc(port,
+ params->numberoflines *
+ params->pitch);
+
+ if (!buf) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), unable to allocate buffer\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ } else {
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&buf->list, &port->dmaqueue.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ }
+ }
+
+ /* Allocate some kernel buffers for copying
+ * to userspace.
+ */
+ len = params->numberoflines * params->pitch;
+
+ if (vbi_buffers < 16)
+ vbi_buffers = 16;
+ if (vbi_buffers > 512)
+ vbi_buffers = 512;
+
+ for (i = 0; i < vbi_buffers; i++) {
+
+ ubuf = saa7164_buffer_alloc_user(dev, len);
+ if (ubuf) {
+ mutex_lock(&port->dmaqueue_lock);
+ list_add_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+ }
+
+ }
+
+ result = 0;
+
+failed:
+ return result;
+}
+
+
+static int saa7164_vbi_initialize(struct saa7164_port *port)
+{
+ saa7164_vbi_configure(port);
+ return 0;
+}
+
+/* -- V4L2 --------------------------------------------------------- */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ unsigned int i;
+
+ dprintk(DBGLVL_VBI, "%s(id=0x%x)\n", __func__, (u32)*id);
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_tvnorms); i++) {
+ if (*id & saa7164_tvnorms[i].id)
+ break;
+ }
+ if (i == ARRAY_SIZE(saa7164_tvnorms))
+ return -EINVAL;
+
+ port->encodernorm = saa7164_tvnorms[i];
+
+ /* Update the audio decoder while it is not running in
+ * auto-detect mode.
+ */
+ saa7164_api_set_audio_std(port);
+
+ dprintk(DBGLVL_VBI, "%s(id=0x%x) OK\n", __func__, (u32)*id);
+
+ return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ int n;
+
+ char *inputs[] = { "tuner", "composite", "svideo", "aux",
+ "composite 2", "svideo 2", "aux 2" };
+
+ if (i->index >= 7)
+ return -EINVAL;
+
+ strcpy(i->name, inputs[i->index]);
+
+ if (i->index == 0)
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ else
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++)
+ i->std |= saa7164_tvnorms[n].id;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (saa7164_api_get_videomux(port) != SAA_OK)
+ return -EIO;
+
+ *i = (port->mux_input - 1);
+
+ dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, *i);
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s() input=%d\n", __func__, i);
+
+ if (i >= 7)
+ return -EINVAL;
+
+ port->mux_input = i + 1;
+
+ if (saa7164_api_set_videomux(port) != SAA_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ /* Update the A/V core */
+ return 0;
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = port->freq;
+
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_port *tsport;
+ struct dvb_frontend *fe;
+
+ /* TODO: Pull this for the std */
+ struct analog_parameters params = {
+ .mode = V4L2_TUNER_ANALOG_TV,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .std = port->encodernorm.id,
+ .frequency = f->frequency
+ };
+
+ /* Stop the encoder */
+ dprintk(DBGLVL_VBI, "%s() frequency=%d tuner=%d\n", __func__,
+ f->frequency, f->tuner);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+
+ if (f->type != V4L2_TUNER_ANALOG_TV)
+ return -EINVAL;
+
+ port->freq = f->frequency;
+
+ /* Update the hardware */
+ if (port->nr == SAA7164_PORT_VBI1)
+ tsport = &dev->ports[SAA7164_PORT_TS1];
+ else
+ if (port->nr == SAA7164_PORT_VBI2)
+ tsport = &dev->ports[SAA7164_PORT_TS2];
+ else
+ BUG();
+
+ fe = tsport->dvb.frontend;
+
+ if (fe && fe->ops.tuner_ops.set_analog_params)
+ fe->ops.tuner_ops.set_analog_params(fe, &params);
+ else
+ printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+
+ saa7164_vbi_initialize(port);
+
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctl->value = port->ctl_brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctl->value = port->ctl_contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctl->value = port->ctl_saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctl->value = port->ctl_hue;
+ break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = port->ctl_sharpness;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ ctl->value = port->ctl_volume;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+
+ dprintk(DBGLVL_VBI, "%s(id=%d, value=%d)\n", __func__,
+ ctl->id, ctl->value);
+
+ switch (ctl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_brightness = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_BRIGHTNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_contrast = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_CONTRAST_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_saturation = ctl->value;
+ saa7164_api_set_usercontrol(port,
+ PU_SATURATION_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_HUE:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_hue = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_HUE_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_SHARPNESS:
+ if ((ctl->value >= 0) && (ctl->value <= 255)) {
+ port->ctl_sharpness = ctl->value;
+ saa7164_api_set_usercontrol(port, PU_SHARPNESS_CONTROL);
+ } else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ if ((ctl->value >= -83) && (ctl->value <= 24)) {
+ port->ctl_volume = ctl->value;
+ saa7164_api_set_audio_volume(port, port->ctl_volume);
+ } else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int saa7164_get_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_vbi_params *params = &port->vbi_params;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ ctrl->value = params->stream_type;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ ctrl->value = params->ctl_mute;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctrl->value = params->ctl_aspect;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ctrl->value = params->refdist;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctrl->value = params->gop_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_get_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
+{
+ int ret = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ if ((ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) ||
+ (ctrl->value == V4L2_MPEG_STREAM_TYPE_MPEG2_TS))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 1))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ if ((ctrl->value >= V4L2_MPEG_VIDEO_ASPECT_1x1) &&
+ (ctrl->value <= V4L2_MPEG_VIDEO_ASPECT_221x100))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ if ((ctrl->value >= 0) &&
+ (ctrl->value <= 255))
+ ret = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ if ((ctrl->value >= 1) &&
+ (ctrl->value <= 3))
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vidioc_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_set_ctrl(struct saa7164_port *port,
+ struct v4l2_ext_control *ctrl)
+{
+ struct saa7164_vbi_params *params = &port->vbi_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ params->stream_type = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ params->ctl_mute = ctrl->value;
+ ret = saa7164_api_audio_mute(port, params->ctl_mute);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ params->ctl_aspect = ctrl->value;
+ ret = saa7164_api_set_aspect_ratio(port);
+ if (ret != SAA_OK) {
+ printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
+ ret);
+ ret = -EIO;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ params->refdist = ctrl->value;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ params->gop_size = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* TODO: Update the hardware */
+
+ return ret;
+}
+
+static int vidioc_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctrls)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ int i, err = 0;
+
+ if (ctrls->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+ for (i = 0; i < ctrls->count; i++) {
+ struct v4l2_ext_control *ctrl = ctrls->controls + i;
+
+ err = saa7164_try_ctrl(ctrl, 0);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ err = saa7164_set_ctrl(port, ctrl);
+ if (err) {
+ ctrls->error_idx = i;
+ break;
+ }
+ }
+ return err;
+
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ strcpy(cap->driver, dev->name);
+ strlcpy(cap->card, saa7164_boards[dev->board].name,
+ sizeof(cap->card));
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+
+ cap->capabilities =
+ V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_READWRITE |
+ 0;
+
+ cap->capabilities |= V4L2_CAP_TUNER;
+ cap->version = 0;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ strlcpy(f->description, "VBI", sizeof(f->description));
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.width = port->width;
+ f->fmt.pix.height = port->height;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_G_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+ dprintk(DBGLVL_VBI, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
+ port->width, port->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ port->ts_packet_size * port->ts_packet_count;
+ f->fmt.pix.colorspace = 0;
+
+ dprintk(DBGLVL_VBI, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+
+ return 0;
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ return 0;
+}
+
+static int fill_queryctrl(struct saa7164_vbi_params *params,
+ struct v4l2_queryctrl *c)
+{
+ switch (c->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 66);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 62);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0xff, 1, 128);
+ case V4L2_CID_SHARPNESS:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x0f, 1, 8);
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ return v4l2_ctrl_query_fill(c, 0x0, 0x01, 1, 0);
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(c, -83, 24, 1, 20);
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
+ V4L2_MPEG_STREAM_TYPE_MPEG2_TS,
+ 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return v4l2_ctrl_query_fill(c,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ V4L2_MPEG_VIDEO_ASPECT_221x100,
+ 1, V4L2_MPEG_VIDEO_ASPECT_4x3);
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return v4l2_ctrl_query_fill(c, 1, 255, 1, 15);
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ return v4l2_ctrl_query_fill(c,
+ 1, 3, 1, 1);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *c)
+{
+ struct saa7164_vbi_fh *fh = priv;
+ struct saa7164_port *port = fh->port;
+ int i, next;
+ u32 id = c->id;
+
+ memset(c, 0, sizeof(*c));
+
+ next = !!(id & V4L2_CTRL_FLAG_NEXT_CTRL);
+ c->id = id & ~V4L2_CTRL_FLAG_NEXT_CTRL;
+
+ for (i = 0; i < ARRAY_SIZE(saa7164_v4l2_ctrls); i++) {
+ if (next) {
+ if (c->id < saa7164_v4l2_ctrls[i])
+ c->id = saa7164_v4l2_ctrls[i];
+ else
+ continue;
+ }
+
+ if (c->id == saa7164_v4l2_ctrls[i])
+ return fill_queryctrl(&port->vbi_params, c);
+
+ if (c->id < saa7164_v4l2_ctrls[i])
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int saa7164_vbi_stop_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() stop transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Stopped\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_vbi_acquire_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Acquired\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int saa7164_vbi_pause_port(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int ret;
+
+ ret = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((ret != SAA_OK) && (ret != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, ret = 0x%x\n",
+ __func__, ret);
+ ret = -EIO;
+ } else {
+ dprintk(DBGLVL_VBI, "%s() Paused\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* Firmware is very Windows centric, meaning you have to transition
+ * the part through AVStream / KS Windows stages, forwards or backwards.
+ * States are: stopped, acquired (h/w), paused, started.
+ * We have to leave here with all of the soft buffers on the free list,
+ * else the cfg_post() func won't have soft buffers to correctly configure.
+ */
+static int saa7164_vbi_stop_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ struct saa7164_buffer *buf;
+ struct saa7164_user_buffer *ubuf;
+ struct list_head *c, *n;
+ int ret;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ ret = saa7164_vbi_pause_port(port);
+ ret = saa7164_vbi_acquire_port(port);
+ ret = saa7164_vbi_stop_port(port);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) Hardware stopped\n", __func__,
+ port->nr);
+
+ /* Reset the state of any allocated buffer resources */
+ mutex_lock(&port->dmaqueue_lock);
+
+ /* Reset the hard and soft buffer state */
+ list_for_each_safe(c, n, &port->dmaqueue.list) {
+ buf = list_entry(c, struct saa7164_buffer, list);
+ buf->flags = SAA7164_BUFFER_FREE;
+ buf->pos = 0;
+ }
+
+ list_for_each_safe(c, n, &port->list_buf_used.list) {
+ ubuf = list_entry(c, struct saa7164_user_buffer, list);
+ ubuf->pos = 0;
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ }
+
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Free any allocated resources */
+ saa7164_vbi_buffers_dealloc(port);
+
+ dprintk(DBGLVL_VBI, "%s(port=%d) Released\n", __func__, port->nr);
+
+ return ret;
+}
+
+static int saa7164_vbi_start_streaming(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result, ret = 0;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ port->done_first_interrupt = 0;
+
+ /* allocate all of the PCIe DMA buffer resources on the fly,
+ * allowing switching between TS and PS payloads without
+ * requiring a complete driver reload.
+ */
+ saa7164_vbi_buffers_alloc(port);
+
+ /* Configure the encoder with any cache values */
+// saa7164_api_set_encoder(port);
+// saa7164_api_get_encoder(port);
+
+ /* Place the empty buffers on the hardware */
+ saa7164_buffer_cfg_port(port);
+
+ /* Negotiate format */
+ if (saa7164_api_set_vbi_format(port) != SAA_OK) {
+ printk(KERN_ERR "%s() No supported VBI format\n", __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Acquire the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_ACQUIRE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() acquire transition failed, res = 0x%x\n",
+ __func__, result);
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Acquired\n", __func__);
+
+ /* Pause the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_PAUSE);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause transition failed, res = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_vbi_stop_port(port);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() pause/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ goto out;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Paused\n", __func__);
+
+ /* Start the hardware */
+ result = saa7164_api_transition_port(port, SAA_DMASTATE_RUN);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run transition failed, result = 0x%x\n",
+ __func__, result);
+
+ /* Stop the hardware, regardless */
+ result = saa7164_vbi_acquire_port(port);
+ result = saa7164_vbi_stop_port(port);
+ if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ printk(KERN_ERR "%s() run/forced stop transition "
+ "failed, res = 0x%x\n", __func__, result);
+ }
+
+ ret = -EIO;
+ } else
+ dprintk(DBGLVL_VBI, "%s() Running\n", __func__);
+
+out:
+ return ret;
+}
+
+int saa7164_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ /* ntsc */
+ f->fmt.vbi.samples_per_line = 1440;
+ f->fmt.vbi.sampling_rate = 27000000;
+ f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
+ f->fmt.vbi.offset = 0;
+ f->fmt.vbi.flags = 0;
+ f->fmt.vbi.start[0] = 10;
+ f->fmt.vbi.count[0] = 18;
+ f->fmt.vbi.start[1] = 263 + 10 + 1;
+ f->fmt.vbi.count[1] = 18;
+ return 0;
+}
+
+static int fops_open(struct file *file)
+{
+ struct saa7164_dev *dev;
+ struct saa7164_port *port;
+ struct saa7164_vbi_fh *fh;
+
+ port = (struct saa7164_port *)video_get_drvdata(video_devdata(file));
+ if (!port)
+ return -ENODEV;
+
+ dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (NULL == fh)
+ return -ENOMEM;
+
+ file->private_data = fh;
+ fh->port = port;
+
+ return 0;
+}
+
+static int fops_release(struct file *file)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ /* Shut device down on last close */
+ if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
+ if (atomic_dec_return(&port->v4l_reader_count) == 0) {
+ /* stop vbi capture then cancel buffers */
+ saa7164_vbi_stop_streaming(port);
+ }
+ }
+
+ file->private_data = NULL;
+ kfree(fh);
+
+ return 0;
+}
+
+struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
+{
+ struct saa7164_user_buffer *ubuf = 0;
+ struct saa7164_dev *dev = port->dev;
+ u32 crc;
+
+ mutex_lock(&port->dmaqueue_lock);
+ if (!list_empty(&port->list_buf_used.list)) {
+ ubuf = list_first_entry(&port->list_buf_used.list,
+ struct saa7164_user_buffer, list);
+
+ if (crc_checking) {
+ crc = crc32(0, ubuf->data, ubuf->actual_size);
+ if (crc != ubuf->crc) {
+ printk(KERN_ERR "%s() ubuf %p crc became invalid, was 0x%x became 0x%x\n", __func__,
+ ubuf, ubuf->crc, crc);
+ }
+ }
+
+ }
+ mutex_unlock(&port->dmaqueue_lock);
+
+ dprintk(DBGLVL_VBI, "%s() returns %p\n", __func__, ubuf);
+
+ return ubuf;
+}
+
+static ssize_t fops_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf = NULL;
+ struct saa7164_dev *dev = port->dev;
+ int ret = 0;
+ int rem, cnt;
+ u8 *p;
+
+ port->last_read_msecs_diff = port->last_read_msecs;
+ port->last_read_msecs = jiffies_to_msecs(jiffies);
+ port->last_read_msecs_diff = port->last_read_msecs -
+ port->last_read_msecs_diff;
+
+ saa7164_histogram_update(&port->read_interval,
+ port->last_read_msecs_diff);
+
+ if (*pos) {
+ printk(KERN_ERR "%s() ESPIPE\n", __func__);
+ return -ESPIPE;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+
+ if (saa7164_vbi_initialize(port) < 0) {
+ printk(KERN_ERR "%s() EINVAL\n", __func__);
+ return -EINVAL;
+ }
+
+ saa7164_vbi_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ printk(KERN_ERR "%s() ERESTARTSYS\n", __func__);
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list */
+ ubuf = saa7164_vbi_next_buf(port);
+
+ while ((count > 0) && ubuf) {
+
+ /* set remaining bytes to copy */
+ rem = ubuf->actual_size - ubuf->pos;
+ cnt = rem > count ? count : rem;
+
+ p = ubuf->data + ubuf->pos;
+
+ dprintk(DBGLVL_VBI,
+ "%s() count=%d cnt=%d rem=%d buf=%p buf->pos=%d\n",
+ __func__, (int)count, cnt, rem, ubuf, ubuf->pos);
+
+ if (copy_to_user(buffer, p, cnt)) {
+ printk(KERN_ERR "%s() copy_to_user failed\n", __func__);
+ if (!ret) {
+ printk(KERN_ERR "%s() EFAULT\n", __func__);
+ ret = -EFAULT;
+ }
+ goto err;
+ }
+
+ ubuf->pos += cnt;
+ count -= cnt;
+ buffer += cnt;
+ ret += cnt;
+
+ if (ubuf->pos > ubuf->actual_size) {
+ printk(KERN_ERR "read() pos > actual, huh?\n");
+ }
+
+ if (ubuf->pos == ubuf->actual_size) {
+
+ /* finished with current buffer, take next buffer */
+
+ /* Requeue the buffer on the free list */
+ ubuf->pos = 0;
+
+ mutex_lock(&port->dmaqueue_lock);
+ list_move_tail(&ubuf->list, &port->list_buf_free.list);
+ mutex_unlock(&port->dmaqueue_lock);
+
+ /* Dequeue next */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ break;
+ }
+ }
+ ubuf = saa7164_vbi_next_buf(port);
+ }
+ }
+err:
+ if (!ret && !ubuf) {
+ printk(KERN_ERR "%s() EAGAIN\n", __func__);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static unsigned int fops_poll(struct file *file, poll_table *wait)
+{
+ struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
+ struct saa7164_port *port = fh->port;
+ struct saa7164_user_buffer *ubuf;
+ unsigned int mask = 0;
+
+ port->last_poll_msecs_diff = port->last_poll_msecs;
+ port->last_poll_msecs = jiffies_to_msecs(jiffies);
+ port->last_poll_msecs_diff = port->last_poll_msecs -
+ port->last_poll_msecs_diff;
+
+ saa7164_histogram_update(&port->poll_interval,
+ port->last_poll_msecs_diff);
+
+ if (!video_is_registered(port->v4l_device)) {
+ return -EIO;
+ }
+
+ if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
+ if (atomic_inc_return(&port->v4l_reader_count) == 1) {
+ if (saa7164_vbi_initialize(port) < 0)
+ return -EINVAL;
+ saa7164_vbi_start_streaming(port);
+ msleep(200);
+ }
+ }
+
+ /* blocking wait for buffer */
+ if ((file->f_flags & O_NONBLOCK) == 0) {
+ if (wait_event_interruptible(port->wait_read,
+ saa7164_vbi_next_buf(port))) {
+ return -ERESTARTSYS;
+ }
+ }
+
+ /* Pull the first buffer from the used list */
+ ubuf = saa7164_vbi_next_buf(port);
+
+ if (ubuf)
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+static const struct v4l2_file_operations vbi_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_open,
+ .release = fops_release,
+ .read = fops_read,
+ .poll = fops_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_queryctrl = vidioc_queryctrl,
+// .vidioc_g_chip_ident = saa7164_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+// .vidioc_g_register = saa7164_g_register,
+// .vidioc_s_register = saa7164_s_register,
+#endif
+ .vidioc_g_fmt_vbi_cap = saa7164_vbi_fmt,
+ .vidioc_try_fmt_vbi_cap = saa7164_vbi_fmt,
+ .vidioc_s_fmt_vbi_cap = saa7164_vbi_fmt,
+};
+
+static struct video_device saa7164_vbi_template = {
+ .name = "saa7164",
+ .fops = &vbi_fops,
+ .ioctl_ops = &vbi_ioctl_ops,
+ .minor = -1,
+ .tvnorms = SAA7164_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static struct video_device *saa7164_vbi_alloc(
+ struct saa7164_port *port,
+ struct pci_dev *pci,
+ struct video_device *template,
+ char *type)
+{
+ struct video_device *vfd;
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ vfd = video_device_alloc();
+ if (NULL == vfd)
+ return NULL;
+
+ *vfd = *template;
+ snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
+ type, saa7164_boards[dev->board].name);
+
+ vfd->parent = &pci->dev;
+ vfd->release = video_device_release;
+ return vfd;
+}
+
+int saa7164_vbi_register(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+ int result = -ENODEV;
+
+ dprintk(DBGLVL_VBI, "%s()\n", __func__);
+
+ if (port->type != SAA7164_MPEG_VBI)
+ BUG();
+
+ /* Sanity check that the PCI configuration space is active */
+ if (port->hwcfg.BARLocation == 0) {
+ printk(KERN_ERR "%s() failed "
+ "(errno = %d), NO PCI configuration\n",
+ __func__, result);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ /* Establish VBI defaults here */
+
+ /* Allocate and register the video device node */
+ port->v4l_device = saa7164_vbi_alloc(port,
+ dev->pci, &saa7164_vbi_template, "vbi");
+
+ if (port->v4l_device == NULL) {
+ printk(KERN_INFO "%s: can't allocate vbi device\n",
+ dev->name);
+ result = -ENOMEM;
+ goto failed;
+ }
+
+ video_set_drvdata(port->v4l_device, port);
+ result = video_register_device(port->v4l_device,
+ VFL_TYPE_VBI, -1);
+ if (result < 0) {
+ printk(KERN_INFO "%s: can't register vbi device\n",
+ dev->name);
+ /* TODO: We're going to leak here if we don't dealloc
+ the buffers above. The unreg function can't deal with it.
+ */
+ goto failed;
+ }
+
+ printk(KERN_INFO "%s: registered device vbi%d [vbi]\n",
+ dev->name, port->v4l_device->num);
+
+ /* Configure the hardware defaults */
+
+ result = 0;
+failed:
+ return result;
+}
+
+void saa7164_vbi_unregister(struct saa7164_port *port)
+{
+ struct saa7164_dev *dev = port->dev;
+
+ dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
+
+ if (port->type != SAA7164_MPEG_VBI)
+ BUG();
+
+ if (port->v4l_device) {
+ if (port->v4l_device->minor != -1)
+ video_unregister_device(port->v4l_device);
+ else
+ video_device_release(port->v4l_device);
+
+ port->v4l_device = NULL;
+ }
+
+}
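Both the VBI path above and the encoder path drive the firmware through the Windows AVStream/KS style sequence noted before saa7164_vbi_stop_streaming(): stopped, acquired, paused, run, in order, and back again. A condensed sketch of the forward walk, assuming only saa7164_api_transition_port() and the SAA_DMASTATE_* / SAA_ERR_ALREADY_STOPPED constants from this series (example_port_run is a hypothetical helper, not part of the patch):

static int example_port_run(struct saa7164_port *port)
{
	static const u8 seq[] = {
		SAA_DMASTATE_ACQUIRE,
		SAA_DMASTATE_PAUSE,
		SAA_DMASTATE_RUN,
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(seq); i++) {
		ret = saa7164_api_transition_port(port, seq[i]);
		if (ret != SAA_OK && ret != SAA_ERR_ALREADY_STOPPED)
			return -EIO;	/* caller should step back down to STOP */
	}

	return 0;
}
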
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 42660b546f0e..1d9c5cbbbc52 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -1,7 +1,7 @@
/*
* Driver for the NXP SAA7164 PCIe bridge
*
- * Copyright (c) 2009 Steven Toth <stoth@kernellabs.com>
+ * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -48,18 +48,29 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
+#include <linux/smp_lock.h>
+#include <dvb_demux.h>
+#include <dvb_frontend.h>
+#include <dvb_net.h>
+#include <dvbdev.h>
+#include <dmxdev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
#include "saa7164-reg.h"
#include "saa7164-types.h"
-#include <linux/version.h>
-#include <linux/mutex.h>
-
#define SAA7164_MAXBOARDS 8
#define UNSET (-1U)
@@ -76,7 +87,19 @@
#define SAA7164_MAX_UNITS 8
#define SAA7164_TS_NUMBER_OF_LINES 312
+#define SAA7164_PS_NUMBER_OF_LINES 256
#define SAA7164_PT_ENTRIES 16 /* (312 * 188) / 4096 */
+#define SAA7164_MAX_ENCODER_BUFFERS 64 /* max 5secs of latency at 6Mbps */
+#define SAA7164_MAX_VBI_BUFFERS 64
+
+/* Port related defines */
+#define SAA7164_PORT_TS1 (0)
+#define SAA7164_PORT_TS2 (SAA7164_PORT_TS1 + 1)
+#define SAA7164_PORT_ENC1 (SAA7164_PORT_TS2 + 1)
+#define SAA7164_PORT_ENC2 (SAA7164_PORT_ENC1 + 1)
+#define SAA7164_PORT_VBI1 (SAA7164_PORT_ENC2 + 1)
+#define SAA7164_PORT_VBI2 (SAA7164_PORT_VBI1 + 1)
+#define SAA7164_MAX_PORTS (SAA7164_PORT_VBI2 + 1)
#define DBGLVL_FW 4
#define DBGLVL_DVB 8
@@ -86,10 +109,18 @@
#define DBGLVL_BUS 128
#define DBGLVL_IRQ 256
#define DBGLVL_BUF 512
+#define DBGLVL_ENC 1024
+#define DBGLVL_VBI 2048
+#define DBGLVL_THR 4096
+#define DBGLVL_CPU 8192
+
+#define SAA7164_NORMS (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443)
enum port_t {
SAA7164_MPEG_UNDEFINED = 0,
SAA7164_MPEG_DVB,
+ SAA7164_MPEG_ENCODER,
+ SAA7164_MPEG_VBI,
};
enum saa7164_i2c_bus_nr {
@@ -134,7 +165,8 @@ struct saa7164_unit {
struct saa7164_board {
char *name;
- enum port_t porta, portb;
+ enum port_t porta, portb, portc,
+ portd, porte, portf;
enum {
SAA7164_CHIP_UNDEFINED = 0,
SAA7164_CHIP_REV2,
@@ -149,6 +181,42 @@ struct saa7164_subid {
u32 card;
};
+struct saa7164_encoder_fh {
+ struct saa7164_port *port;
+// u32 freq;
+// u32 tuner_type;
+ atomic_t v4l_reading;
+};
+
+struct saa7164_vbi_fh {
+ struct saa7164_port *port;
+// u32 freq;
+// u32 tuner_type;
+ atomic_t v4l_reading;
+};
+
+struct saa7164_histogram_bucket {
+ u32 val;
+ u32 count;
+ u64 update_time;
+};
+
+struct saa7164_histogram {
+ char name[32];
+ struct saa7164_histogram_bucket counter1[64];
+};
+
+struct saa7164_user_buffer {
+ struct list_head list;
+
+ /* Attributes */
+ u8 *data;
+ u32 pos;
+ u32 actual_size;
+
+ u32 crc;
+};
+
struct saa7164_fw_status {
/* RISC Core details */
@@ -191,14 +259,60 @@ struct saa7164_i2c {
u32 i2c_rc;
};
-struct saa7164_tsport;
+struct saa7164_ctrl {
+ struct v4l2_queryctrl v;
+};
+
+struct saa7164_tvnorm {
+ char *name;
+ v4l2_std_id id;
+// u32 cxiformat;
+// u32 cxoformat;
+};
+
+struct saa7164_encoder_params {
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 is_50hz;
+ u32 bitrate; /* bps */
+ u32 bitrate_peak; /* bps */
+ u32 bitrate_mode;
+ u32 stream_type; /* V4L2_MPEG_STREAM_TYPE_MPEG2_TS */
+
+ u32 audio_sampling_freq;
+ u32 ctl_mute;
+ u32 ctl_aspect;
+ u32 refdist;
+ u32 gop_size;
+};
+
+struct saa7164_vbi_params {
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 is_50hz;
+ u32 bitrate; /* bps */
+ u32 bitrate_peak; /* bps */
+ u32 bitrate_mode;
+ u32 stream_type; /* V4L2_MPEG_STREAM_TYPE_MPEG2_TS */
+
+ u32 audio_sampling_freq;
+ u32 ctl_mute;
+ u32 ctl_aspect;
+ u32 refdist;
+ u32 gop_size;
+};
+
+struct saa7164_port;
struct saa7164_buffer {
struct list_head list;
- u32 nr;
+ /* Index position we occupy in the h/w buffer list */
+ int idx;
- struct saa7164_tsport *port;
+ struct saa7164_port *port;
/* Hardware Specific */
/* PCI Memory allocations */
@@ -206,28 +320,33 @@ struct saa7164_buffer {
/* A block of page align PCI memory */
u32 pci_size; /* PCI allocation size in bytes */
- u64 *cpu; /* Virtual address */
+ u64 __iomem *cpu; /* Virtual address */
dma_addr_t dma; /* Physical address */
+ u32 crc; /* Checksum for the entire buffer data */
/* A page table that splits the block into a number of entries */
u32 pt_size; /* PCI allocation size in bytes */
- u64 *pt_cpu; /* Virtual address */
+ u64 __iomem *pt_cpu; /* Virtual address */
dma_addr_t pt_dma; /* Physical address */
+
+ /* Encoder fops */
+ u32 pos;
+ u32 actual_size;
};
-struct saa7164_tsport {
+struct saa7164_port {
struct saa7164_dev *dev;
- int nr;
enum port_t type;
+ int nr;
- struct saa7164_dvb dvb;
+ /* --- Generic port attributes --- */
- /* HW related stream parameters */
- tmHWStreamParameters_t hw_streamingparams;
+ /* HW stream parameters */
+ struct tmHWStreamParameters hw_streamingparams;
/* DMA configuration values, is seeded during initialization */
- tmComResDMATermDescrHeader_t hwcfg;
+ struct tmComResDMATermDescrHeader hwcfg;
/* hardware specific registers */
u32 bufcounter;
@@ -239,11 +358,76 @@ struct saa7164_tsport {
u64 bufptr64;
u32 numpte; /* Number of entries in array, only valid in head */
+
struct mutex dmaqueue_lock;
- struct mutex dummy_dmaqueue_lock;
struct saa7164_buffer dmaqueue;
- struct saa7164_buffer dummy_dmaqueue;
+ u64 last_irq_msecs, last_svc_msecs;
+ u64 last_irq_msecs_diff, last_svc_msecs_diff;
+ u32 last_svc_wp;
+ u32 last_svc_rp;
+ u64 last_irq_svc_msecs_diff;
+ u64 last_read_msecs, last_read_msecs_diff;
+ u64 last_poll_msecs, last_poll_msecs_diff;
+
+ struct saa7164_histogram irq_interval;
+ struct saa7164_histogram svc_interval;
+ struct saa7164_histogram irq_svc_interval;
+ struct saa7164_histogram read_interval;
+ struct saa7164_histogram poll_interval;
+
+ /* --- DVB Transport Specific --- */
+ struct saa7164_dvb dvb;
+
+ /* --- Encoder/V4L related attributes --- */
+ /* Encoder */
+ /* Defaults established in saa7164-encoder.c */
+ struct saa7164_tvnorm encodernorm;
+ u32 height;
+ u32 width;
+ u32 freq;
+ u32 ts_packet_size;
+ u32 ts_packet_count;
+ u8 mux_input;
+ u8 encoder_profile;
+ u8 video_format;
+ u8 audio_format;
+ u8 video_resolution;
+ u16 ctl_brightness;
+ u16 ctl_contrast;
+ u16 ctl_hue;
+ u16 ctl_saturation;
+ u16 ctl_sharpness;
+ s8 ctl_volume;
+
+ struct tmComResAFeatureDescrHeader audfeat;
+ struct tmComResEncoderDescrHeader encunit;
+ struct tmComResProcDescrHeader vidproc;
+ struct tmComResExtDevDescrHeader ifunit;
+ struct tmComResTunerDescrHeader tunerunit;
+
+ struct work_struct workenc;
+
+ /* V4L Encoder Video */
+ struct saa7164_encoder_params encoder_params;
+ struct video_device *v4l_device;
+ atomic_t v4l_reader_count;
+
+ struct saa7164_buffer list_buf_used;
+ struct saa7164_buffer list_buf_free;
+ wait_queue_head_t wait_read;
+
+ /* V4L VBI */
+ struct tmComResVBIFormatDescrHeader vbi_fmt_ntsc;
+ struct saa7164_vbi_params vbi_params;
+
+ /* Debug */
+ u32 sync_errors;
+ u32 v_cc_errors;
+ u32 a_cc_errors;
+ u8 last_v_cc;
+ u8 last_a_cc;
+ u32 done_first_interrupt;
};
struct saa7164_dev {
@@ -268,12 +452,13 @@ struct saa7164_dev {
/* firmware status */
struct saa7164_fw_status fw_status;
+ u32 firmwareloaded;
- tmComResHWDescr_t hwdesc;
- tmComResInterfaceDescr_t intfdesc;
- tmComResBusDescr_t busdesc;
+ struct tmComResHWDescr hwdesc;
+ struct tmComResInterfaceDescr intfdesc;
+ struct tmComResBusDescr busdesc;
- tmComResBusInfo_t bus;
+ struct tmComResBusInfo bus;
/* Interrupt status and ack registers */
u32 int_status;
@@ -286,15 +471,22 @@ struct saa7164_dev {
struct saa7164_i2c i2c_bus[3];
/* Transport related */
- struct saa7164_tsport ts1, ts2;
+ struct saa7164_port ports[SAA7164_MAX_PORTS];
/* Deferred command/api interrupts handling */
struct work_struct workcmd;
+ /* A kernel thread to monitor the firmware log, used
+ * only in debug mode.
+ */
+ struct task_struct *kthread;
+
};
extern struct list_head saa7164_devlist;
extern unsigned int waitsecs;
+extern unsigned int encoder_buffers;
+extern unsigned int vbi_buffers;
/* ----------------------------------------------------------- */
/* saa7164-core.c */
@@ -302,6 +494,7 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr);
void saa7164_dumphex16(struct saa7164_dev *dev, u8 *buf, int len);
void saa7164_getfirmwarestatus(struct saa7164_dev *dev);
u32 saa7164_getcurrentfirmwareversion(struct saa7164_dev *dev);
+void saa7164_histogram_update(struct saa7164_histogram *hg, u32 val);
/* ----------------------------------------------------------- */
/* saa7164-fw.c */
@@ -318,14 +511,14 @@ extern void saa7164_call_i2c_clients(struct saa7164_i2c *bus,
/* saa7164-bus.c */
int saa7164_bus_setup(struct saa7164_dev *dev);
void saa7164_bus_dump(struct saa7164_dev *dev);
-int saa7164_bus_set(struct saa7164_dev *dev, tmComResInfo_t* msg, void *buf);
-int saa7164_bus_get(struct saa7164_dev *dev, tmComResInfo_t* msg,
+int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg, void *buf);
+int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
void *buf, int peekonly);
/* ----------------------------------------------------------- */
/* saa7164-cmd.c */
int saa7164_cmd_send(struct saa7164_dev *dev,
- u8 id, tmComResCmd_t command, u16 controlselector,
+ u8 id, enum tmComResCmd command, u16 controlselector,
u16 size, void *buf);
void saa7164_cmd_signal(struct saa7164_dev *dev, u8 seqno);
int saa7164_irq_dequeue(struct saa7164_dev *dev);
@@ -343,7 +536,24 @@ int saa7164_api_dif_write(struct saa7164_i2c *bus, u8 addr,
int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen);
int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin);
int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin);
-int saa7164_api_transition_port(struct saa7164_tsport *port, u8 mode);
+int saa7164_api_transition_port(struct saa7164_port *port, u8 mode);
+int saa7164_api_initialize_dif(struct saa7164_port *port);
+int saa7164_api_configure_dif(struct saa7164_port *port, u32 std);
+int saa7164_api_set_encoder(struct saa7164_port *port);
+int saa7164_api_get_encoder(struct saa7164_port *port);
+int saa7164_api_set_aspect_ratio(struct saa7164_port *port);
+int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl);
+int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl);
+int saa7164_api_set_videomux(struct saa7164_port *port);
+int saa7164_api_audio_mute(struct saa7164_port *port, int mute);
+int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level);
+int saa7164_api_set_audio_std(struct saa7164_port *port);
+int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect);
+int saa7164_api_get_videomux(struct saa7164_port *port);
+int saa7164_api_set_vbi_format(struct saa7164_port *port);
+int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level);
+int saa7164_api_collect_debug(struct saa7164_dev *dev);
+int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i);
/* ----------------------------------------------------------- */
/* saa7164-cards.c */
@@ -363,18 +573,36 @@ extern char *saa7164_unitid_name(struct saa7164_dev *dev, u8 unitid);
/* ----------------------------------------------------------- */
/* saa7164-dvb.c */
-extern int saa7164_dvb_register(struct saa7164_tsport *port);
-extern int saa7164_dvb_unregister(struct saa7164_tsport *port);
+extern int saa7164_dvb_register(struct saa7164_port *port);
+extern int saa7164_dvb_unregister(struct saa7164_port *port);
/* ----------------------------------------------------------- */
/* saa7164-buffer.c */
-extern struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_tsport *port,
- u32 len);
-extern int saa7164_buffer_dealloc(struct saa7164_tsport *port,
- struct saa7164_buffer *buf);
+extern struct saa7164_buffer *saa7164_buffer_alloc(
+ struct saa7164_port *port, u32 len);
+extern int saa7164_buffer_dealloc(struct saa7164_buffer *buf);
+extern void saa7164_buffer_display(struct saa7164_buffer *buf);
+extern int saa7164_buffer_activate(struct saa7164_buffer *buf, int i);
+extern int saa7164_buffer_cfg_port(struct saa7164_port *port);
+extern struct saa7164_user_buffer *saa7164_buffer_alloc_user(
+ struct saa7164_dev *dev, u32 len);
+extern void saa7164_buffer_dealloc_user(struct saa7164_user_buffer *buf);
+extern int saa7164_buffer_zero_offsets(struct saa7164_port *port, int i);
+
+/* ----------------------------------------------------------- */
+/* saa7164-encoder.c */
+int saa7164_encoder_register(struct saa7164_port *port);
+void saa7164_encoder_unregister(struct saa7164_port *port);
+
+/* ----------------------------------------------------------- */
+/* saa7164-vbi.c */
+int saa7164_vbi_register(struct saa7164_port *port);
+void saa7164_vbi_unregister(struct saa7164_port *port);
/* ----------------------------------------------------------- */
+extern unsigned int crc_checking;
+
extern unsigned int saa_debug;
#define dprintk(level, fmt, arg...)\
do { if (saa_debug & level)\
@@ -394,7 +622,6 @@ extern unsigned int saa_debug;
#define saa7164_readl(reg) readl(dev->lmmio + ((reg) >> 2))
#define saa7164_writel(reg, value) writel((value), dev->lmmio + ((reg) >> 2))
-
#define saa7164_readb(reg) readl(dev->bmmio + (reg))
#define saa7164_writeb(reg, value) writel((value), dev->bmmio + (reg))
diff --git a/drivers/media/video/saa717x.c b/drivers/media/video/saa717x.c
index 45f8bfc1342e..b6172c2c517e 100644
--- a/drivers/media/video/saa717x.c
+++ b/drivers/media/video/saa717x.c
@@ -39,7 +39,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver");
MODULE_AUTHOR("K. Ohta, T. Adachi, Hans Verkuil");
@@ -1366,9 +1365,25 @@ static const struct i2c_device_id saa717x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa717x_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa717x",
- .probe = saa717x_probe,
- .remove = saa717x_remove,
- .id_table = saa717x_id,
+static struct i2c_driver saa717x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa717x",
+ },
+ .probe = saa717x_probe,
+ .remove = saa717x_remove,
+ .id_table = saa717x_id,
};
+
+static __init int init_saa717x(void)
+{
+ return i2c_add_driver(&saa717x_driver);
+}
+
+static __exit void exit_saa717x(void)
+{
+ i2c_del_driver(&saa717x_driver);
+}
+
+module_init(init_saa717x);
+module_exit(exit_saa717x);
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 77db20392910..96f56c2f11f3 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -30,11 +30,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("Philips SAA7185 video encoder driver");
MODULE_AUTHOR("Dave Perks");
@@ -366,9 +364,25 @@ static const struct i2c_device_id saa7185_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7185_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7185",
- .probe = saa7185_probe,
- .remove = saa7185_remove,
- .id_table = saa7185_id,
+static struct i2c_driver saa7185_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7185",
+ },
+ .probe = saa7185_probe,
+ .remove = saa7185_remove,
+ .id_table = saa7185_id,
};
+
+static __init int init_saa7185(void)
+{
+ return i2c_add_driver(&saa7185_driver);
+}
+
+static __exit void exit_saa7185(void)
+{
+ i2c_del_driver(&saa7185_driver);
+}
+
+module_init(init_saa7185);
+module_exit(exit_saa7185);
diff --git a/drivers/media/video/saa7191.c b/drivers/media/video/saa7191.c
index a2513772196b..211fa25a1239 100644
--- a/drivers/media/video/saa7191.c
+++ b/drivers/media/video/saa7191.c
@@ -23,7 +23,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "saa7191.h"
@@ -647,9 +646,25 @@ static const struct i2c_device_id saa7191_id[] = {
};
MODULE_DEVICE_TABLE(i2c, saa7191_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "saa7191",
- .probe = saa7191_probe,
- .remove = saa7191_remove,
- .id_table = saa7191_id,
+static struct i2c_driver saa7191_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "saa7191",
+ },
+ .probe = saa7191_probe,
+ .remove = saa7191_remove,
+ .id_table = saa7191_id,
};
+
+static __init int init_saa7191(void)
+{
+ return i2c_add_driver(&saa7191_driver);
+}
+
+static __exit void exit_saa7191(void)
+{
+ i2c_del_driver(&saa7191_driver);
+}
+
+module_init(init_saa7191);
+module_exit(exit_saa7191);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 2b24bd0de3ad..5c209afb0ac8 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -245,7 +245,7 @@ static void free_buffer(struct videobuf_queue *vq,
if (in_interrupt())
BUG();
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(vq, &buf->vb, 0, 0);
videobuf_dma_contig_free(vq, &buf->vb);
dev_dbg(dev, "%s freed\n", __func__);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
@@ -1726,7 +1726,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
+static int sh_mobile_ceu_reqbufs(struct soc_camera_device *icd,
struct v4l2_requestbuffers *p)
{
int i;
@@ -1740,7 +1740,7 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
for (i = 0; i < p->count; i++) {
struct sh_mobile_ceu_buffer *buf;
- buf = container_of(icf->vb_vidq.bufs[i],
+ buf = container_of(icd->vb_vidq.bufs[i],
struct sh_mobile_ceu_buffer, vb);
INIT_LIST_HEAD(&buf->vb.queue);
}
@@ -1750,10 +1750,10 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
struct sh_mobile_ceu_buffer *buf;
- buf = list_entry(icf->vb_vidq.stream.next,
+ buf = list_entry(icd->vb_vidq.stream.next,
struct sh_mobile_ceu_buffer, vb.stream);
poll_wait(file, &buf->vb.done, pt);
@@ -1786,23 +1786,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
pcdev->field,
sizeof(struct sh_mobile_ceu_buffer),
- icd);
-}
-
-static int sh_mobile_ceu_get_parm(struct soc_camera_device *icd,
- struct v4l2_streamparm *parm)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-
- return v4l2_subdev_call(sd, video, g_parm, parm);
-}
-
-static int sh_mobile_ceu_set_parm(struct soc_camera_device *icd,
- struct v4l2_streamparm *parm)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
-
- return v4l2_subdev_call(sd, video, s_parm, parm);
+ icd, NULL);
}
static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
@@ -1866,8 +1850,6 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.try_fmt = sh_mobile_ceu_try_fmt,
.set_ctrl = sh_mobile_ceu_set_ctrl,
.get_ctrl = sh_mobile_ceu_get_ctrl,
- .set_parm = sh_mobile_ceu_set_parm,
- .get_parm = sh_mobile_ceu_get_parm,
.reqbufs = sh_mobile_ceu_reqbufs,
.poll = sh_mobile_ceu_poll,
.querycap = sh_mobile_ceu_querycap,
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index d394187eb701..0f4906136b8f 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -230,7 +230,7 @@ static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
BUG_ON(in_interrupt());
/* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
- videobuf_waiton(vb, 0, 0);
+ videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -1189,7 +1189,8 @@ static int sh_vou_open(struct file *file)
vou_dev->v4l2_dev.dev, &vou_dev->lock,
V4L2_BUF_TYPE_VIDEO_OUTPUT,
V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vdev);
+ sizeof(struct videobuf_buffer), vdev,
+ NULL);
return 0;
}
@@ -1405,7 +1406,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev)
goto ereset;
subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
- vou_pdata->module_name, vou_pdata->board_info, NULL);
+ NULL, vou_pdata->board_info, NULL);
if (!subdev) {
ret = -ENOMEM;
goto ei2cnd;
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index b6643ca7656a..ccfa59c54552 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -116,10 +116,14 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
/* SN9C120 */
{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
+#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
+#endif
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index a499cacec1f3..43848a751d11 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -92,8 +92,7 @@ EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
@@ -105,8 +104,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int ret = 0;
if (inp->index != 0)
@@ -141,8 +139,7 @@ static int soc_camera_s_input(struct file *file, void *priv, unsigned int i)
static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, s_std, *a);
@@ -152,47 +149,59 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
int ret;
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
- ret = videobuf_reqbufs(&icf->vb_vidq, p);
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ ret = videobuf_reqbufs(&icd->vb_vidq, p);
if (ret < 0)
return ret;
- return ici->ops->reqbufs(icf, p);
+ ret = ici->ops->reqbufs(icd, p);
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
+
+ return ret;
}
static int soc_camera_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_querybuf(&icf->vb_vidq, p);
+ return videobuf_querybuf(&icd->vb_vidq, p);
}
static int soc_camera_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_qbuf(&icf->vb_vidq, p);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return videobuf_qbuf(&icd->vb_vidq, p);
}
static int soc_camera_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct soc_camera_file *icf = file->private_data;
+ struct soc_camera_device *icd = file->private_data;
WARN_ON(priv != file->private_data);
- return videobuf_dqbuf(&icf->vb_vidq, p, file->f_flags & O_NONBLOCK);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ return videobuf_dqbuf(&icd->vb_vidq, p, file->f_flags & O_NONBLOCK);
}
/* Always entered with .video_lock held */
@@ -280,10 +289,9 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
((x) >> 24) & 0xff
/* Called with .vb_lock held, or from the first open(2), see comment there */
-static int soc_camera_set_fmt(struct soc_camera_file *icf,
+static int soc_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_device *icd = icf->icd;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
@@ -309,7 +317,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
icd->user_width = pix->width;
icd->user_height = pix->height;
icd->colorspace = pix->colorspace;
- icf->vb_vidq.field =
+ icd->vb_vidq.field =
icd->field = pix->field;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -331,7 +339,6 @@ static int soc_camera_open(struct file *file)
dev);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct soc_camera_host *ici;
- struct soc_camera_file *icf;
int ret;
if (!icd->ops)
@@ -340,14 +347,9 @@ static int soc_camera_open(struct file *file)
ici = to_soc_camera_host(icd->dev.parent);
- icf = vmalloc(sizeof(*icf));
- if (!icf)
- return -ENOMEM;
-
if (!try_module_get(ici->ops->owner)) {
dev_err(&icd->dev, "Couldn't lock capture bus driver.\n");
- ret = -EINVAL;
- goto emgi;
+ return -EINVAL;
}
/*
@@ -356,7 +358,6 @@ static int soc_camera_open(struct file *file)
*/
mutex_lock(&icd->video_lock);
- icf->icd = icd;
icd->use_count++;
/* Now we really have to activate the camera */
@@ -401,15 +402,15 @@ static int soc_camera_open(struct file *file)
* apart from someone else calling open() simultaneously, but
* .video_lock is protecting us against it.
*/
- ret = soc_camera_set_fmt(icf, &f);
+ ret = soc_camera_set_fmt(icd, &f);
if (ret < 0)
goto esfmt;
}
- file->private_data = icf;
+ file->private_data = icd;
dev_dbg(&icd->dev, "camera device open\n");
- ici->ops->init_videobuf(&icf->vb_vidq, icd);
+ ici->ops->init_videobuf(&icd->vb_vidq, icd);
mutex_unlock(&icd->video_lock);
@@ -430,15 +431,13 @@ epower:
icd->use_count--;
mutex_unlock(&icd->video_lock);
module_put(ici->ops->owner);
-emgi:
- vfree(icf);
+
return ret;
}
static int soc_camera_close(struct file *file)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
mutex_lock(&icd->video_lock);
@@ -455,12 +454,13 @@ static int soc_camera_close(struct file *file)
icl->power(icd->pdev, 0);
}
+ if (icd->streamer == file)
+ icd->streamer = NULL;
+
mutex_unlock(&icd->video_lock);
module_put(ici->ops->owner);
- vfree(icf);
-
dev_dbg(&icd->dev, "camera device close\n");
return 0;
@@ -469,8 +469,7 @@ static int soc_camera_close(struct file *file)
static ssize_t soc_camera_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int err = -EINVAL;
dev_err(&icd->dev, "camera device read not implemented\n");
@@ -480,13 +479,15 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int err;
dev_dbg(&icd->dev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- err = videobuf_mmap_mapper(&icf->vb_vidq, vma);
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
dev_dbg(&icd->dev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
@@ -498,11 +499,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- if (list_empty(&icf->vb_vidq.stream)) {
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ if (list_empty(&icd->vb_vidq.stream)) {
dev_err(&icd->dev, "Trying to poll with no queued buffers!\n");
return POLLERR;
}
@@ -523,24 +526,29 @@ static struct v4l2_file_operations soc_camera_fops = {
static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
int ret;
WARN_ON(priv != file->private_data);
- mutex_lock(&icf->vb_vidq.vb_lock);
+ if (icd->streamer && icd->streamer != file)
+ return -EBUSY;
+
+ mutex_lock(&icd->vb_vidq.vb_lock);
- if (icf->vb_vidq.bufs[0]) {
+ if (icd->vb_vidq.bufs[0]) {
dev_err(&icd->dev, "S_FMT denied: queue initialised\n");
ret = -EBUSY;
goto unlock;
}
- ret = soc_camera_set_fmt(icf, f);
+ ret = soc_camera_set_fmt(icd, f);
+
+ if (!ret && !icd->streamer)
+ icd->streamer = file;
unlock:
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -548,8 +556,7 @@ unlock:
static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
const struct soc_mbus_pixelfmt *format;
WARN_ON(priv != file->private_data);
@@ -568,15 +575,14 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_pix_format *pix = &f->fmt.pix;
WARN_ON(priv != file->private_data);
pix->width = icd->user_width;
pix->height = icd->user_height;
- pix->field = icf->vb_vidq.field;
+ pix->field = icd->vb_vidq.field;
pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
icd->current_fmt->host_fmt);
@@ -592,8 +598,7 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
WARN_ON(priv != file->private_data);
@@ -605,8 +610,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
static int soc_camera_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -615,12 +619,15 @@ static int soc_camera_streamon(struct file *file, void *priv,
if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ if (icd->streamer != file)
+ return -EBUSY;
+
mutex_lock(&icd->video_lock);
v4l2_subdev_call(sd, video, s_stream, 1);
/* This calls buf_queue from host driver's videobuf_queue_ops */
- ret = videobuf_streamon(&icf->vb_vidq);
+ ret = videobuf_streamon(&icd->vb_vidq);
mutex_unlock(&icd->video_lock);
@@ -630,8 +637,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
static int soc_camera_streamoff(struct file *file, void *priv,
enum v4l2_buf_type i)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
WARN_ON(priv != file->private_data);
@@ -639,13 +645,16 @@ static int soc_camera_streamoff(struct file *file, void *priv,
if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ if (icd->streamer != file)
+ return -EBUSY;
+
mutex_lock(&icd->video_lock);
/*
* This calls buf_release from host driver's videobuf_queue_ops for all
* remaining buffers. When the last buffer is freed, stop capture
*/
- videobuf_streamoff(&icf->vb_vidq);
+ videobuf_streamoff(&icd->vb_vidq);
v4l2_subdev_call(sd, video, s_stream, 0);
@@ -657,8 +666,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
static int soc_camera_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
int i;
@@ -689,8 +697,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv,
static int soc_camera_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -709,8 +716,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv,
static int soc_camera_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -729,8 +735,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv,
static int soc_camera_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
return ici->ops->cropcap(icd, a);
@@ -739,14 +744,13 @@ static int soc_camera_cropcap(struct file *file, void *fh,
static int soc_camera_g_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
int ret;
- mutex_lock(&icf->vb_vidq.vb_lock);
+ mutex_lock(&icd->vb_vidq.vb_lock);
ret = ici->ops->get_crop(icd, a);
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -759,8 +763,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_rect *rect = &a->c;
struct v4l2_crop current_crop;
@@ -773,7 +776,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
rect->width, rect->height, rect->left, rect->top);
/* Cropping is allowed during a running capture, guard consistency */
- mutex_lock(&icf->vb_vidq.vb_lock);
+ mutex_lock(&icd->vb_vidq.vb_lock);
/* If get_crop fails, we'll let host and / or client drivers decide */
ret = ici->ops->get_crop(icd, &current_crop);
@@ -782,7 +785,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
if (ret < 0) {
dev_err(&icd->dev,
"S_CROP denied: getting current crop failed\n");
- } else if (icf->vb_vidq.bufs[0] &&
+ } else if (icd->vb_vidq.bufs[0] &&
(a->c.width != current_crop.c.width ||
a->c.height != current_crop.c.height)) {
dev_err(&icd->dev,
@@ -792,7 +795,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
ret = ici->ops->set_crop(icd, a);
}
- mutex_unlock(&icf->vb_vidq.vb_lock);
+ mutex_unlock(&icd->vb_vidq.vb_lock);
return ret;
}
@@ -800,8 +803,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
static int soc_camera_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
if (ici->ops->get_parm)
@@ -813,8 +815,7 @@ static int soc_camera_g_parm(struct file *file, void *fh,
static int soc_camera_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
if (ici->ops->set_parm)
@@ -826,8 +827,7 @@ static int soc_camera_s_parm(struct file *file, void *fh,
static int soc_camera_g_chip_ident(struct file *file, void *fh,
struct v4l2_dbg_chip_ident *id)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, g_chip_ident, id);
@@ -837,8 +837,7 @@ static int soc_camera_g_chip_ident(struct file *file, void *fh,
static int soc_camera_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, g_register, reg);
@@ -847,8 +846,7 @@ static int soc_camera_g_register(struct file *file, void *fh,
static int soc_camera_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct soc_camera_file *icf = file->private_data;
- struct soc_camera_device *icd = icf->icd;
+ struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, core, s_register, reg);
@@ -898,11 +896,11 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
icl->board_info->platform_data = icd;
subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
- icl->module_name, icl->board_info, NULL);
+ NULL, icl->board_info, NULL);
if (!subdev)
goto ei2cnd;
- client = subdev->priv;
+ client = v4l2_get_subdevdata(subdev);
/* Use to_i2c_client(dev) to recover the i2c client */
dev_set_drvdata(&icd->dev, &client->dev);
@@ -1148,6 +1146,20 @@ static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
return v4l2_subdev_call(sd, video, s_crop, a);
}
+static int default_g_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, g_parm, parm);
+}
+
+static int default_s_parm(struct soc_camera_device *icd,
+ struct v4l2_streamparm *parm)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ return v4l2_subdev_call(sd, video, s_parm, parm);
+}
+
static void soc_camera_device_init(struct device *dev, void *pdata)
{
dev->platform_data = pdata;
@@ -1179,6 +1191,10 @@ int soc_camera_host_register(struct soc_camera_host *ici)
ici->ops->get_crop = default_g_crop;
if (!ici->ops->cropcap)
ici->ops->cropcap = default_cropcap;
+ if (!ici->ops->set_parm)
+ ici->ops->set_parm = default_s_parm;
+ if (!ici->ops->get_parm)
+ ici->ops->get_parm = default_g_parm;
mutex_lock(&list_lock);
list_for_each_entry(ix, &hosts, list) {
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
new file mode 100644
index 000000000000..c9dc67aba980
--- /dev/null
+++ b/drivers/media/video/sr030pc30.c
@@ -0,0 +1,894 @@
+/*
+ * Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP
+ *
+ * Copyright (C) 2010 Samsung Electronics Co., Ltd
+ * Author: Sylwester Nawrocki, s.nawrocki@samsung.com
+ *
+ * Based on original driver authored by Dongsoo Nathaniel Kim
+ * and HeungJun Kim <riverful.kim@samsung.com>.
+ *
+ * Based on mt9v011 Micron Digital Image Sensor driver
+ * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+#include <media/sr030pc30.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define MODULE_NAME "SR030PC30"
+
+/*
+ * Register offsets within a page
+ * b15..b8 - page id, b7..b0 - register address
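+ * e.g. YOFS_REG 0x1040 addresses page 0x10, register 0x40 (the page is
+ * selected via PAGEMODE_REG)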
+ */
+#define POWER_CTRL_REG 0x0001
+#define PAGEMODE_REG 0x03
+#define DEVICE_ID_REG 0x0004
+#define NOON010PC30_ID 0x86
+#define SR030PC30_ID 0x8C
+#define VDO_CTL1_REG 0x0010
+#define SUBSAMPL_NONE_VGA 0
+#define SUBSAMPL_QVGA 0x10
+#define SUBSAMPL_QQVGA 0x20
+#define VDO_CTL2_REG 0x0011
+#define SYNC_CTL_REG 0x0012
+#define WIN_ROWH_REG 0x0020
+#define WIN_ROWL_REG 0x0021
+#define WIN_COLH_REG 0x0022
+#define WIN_COLL_REG 0x0023
+#define WIN_HEIGHTH_REG 0x0024
+#define WIN_HEIGHTL_REG 0x0025
+#define WIN_WIDTHH_REG 0x0026
+#define WIN_WIDTHL_REG 0x0027
+#define HBLANKH_REG 0x0040
+#define HBLANKL_REG 0x0041
+#define VSYNCH_REG 0x0042
+#define VSYNCL_REG 0x0043
+/* page 10 */
+#define ISP_CTL_REG(n) (0x1010 + (n))
+#define YOFS_REG 0x1040
+#define DARK_YOFS_REG 0x1041
+#define AG_ABRTH_REG 0x1050
+#define SAT_CTL_REG 0x1060
+#define BSAT_REG 0x1061
+#define RSAT_REG 0x1062
+#define AG_SAT_TH_REG 0x1063
+/* page 11 */
+#define ZLPF_CTRL_REG 0x1110
+#define ZLPF_CTRL2_REG 0x1112
+#define ZLPF_AGH_THR_REG 0x1121
+#define ZLPF_THR_REG 0x1160
+#define ZLPF_DYN_THR_REG 0x1160
+/* page 12 */
+#define YCLPF_CTL1_REG 0x1240
+#define YCLPF_CTL2_REG 0x1241
+#define YCLPF_THR_REG 0x1250
+#define BLPF_CTL_REG 0x1270
+#define BLPF_THR1_REG 0x1274
+#define BLPF_THR2_REG 0x1275
+/* page 14 - Lens Shading Compensation */
+#define LENS_CTRL_REG 0x1410
+#define LENS_XCEN_REG 0x1420
+#define LENS_YCEN_REG 0x1421
+#define LENS_R_COMP_REG 0x1422
+#define LENS_G_COMP_REG 0x1423
+#define LENS_B_COMP_REG 0x1424
+/* page 15 - Color correction */
+#define CMC_CTL_REG 0x1510
+#define CMC_OFSGH_REG 0x1514
+#define CMC_OFSGL_REG 0x1516
+#define CMC_SIGN_REG 0x1517
+/* Color correction coefficients */
+#define CMC_COEF_REG(n) (0x1530 + (n))
+/* Color correction offset coefficients */
+#define CMC_OFS_REG(n) (0x1540 + (n))
+/* page 16 - Gamma correction */
+#define GMA_CTL_REG 0x1610
+/* Gamma correction coefficients 0.14 */
+#define GMA_COEF_REG(n) (0x1630 + (n))
+/* page 20 - Auto Exposure */
+#define AE_CTL1_REG 0x2010
+#define AE_CTL2_REG 0x2011
+#define AE_FRM_CTL_REG 0x2020
+#define AE_FINE_CTL_REG(n) (0x2028 + (n))
+#define EXP_TIMEH_REG 0x2083
+#define EXP_TIMEM_REG 0x2084
+#define EXP_TIMEL_REG 0x2085
+#define EXP_MMINH_REG 0x2086
+#define EXP_MMINL_REG 0x2087
+#define EXP_MMAXH_REG 0x2088
+#define EXP_MMAXM_REG 0x2089
+#define EXP_MMAXL_REG 0x208A
+/* page 22 - Auto White Balance */
+#define AWB_CTL1_REG 0x2210
+#define AWB_ENABLE 0x80
+#define AWB_CTL2_REG 0x2211
+#define MWB_ENABLE 0x01
+/* RGB gain control (manual WB) when AWB_CTL1[7]=0 */
+#define AWB_RGAIN_REG 0x2280
+#define AWB_GGAIN_REG 0x2281
+#define AWB_BGAIN_REG 0x2282
+#define AWB_RMAX_REG 0x2283
+#define AWB_RMIN_REG 0x2284
+#define AWB_BMAX_REG 0x2285
+#define AWB_BMIN_REG 0x2286
+/* R, B gain range in bright light conditions */
+#define AWB_RMAXB_REG 0x2287
+#define AWB_RMINB_REG 0x2288
+#define AWB_BMAXB_REG 0x2289
+#define AWB_BMINB_REG 0x228A
+/* manual white balance, when AWB_CTL2[0]=1 */
+#define MWB_RGAIN_REG 0x22B2
+#define MWB_BGAIN_REG 0x22B3
+/* the token to mark an array end */
+#define REG_TERM 0xFFFF
+
+/* Minimum and maximum exposure time in ms */
+#define EXPOS_MIN_MS 1
+#define EXPOS_MAX_MS 125
+
+struct sr030pc30_info {
+ struct v4l2_subdev sd;
+ const struct sr030pc30_platform_data *pdata;
+ const struct sr030pc30_format *curr_fmt;
+ const struct sr030pc30_frmsize *curr_win;
+ unsigned int auto_wb:1;
+ unsigned int auto_exp:1;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ unsigned int sleep:1;
+ unsigned int exposure;
+ u8 blue_balance;
+ u8 red_balance;
+ u8 i2c_reg_page;
+};
+
+struct sr030pc30_format {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+ u16 ispctl1_reg;
+};
+
+struct sr030pc30_frmsize {
+ u16 width;
+ u16 height;
+ int vid_ctl1;
+};
+
+struct i2c_regval {
+ u16 addr;
+ u16 val;
+};
+
+static const struct v4l2_queryctrl sr030pc30_ctrl[] = {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 64,
+ }, {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Auto Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = EXPOS_MIN_MS,
+ .maximum = EXPOS_MAX_MS,
+ .step = 1,
+ .default_value = 1,
+ }, {
+ }
+};
+
+/* supported resolutions */
+static const struct sr030pc30_frmsize sr030pc30_sizes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .vid_ctl1 = SUBSAMPL_NONE_VGA,
+ }, {
+ .width = 320,
+ .height = 240,
+ .vid_ctl1 = SUBSAMPL_QVGA,
+ }, {
+ .width = 160,
+ .height = 120,
+ .vid_ctl1 = SUBSAMPL_QQVGA,
+ },
+};
+
+/* supported pixel formats */
+static const struct sr030pc30_format sr030pc30_formats[] = {
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x03,
+ }, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x02,
+ }, {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0,
+ }, {
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x01,
+ }, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .ispctl1_reg = 0x40,
+ },
+};
+
+static const struct i2c_regval sr030pc30_base_regs[] = {
+ /* Window size and position within pixel matrix */
+ { WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 },
+ { WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 },
+ { WIN_HEIGHTH_REG, 0x01 }, { WIN_HEIGHTL_REG, 0xE0 },
+ { WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 },
+ { HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 },
+ { VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 },
+ { SYNC_CTL_REG, 0 },
+	/* Color correction and saturation */
+ { ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 },
+ { DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 },
+ { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
+ { AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 },
+ { CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C },
+ { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F },
+ { CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 },
+ { CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 },
+ { CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 },
+ { CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 },
+ { CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f },
+ { CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 },
+ { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 },
+ { CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 },
+ { CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 },
+	/* Gamma correction coefficients */
+ { GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 },
+ { GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 },
+ { GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D },
+ { GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E },
+ { GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF },
+ { GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA },
+ { GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC },
+ { GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF },
+ /* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */
+ { ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E },
+ { ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F },
+ { ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 },
+ { YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 },
+ { BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 },
+ { BLPF_THR2_REG, 0x04 },
+ /* Automatic white balance */
+ { AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 },
+ { AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B },
+ { AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 },
+ { AWB_RMAXB_REG, 0x50 }, { AWB_RMINB_REG, 0x43 },
+ { AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 },
+ /* Auto exposure */
+ { AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 },
+ { AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F },
+ { AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 },
+ /* Lens shading compensation */
+ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
+ { LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 },
+ { LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e },
+ { REG_TERM, 0 },
+};
+
+static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct sr030pc30_info, sd);
+}
+
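+/*
+ * Select the register page encoded in bits 15..8 of 'reg' by writing
+ * PAGEMODE_REG; the current page is cached so redundant page switches
+ * are skipped, and no switch is needed when the target register is
+ * PAGEMODE_REG itself.
+ */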
+static inline int set_i2c_page(struct sr030pc30_info *info,
+ struct i2c_client *client, unsigned int reg)
+{
+ int ret = 0;
+ u32 page = reg >> 8 & 0xFF;
+
+ if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
+ ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
+ if (!ret)
+ info->i2c_reg_page = page;
+ }
+ return ret;
+}
+
+static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = set_i2c_page(info, client, reg_addr);
+ if (!ret)
+ ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
+ return ret;
+}
+
+static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = set_i2c_page(info, client, reg_addr);
+ if (!ret)
+ ret = i2c_smbus_write_byte_data(
+ client, reg_addr & 0xFF, val);
+ return ret;
+}
+
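+/* Write an array of { address, value } pairs terminated by REG_TERM. */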
+static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd,
+ const struct i2c_regval *msg)
+{
+ while (msg->addr != REG_TERM) {
+ int ret = cam_i2c_write(sd, msg->addr, msg->val);
+ if (ret)
+ return ret;
+ msg++;
+ }
+ return 0;
+}
+
+/* Device reset and sleep mode control */
+static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd,
+ bool reset, bool sleep)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ u8 reg = sleep ? 0xF1 : 0xF0;
+ int ret = 0;
+
+ if (reset)
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
+ if (!ret) {
+ ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
+ if (!ret) {
+ info->sleep = sleep;
+ if (reset)
+ info->i2c_reg_page = -1;
+ }
+ }
+ return ret;
+}
+
+static inline int sr030pc30_enable_autoexposure(struct v4l2_subdev *sd, int on)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ /* auto anti-flicker is also enabled here */
+ int ret = cam_i2c_write(sd, AE_CTL1_REG, on ? 0xDC : 0x0C);
+ if (!ret)
+ info->auto_exp = on;
+ return ret;
+}
+
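+/*
+ * Program a manual exposure time ('value' is in milliseconds, scaled by
+ * the sensor clock rate) into the EXP_TIMEH/M/L registers and disable
+ * auto exposure.
+ */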
+static int sr030pc30_set_exposure(struct v4l2_subdev *sd, int value)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ unsigned long expos = value * info->pdata->clk_rate / (8 * 1000);
+
+ int ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF);
+ if (!ret) { /* Turn off AE */
+ info->exposure = value;
+ ret = sr030pc30_enable_autoexposure(sd, 0);
+ }
+ return ret;
+}
+
+/* Automatic white balance control */
+static int sr030pc30_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ int ret = cam_i2c_write(sd, AWB_CTL2_REG, on ? 0x2E : 0x2F);
+ if (!ret)
+ ret = cam_i2c_write(sd, AWB_CTL1_REG, on ? 0xFB : 0x7B);
+ if (!ret)
+ info->auto_wb = on;
+
+ return ret;
+}
+
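+/* Apply the cached hflip/vflip flags to VDO_CTL2 with a read-modify-write. */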
+static int sr030pc30_set_flip(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ s32 reg = cam_i2c_read(sd, VDO_CTL2_REG);
+ if (reg < 0)
+ return reg;
+
+ reg &= 0x7C;
+ if (info->hflip)
+ reg |= 0x01;
+ if (info->vflip)
+ reg |= 0x02;
+ return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80);
+}
+
+/* Configure resolution, color format and image flip */
+static int sr030pc30_set_params(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+
+ if (!info->curr_win)
+ return -EINVAL;
+
+ /* Configure the resolution through subsampling */
+ ret = cam_i2c_write(sd, VDO_CTL1_REG,
+ info->curr_win->vid_ctl1);
+
+ if (!ret && info->curr_fmt)
+ ret = cam_i2c_write(sd, ISP_CTL_REG(0),
+ info->curr_fmt->ispctl1_reg);
+ if (!ret)
+ ret = sr030pc30_set_flip(sd);
+
+ return ret;
+}
+
+/* Find nearest matching image pixel size. */
+static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf)
+{
+ unsigned int min_err = ~0;
+ int i = ARRAY_SIZE(sr030pc30_sizes);
+ const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0],
+ *match = NULL;
+ while (i--) {
+ int err = abs(fsize->width - mf->width)
+ + abs(fsize->height - mf->height);
+ if (err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ fsize++;
+ }
+ if (match) {
+ mf->width = match->width;
+ mf->height = match->height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int sr030pc30_queryctrl(struct v4l2_subdev *sd,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
+ if (qc->id == sr030pc30_ctrl[i].id) {
+ *qc = sr030pc30_ctrl[i];
+ v4l2_dbg(1, debug, sd, "%s id: %d\n",
+ __func__, qc->id);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int sr030pc30_set_bluebalance(struct v4l2_subdev *sd, int value)
+{
+ int ret = cam_i2c_write(sd, MWB_BGAIN_REG, value);
+ if (!ret)
+ to_sr030pc30(sd)->blue_balance = value;
+ return ret;
+}
+
+static inline int sr030pc30_set_redbalance(struct v4l2_subdev *sd, int value)
+{
+ int ret = cam_i2c_write(sd, MWB_RGAIN_REG, value);
+ if (!ret)
+ to_sr030pc30(sd)->red_balance = value;
+ return ret;
+}
+
+static int sr030pc30_s_ctrl(struct v4l2_subdev *sd,
+ struct v4l2_control *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
+ if (ctrl->id == sr030pc30_ctrl[i].id)
+ break;
+
+ if (i == ARRAY_SIZE(sr030pc30_ctrl))
+ return -EINVAL;
+
+ if (ctrl->value < sr030pc30_ctrl[i].minimum ||
+ ctrl->value > sr030pc30_ctrl[i].maximum)
+ return -ERANGE;
+
+ v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
+ __func__, ctrl->id, ctrl->value);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ sr030pc30_enable_autowhitebalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = sr030pc30_set_bluebalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = sr030pc30_set_redbalance(sd, ctrl->value);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ sr030pc30_enable_autoexposure(sd,
+ ctrl->value == V4L2_EXPOSURE_AUTO);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = sr030pc30_set_exposure(sd, ctrl->value);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int sr030pc30_g_ctrl(struct v4l2_subdev *sd,
+ struct v4l2_control *ctrl)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: id: %d\n", __func__, ctrl->id);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->value = info->auto_wb;
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ctrl->value = info->blue_balance;
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ctrl->value = info->red_balance;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ctrl->value = info->auto_exp;
+ break;
+ case V4L2_CID_EXPOSURE:
+ ctrl->value = info->exposure;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (!code || index >= ARRAY_SIZE(sr030pc30_formats))
+ return -EINVAL;
+
+ *code = sr030pc30_formats[index].code;
+ return 0;
+}
+
+static int sr030pc30_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+
+ if (!mf)
+ return -EINVAL;
+
+ if (!info->curr_win || !info->curr_fmt) {
+ ret = sr030pc30_set_params(sd);
+ if (ret)
+ return ret;
+ }
+
+ mf->width = info->curr_win->width;
+ mf->height = info->curr_win->height;
+ mf->code = info->curr_fmt->code;
+ mf->colorspace = info->curr_fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* Adjust *mf to the nearest supported frame size and media bus code. */
+static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ int i = ARRAY_SIZE(sr030pc30_formats);
+
+ sr030pc30_try_frame_size(mf);
+
+ while (i--)
+ if (mf->code == sr030pc30_formats[i].code)
+ break;
+
+ mf->code = sr030pc30_formats[i].code;
+
+ return &sr030pc30_formats[i];
+}
+
+/* Return nearest media bus frame format. */
+static int sr030pc30_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (!sd || !mf)
+ return -EINVAL;
+
+ try_fmt(sd, mf);
+ return 0;
+}
+
+static int sr030pc30_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ if (!sd || !mf)
+ return -EINVAL;
+
+ info->curr_fmt = try_fmt(sd, mf);
+
+ return sr030pc30_set_params(sd);
+}
+
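+/*
+ * Write the default register table, apply the initial window and format,
+ * and program the manual exposure range limits derived from the platform
+ * clock rate (when platform data is present).
+ */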
+static int sr030pc30_base_config(struct v4l2_subdev *sd)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ int ret;
+ unsigned long expmin, expmax;
+
+ ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs);
+ if (!ret) {
+ info->curr_fmt = &sr030pc30_formats[0];
+ info->curr_win = &sr030pc30_sizes[0];
+ ret = sr030pc30_set_params(sd);
+ }
+ if (!ret)
+ ret = sr030pc30_pwr_ctrl(sd, false, false);
+
+ if (!ret && !info->pdata)
+ return ret;
+
+ expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000);
+ expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000);
+
+ v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__,
+ expmin, expmax);
+
+ /* Setting up manual exposure time range */
+ ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF);
+ if (!ret)
+ ret = cam_i2c_write(sd, EXP_MMAXL_REG, expmax & 0xFF);
+
+ return ret;
+}
+
+static int sr030pc30_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ info->pdata = platform_data;
+ return 0;
+}
+
+static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+ const struct sr030pc30_platform_data *pdata = info->pdata;
+	int ret = 0;
+
+ if (WARN(pdata == NULL, "No platform data!"))
+ return -ENOMEM;
+
+ /*
+ * Put sensor into power sleep mode before switching off
+ * power and disabling MCLK.
+ */
+ if (!on)
+ sr030pc30_pwr_ctrl(sd, false, true);
+
+ /* set_power controls sensor's power and clock */
+ if (pdata->set_power) {
+ ret = pdata->set_power(&client->dev, on);
+ if (ret)
+ return ret;
+ }
+
+ if (on) {
+ ret = sr030pc30_base_config(sd);
+ } else {
+ info->curr_win = NULL;
+ info->curr_fmt = NULL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
+ .s_config = sr030pc30_s_config,
+ .s_power = sr030pc30_s_power,
+ .queryctrl = sr030pc30_queryctrl,
+ .s_ctrl = sr030pc30_s_ctrl,
+ .g_ctrl = sr030pc30_g_ctrl,
+};
+
+static const struct v4l2_subdev_video_ops sr030pc30_video_ops = {
+ .s_stream = sr030pc30_s_stream,
+ .g_mbus_fmt = sr030pc30_g_fmt,
+ .s_mbus_fmt = sr030pc30_s_fmt,
+ .try_mbus_fmt = sr030pc30_try_fmt,
+ .enum_mbus_fmt = sr030pc30_enum_fmt,
+};
+
+static const struct v4l2_subdev_ops sr030pc30_ops = {
+ .core = &sr030pc30_core_ops,
+ .video = &sr030pc30_video_ops,
+};
+
+/*
+ * Detect sensor type. Return 0 if SR030PC30 was detected
+ * or -ENODEV otherwise.
+ */
+static int sr030pc30_detect(struct i2c_client *client)
+{
+ const struct sr030pc30_platform_data *pdata
+ = client->dev.platform_data;
+ int ret;
+
+ /* Enable sensor's power and clock */
+ if (pdata->set_power) {
+ ret = pdata->set_power(&client->dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
+
+ if (pdata->set_power)
+ pdata->set_power(&client->dev, 0);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: I2C read failed\n", __func__);
+ return ret;
+ }
+
+ return ret == SR030PC30_ID ? 0 : -ENODEV;
+}
+
+
+static int sr030pc30_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sr030pc30_info *info;
+ struct v4l2_subdev *sd;
+ const struct sr030pc30_platform_data *pdata
+ = client->dev.platform_data;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&client->dev, "No platform data!");
+ return -EIO;
+ }
+
+ ret = sr030pc30_detect(client);
+ if (ret)
+ return ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sd = &info->sd;
+ strcpy(sd->name, MODULE_NAME);
+ info->pdata = client->dev.platform_data;
+
+ v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops);
+
+ info->i2c_reg_page = -1;
+ info->hflip = 1;
+ info->auto_exp = 1;
+ info->exposure = 30;
+
+ return 0;
+}
+
+static int sr030pc30_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct sr030pc30_info *info = to_sr030pc30(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ kfree(info);
+ return 0;
+}
+
+static const struct i2c_device_id sr030pc30_id[] = {
+ { MODULE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sr030pc30_id);
+
+
+static struct i2c_driver sr030pc30_i2c_driver = {
+ .driver = {
+ .name = MODULE_NAME
+ },
+ .probe = sr030pc30_probe,
+ .remove = sr030pc30_remove,
+ .id_table = sr030pc30_id,
+};
+
+static int __init sr030pc30_init(void)
+{
+ return i2c_add_driver(&sr030pc30_i2c_driver);
+}
+
+static void __exit sr030pc30_exit(void)
+{
+ i2c_del_driver(&sr030pc30_i2c_driver);
+}
+
+module_init(sr030pc30_init);
+module_exit(sr030pc30_exit);
+
+MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 80f1cee23fa5..3941f954daf4 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -36,7 +36,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/i2c-addr.h>
-#include <media/v4l2-i2c-drv.h>
#ifndef VIDEO_AUDIO_BALANCE
# define VIDEO_AUDIO_BALANCE 32
@@ -472,9 +471,25 @@ static const struct i2c_device_id tda7432_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda7432_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda7432",
- .probe = tda7432_probe,
- .remove = tda7432_remove,
- .id_table = tda7432_id,
+static struct i2c_driver tda7432_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda7432",
+ },
+ .probe = tda7432_probe,
+ .remove = tda7432_remove,
+ .id_table = tda7432_id,
};
+
+static __init int init_tda7432(void)
+{
+ return i2c_add_driver(&tda7432_driver);
+}
+
+static __exit void exit_tda7432(void)
+{
+ i2c_del_driver(&tda7432_driver);
+}
+
+module_init(init_tda7432);
+module_exit(exit_tda7432);
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 92d22d8931c1..5d4cf3b3d435 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_DESCRIPTION("tda9840 driver");
@@ -199,9 +198,25 @@ static const struct i2c_device_id tda9840_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda9840_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda9840",
- .probe = tda9840_probe,
- .remove = tda9840_remove,
- .id_table = tda9840_id,
+static struct i2c_driver tda9840_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda9840",
+ },
+ .probe = tda9840_probe,
+ .remove = tda9840_remove,
+ .id_table = tda9840_id,
};
+
+static __init int init_tda9840(void)
+{
+ return i2c_add_driver(&tda9840_driver);
+}
+
+static __exit void exit_tda9840(void)
+{
+ i2c_del_driver(&tda9840_driver);
+}
+
+module_init(init_tda9840);
+module_exit(exit_tda9840);
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index 24e2b7d2ae58..35b6ff5db319 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -28,7 +28,6 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/i2c-addr.h>
static int debug; /* insmod parameter */
@@ -388,9 +387,25 @@ static const struct i2c_device_id tda9875_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tda9875_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tda9875",
- .probe = tda9875_probe,
- .remove = tda9875_remove,
- .id_table = tda9875_id,
+static struct i2c_driver tda9875_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda9875",
+ },
+ .probe = tda9875_probe,
+ .remove = tda9875_remove,
+ .id_table = tda9875_id,
};
+
+static __init int init_tda9875(void)
+{
+ return i2c_add_driver(&tda9875_driver);
+}
+
+static __exit void exit_tda9875(void)
+{
+ i2c_del_driver(&tda9875_driver);
+}
+
+module_init(init_tda9875);
+module_exit(exit_tda9875);
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index 3021a1e6b7bb..3e99cea8e4dc 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -34,7 +34,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "tea6415c.h"
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
@@ -175,9 +174,25 @@ static const struct i2c_device_id tea6415c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tea6415c_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tea6415c",
- .probe = tea6415c_probe,
- .remove = tea6415c_remove,
- .id_table = tea6415c_id,
+static struct i2c_driver tea6415c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tea6415c",
+ },
+ .probe = tea6415c_probe,
+ .remove = tea6415c_remove,
+ .id_table = tea6415c_id,
};
+
+static __init int init_tea6415c(void)
+{
+ return i2c_add_driver(&tea6415c_driver);
+}
+
+static __exit void exit_tea6415c(void)
+{
+ i2c_del_driver(&tea6415c_driver);
+}
+
+module_init(init_tea6415c);
+module_exit(exit_tea6415c);
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index 49dafc5e1e2f..5ea840401f21 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -34,7 +34,6 @@
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include "tea6420.h"
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
@@ -157,9 +156,25 @@ static const struct i2c_device_id tea6420_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tea6420_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tea6420",
- .probe = tea6420_probe,
- .remove = tea6420_remove,
- .id_table = tea6420_id,
+static struct i2c_driver tea6420_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tea6420",
+ },
+ .probe = tea6420_probe,
+ .remove = tea6420_remove,
+ .id_table = tea6420_id,
};
+
+static __init int init_tea6420(void)
+{
+ return i2c_add_driver(&tea6420_driver);
+}
+
+static __exit void exit_tea6420(void)
+{
+ i2c_del_driver(&tea6420_driver);
+}
+
+module_init(init_tea6420);
+module_exit(exit_tea6420);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index d0cc012f7ae6..a1ffe18640fe 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -1434,7 +1434,7 @@ static int pd_video_open(struct file *file)
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,/* video is interlaced */
sizeof(struct videobuf_buffer),/*it's enough*/
- front);
+ front, NULL);
} else if (vfd->vfl_type == VFL_TYPE_VBI
&& !(pd->state & POSEIDON_STATE_VBI)) {
front = kzalloc(sizeof(struct front_face), GFP_KERNEL);
@@ -1451,7 +1451,7 @@ static int pd_video_open(struct file *file)
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_NONE, /* vbi is NONE mode */
sizeof(struct videobuf_buffer),
- front);
+ front, NULL);
} else {
/* maybe add FM support here */
log("other ");
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index 9ddb32bc7af0..dfc4dd7c5097 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -29,10 +29,8 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("tlv320aic23b driver");
MODULE_AUTHOR("Scott Alfter, Ulf Eklund, Hans Verkuil");
@@ -199,9 +197,25 @@ static const struct i2c_device_id tlv320aic23b_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tlv320aic23b",
- .probe = tlv320aic23b_probe,
- .remove = tlv320aic23b_remove,
- .id_table = tlv320aic23b_id,
+static struct i2c_driver tlv320aic23b_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tlv320aic23b",
+ },
+ .probe = tlv320aic23b_probe,
+ .remove = tlv320aic23b_remove,
+ .id_table = tlv320aic23b_id,
};
+
+static __init int init_tlv320aic23b(void)
+{
+ return i2c_add_driver(&tlv320aic23b_driver);
+}
+
+static __exit void exit_tlv320aic23b(void)
+{
+ i2c_del_driver(&tlv320aic23b_driver);
+}
+
+module_init(init_tlv320aic23b);
+module_exit(exit_tlv320aic23b);
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index c4dab6cfd948..1cec1224913f 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -20,7 +20,6 @@
#include <media/tuner-types.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-i2c-drv.h>
#include "mt20xx.h"
#include "tda8290.h"
#include "tea5761.h"
@@ -428,6 +427,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
{
struct tda18271_config cfg = {
.config = t->config,
+ .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
@@ -1053,12 +1053,6 @@ static int tuner_probe(struct i2c_client *client,
printk(KERN_CONT "%02x ", buffer[i]);
printk("\n");
}
- /* HACK: This test was added to avoid tuner to probe tda9840 and
- tea6415c on the MXB card */
- if (client->adapter->id == I2C_HW_SAA7146 && client->addr < 0x4a) {
- kfree(t);
- return -ENODEV;
- }
/* autodetection code based on the i2c addr */
if (!no_autodetect) {
@@ -1176,16 +1170,32 @@ static const struct i2c_device_id tuner_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tuner_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tuner",
- .probe = tuner_probe,
- .remove = tuner_remove,
- .command = tuner_command,
- .suspend = tuner_suspend,
- .resume = tuner_resume,
- .id_table = tuner_id,
+static struct i2c_driver tuner_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tuner",
+ },
+ .probe = tuner_probe,
+ .remove = tuner_remove,
+ .command = tuner_command,
+ .suspend = tuner_suspend,
+ .resume = tuner_resume,
+ .id_table = tuner_id,
};
+static __init int init_tuner(void)
+{
+ return i2c_add_driver(&tuner_driver);
+}
+
+static __exit void exit_tuner(void)
+{
+ i2c_del_driver(&tuner_driver);
+}
+
+module_init(init_tuner);
+module_exit(exit_tuner);
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 800fc1b111ef..a25e2b5e1944 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -35,7 +35,6 @@
#include <media/tvaudio.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/i2c-addr.h>
@@ -1227,18 +1226,6 @@ static int tea6320_initialize(struct CHIPSTATE * chip)
static int tda8425_shift10(int val) { return (val >> 10) | 0xc0; }
static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; }
-static int tda8425_initialize(struct CHIPSTATE *chip)
-{
- struct CHIPDESC *desc = chip->desc;
- struct i2c_client *c = v4l2_get_subdevdata(&chip->sd);
- int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1,
- /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF};
-
- if (c->adapter->id == I2C_HW_B_RIVA)
- memcpy(desc->inputmap, inputmap, sizeof(inputmap));
- return 0;
-}
-
static void tda8425_setmode(struct CHIPSTATE *chip, int mode)
{
int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1;
@@ -1574,7 +1561,6 @@ static struct CHIPDESC chiplist[] = {
.treblereg = TDA8425_TR,
/* callbacks */
- .initialize = tda8425_initialize,
.volfunc = tda8425_shift10,
.bassfunc = tda8425_shift12,
.treblefunc = tda8425_shift12,
@@ -2079,9 +2065,25 @@ static const struct i2c_device_id tvaudio_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvaudio_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tvaudio",
- .probe = tvaudio_probe,
- .remove = tvaudio_remove,
- .id_table = tvaudio_id,
+static struct i2c_driver tvaudio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tvaudio",
+ },
+ .probe = tvaudio_probe,
+ .remove = tvaudio_remove,
+ .id_table = tvaudio_id,
};
+
+static __init int init_tvaudio(void)
+{
+ return i2c_add_driver(&tvaudio_driver);
+}
+
+static __exit void exit_tvaudio(void)
+{
+ i2c_del_driver(&tvaudio_driver);
+}
+
+module_init(init_tvaudio);
+module_exit(exit_tvaudio);
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 71c73fa0d68c..45bcf0358a1d 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -35,6 +35,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-mediabus.h>
#include <media/v4l2-chip-ident.h>
#include <media/tvp514x.h>
@@ -929,69 +930,51 @@ tvp514x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/**
- * tvp514x_enum_fmt_cap() - V4L2 decoder interface handler for enum_fmt
+ * tvp514x_enum_mbus_fmt() - V4L2 decoder interface handler for enum_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @fmt: standard V4L2 VIDIOC_ENUM_FMT ioctl structure
+ * @index: index of pixelcode to retrieve
+ * @code: receives the pixelcode
*
- * Implement the VIDIOC_ENUM_FMT ioctl to enumerate supported formats
+ * Enumerates supported mediabus formats
*/
static int
-tvp514x_enum_fmt_cap(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmt)
+tvp514x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
- if (fmt == NULL || fmt->index)
+ if (index)
return -EINVAL;
- if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- /* only capture is supported */
- return -EINVAL;
-
- /* only one format */
- fmt->flags = 0;
- strlcpy(fmt->description, "8-bit UYVY 4:2:2 Format",
- sizeof(fmt->description));
- fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ *code = V4L2_MBUS_FMT_YUYV10_2X10;
return 0;
}
/**
- * tvp514x_fmt_cap() - V4L2 decoder interface handler for try/s/g_fmt
+ * tvp514x_mbus_fmt_cap() - V4L2 decoder interface handler for try/s/g_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure
+ * @f: pointer to the mediabus format structure
*
- * Implement the VIDIOC_TRY/S/G_FMT ioctl for the CAPTURE buffer type. This
- * ioctl is used to negotiate the image capture size and pixel format.
+ * Negotiates the image capture size and mediabus format.
*/
static int
-tvp514x_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f)
+tvp514x_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
- struct v4l2_pix_format *pix;
enum tvp514x_std current_std;
if (f == NULL)
return -EINVAL;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- pix = &f->fmt.pix;
-
/* Calculate height and width based on current standard */
current_std = decoder->current_std;
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->width = decoder->std_list[current_std].width;
- pix->height = decoder->std_list[current_std].height;
- pix->field = V4L2_FIELD_INTERLACED;
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- pix->priv = 0;
-
- v4l2_dbg(1, debug, sd, "FMT: bytesperline - %d"
- "Width - %d, Height - %d\n",
- pix->bytesperline,
- pix->width, pix->height);
+ f->code = V4L2_MBUS_FMT_YUYV10_2X10;
+ f->width = decoder->std_list[current_std].width;
+ f->height = decoder->std_list[current_std].height;
+ f->field = V4L2_FIELD_INTERLACED;
+ f->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ v4l2_dbg(1, debug, sd, "MBUS_FMT: Width - %d, Height - %d\n",
+ f->width, f->height);
return 0;
}
@@ -1131,10 +1114,10 @@ static const struct v4l2_subdev_core_ops tvp514x_core_ops = {
static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
.s_routing = tvp514x_s_routing,
.querystd = tvp514x_querystd,
- .enum_fmt = tvp514x_enum_fmt_cap,
- .g_fmt = tvp514x_fmt_cap,
- .try_fmt = tvp514x_fmt_cap,
- .s_fmt = tvp514x_fmt_cap,
+ .enum_mbus_fmt = tvp514x_enum_mbus_fmt,
+ .g_mbus_fmt = tvp514x_mbus_fmt,
+ .try_mbus_fmt = tvp514x_mbus_fmt,
+ .s_mbus_fmt = tvp514x_mbus_fmt,
.g_parm = tvp514x_g_parm,
.s_parm = tvp514x_s_parm,
.s_stream = tvp514x_s_stream,
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 1654f65cca7c..58927664d3ea 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <media/v4l2-device.h>
#include <media/tvp5150.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-chip-ident.h>
#include "tvp5150_reg.h"
@@ -277,7 +276,7 @@ static int tvp5150_log_status(struct v4l2_subdev *sd)
static inline void tvp5150_selmux(struct v4l2_subdev *sd)
{
- int opmode=0;
+ int opmode = 0;
struct tvp5150 *decoder = to_tvp5150(sd);
int input = 0;
unsigned char val;
@@ -290,12 +289,10 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
input |= 2;
/* fall through */
case TVP5150_COMPOSITE0:
- opmode=0x30; /* TV Mode */
break;
case TVP5150_SVIDEO:
default:
input |= 1;
- opmode=0; /* Auto Mode */
break;
}
@@ -1111,9 +1108,25 @@ static const struct i2c_device_id tvp5150_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvp5150_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "tvp5150",
- .probe = tvp5150_probe,
- .remove = tvp5150_remove,
- .id_table = tvp5150_id,
+static struct i2c_driver tvp5150_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tvp5150",
+ },
+ .probe = tvp5150_probe,
+ .remove = tvp5150_remove,
+ .id_table = tvp5150_id,
};
+
+static __init int init_tvp5150(void)
+{
+ return i2c_add_driver(&tvp5150_driver);
+}
+
+static __exit void exit_tvp5150(void)
+{
+ i2c_del_driver(&tvp5150_driver);
+}
+
+module_init(init_tvp5150);
+module_exit(exit_tvp5150);
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 48f5c76ab521..e63b40f5a706 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -330,19 +330,6 @@ static const struct i2c_reg_value tvp7002_parms_720P50[] = {
{ TVP7002_EOR, 0xff, TVP7002_RESERVED }
};
-/* Struct list for available formats */
-static const struct v4l2_fmtdesc tvp7002_fmt_list[] = {
- {
- .index = 0,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .flags = 0,
- .description = "8-bit UYVY 4:2:2 Format",
- .pixelformat = V4L2_PIX_FMT_UYVY,
- },
-};
-
-#define NUM_FORMATS ARRAY_SIZE(tvp7002_fmt_list)
-
/* Preset definition for handling device operation */
struct tvp7002_preset_definition {
u32 preset;
@@ -439,7 +426,6 @@ struct tvp7002 {
int ver;
int streaming;
- struct v4l2_pix_format pix;
const struct tvp7002_preset_definition *current_preset;
u8 gain;
};
@@ -695,81 +681,33 @@ static int tvp7002_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
}
/*
- * tvp7002_try_fmt_cap() - V4L2 decoder interface handler for try_fmt
+ * tvp7002_mbus_fmt() - V4L2 decoder interface handler for try/s/g_mbus_fmt
* @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_TRY_FMT ioctl structure
+ * @f: pointer to mediabus format structure
*
- * Implement the VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type. This
- * ioctl is used to negotiate the image capture size and pixel format
- * without actually making it take effect.
+ * Negotiate the image capture size and mediabus format.
+ * There is only one possible format, so this single function works for
+ * get, set and try.
*/
-static int tvp7002_try_fmt_cap(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f)
{
struct tvp7002 *device = to_tvp7002(sd);
struct v4l2_dv_enum_preset e_preset;
- struct v4l2_pix_format *pix;
- int error = 0;
-
- pix = &f->fmt.pix;
+ int error;
/* Calculate height and width based on current standard */
error = v4l_fill_dv_preset_info(device->current_preset->preset, &e_preset);
if (error)
- return -EINVAL;
-
- pix->width = e_preset.width;
- pix->height = e_preset.height;
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->field = device->current_preset->scanmode;
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = device->current_preset->color_space;
- pix->priv = 0;
-
- v4l2_dbg(1, debug, sd, "Try FMT: pixelformat - %s, bytesperline - %d"
- "Width - %d, Height - %d", "8-bit UYVY 4:2:2 Format",
- pix->bytesperline, pix->width, pix->height);
- return error;
-}
-
-/*
- * tvp7002_s_fmt() - V4L2 decoder interface handler for s_fmt
- * @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 VIDIOC_S_FMT ioctl structure
- *
- * If the requested format is supported, configures the HW to use that
- * format, returns error code if format not supported or HW can't be
- * correctly configured.
- */
-static int tvp7002_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
-{
- struct tvp7002 *decoder = to_tvp7002(sd);
- int rval;
-
- rval = tvp7002_try_fmt_cap(sd, f);
- if (!rval)
- decoder->pix = f->fmt.pix;
- return rval;
-}
-
-/*
- * tvp7002_g_fmt() - V4L2 decoder interface handler for tvp7002_g_fmt
- * @sd: pointer to standard V4L2 sub-device structure
- * @f: pointer to standard V4L2 v4l2_format structure
- *
- * Returns the decoder's current pixel format in the v4l2_format
- * parameter.
- */
-static int tvp7002_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
-{
- struct tvp7002 *decoder = to_tvp7002(sd);
+ return error;
- f->fmt.pix = decoder->pix;
+ f->width = e_preset.width;
+ f->height = e_preset.height;
+ f->code = V4L2_MBUS_FMT_YUYV10_1X20;
+ f->field = device->current_preset->scanmode;
+ f->colorspace = device->current_preset->color_space;
- v4l2_dbg(1, debug, sd, "Current FMT: bytesperline - %d"
- "Width - %d, Height - %d",
- decoder->pix.bytesperline,
- decoder->pix.width, decoder->pix.height);
+ v4l2_dbg(1, debug, sd, "MBUS_FMT: Width - %d, Height - %d",
+ f->width, f->height);
return 0;
}
@@ -894,21 +832,21 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
#endif
/*
- * tvp7002_enum_fmt() - Enum supported formats
+ * tvp7002_enum_mbus_fmt() - Enum supported mediabus formats
* @sd: pointer to standard V4L2 sub-device structure
- * @fmtdesc: pointer to format struct
+ * @index: format index
+ * @code: pointer to mediabus format
*
- * Enumerate supported formats.
+ * Enumerate supported mediabus formats.
*/
-static int tvp7002_enum_fmt(struct v4l2_subdev *sd,
- struct v4l2_fmtdesc *fmtdesc)
+static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
{
/* Check requested format index is within range */
- if (fmtdesc->index < 0 || fmtdesc->index >= NUM_FORMATS)
+ if (index)
return -EINVAL;
- *fmtdesc = tvp7002_fmt_list[fmtdesc->index];
-
+ *code = V4L2_MBUS_FMT_YUYV10_1X20;
return 0;
}
@@ -1027,9 +965,10 @@ static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
.s_stream = tvp7002_s_stream,
- .g_fmt = tvp7002_g_fmt,
- .s_fmt = tvp7002_s_fmt,
- .enum_fmt = tvp7002_enum_fmt,
+ .g_mbus_fmt = tvp7002_mbus_fmt,
+ .try_mbus_fmt = tvp7002_mbus_fmt,
+ .s_mbus_fmt = tvp7002_mbus_fmt,
+ .enum_mbus_fmt = tvp7002_enum_mbus_fmt,
};
/* V4L2 top level operation handlers */
@@ -1040,17 +979,6 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
static struct tvp7002 tvp7002_dev = {
.streaming = 0,
-
- .pix = {
- .width = 1280,
- .height = 720,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_NONE,
- .bytesperline = 1280 * 2,
- .sizeimage = 1280 * 2 * 720,
- .colorspace = V4L2_COLORSPACE_REC709,
- },
-
.current_preset = tvp7002_presets,
.gain = 0,
};
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index a727962781a3..0347bbe36459 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -469,7 +469,7 @@ tw9910_select_norm(struct soc_camera_device *icd, u32 width, u32 height)
*/
static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
u8 val;
int ret;
@@ -511,7 +511,7 @@ static int tw9910_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 val = VSSL_VVALID | HSSL_DVALID;
/*
@@ -565,7 +565,7 @@ static int tw9910_enum_input(struct soc_camera_device *icd,
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
id->ident = V4L2_IDENT_TW9910;
@@ -578,7 +578,7 @@ static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
static int tw9910_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
if (reg->reg > 0xff)
@@ -600,7 +600,7 @@ static int tw9910_g_register(struct v4l2_subdev *sd,
static int tw9910_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
if (reg->reg > 0xff ||
reg->val > 0xff)
@@ -613,7 +613,7 @@ static int tw9910_s_register(struct v4l2_subdev *sd,
static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
struct soc_camera_device *icd = client->dev.platform_data;
int ret = -EINVAL;
@@ -701,7 +701,7 @@ tw9910_set_fmt_error:
static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
if (!priv->scale) {
@@ -748,7 +748,7 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int tw9910_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
if (!priv->scale) {
@@ -778,7 +778,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
static int tw9910_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
/* See tw9910_s_crop() - no proper cropping support */
struct v4l2_crop a = {
@@ -813,7 +813,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd,
static int tw9910_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = sd->priv;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct soc_camera_device *icd = client->dev.platform_data;
const struct tw9910_scale_ctrl *scale;
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
index 36c0c461d8be..f8138c75be8b 100644
--- a/drivers/media/video/upd64031a.c
+++ b/drivers/media/video/upd64031a.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/upd64031a.h>
/* --------------------- read registers functions define -------------------- */
@@ -262,9 +261,25 @@ static const struct i2c_device_id upd64031a_id[] = {
};
MODULE_DEVICE_TABLE(i2c, upd64031a_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "upd64031a",
- .probe = upd64031a_probe,
- .remove = upd64031a_remove,
- .id_table = upd64031a_id,
+static struct i2c_driver upd64031a_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "upd64031a",
+ },
+ .probe = upd64031a_probe,
+ .remove = upd64031a_remove,
+ .id_table = upd64031a_id,
};
+
+static __init int init_upd64031a(void)
+{
+ return i2c_add_driver(&upd64031a_driver);
+}
+
+static __exit void exit_upd64031a(void)
+{
+ i2c_del_driver(&upd64031a_driver);
+}
+
+module_init(init_upd64031a);
+module_exit(exit_upd64031a);
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index c5af93b30a2b..28e0e6b6ca84 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/upd64083.h>
MODULE_DESCRIPTION("uPD64083 driver");
@@ -234,9 +233,25 @@ static const struct i2c_device_id upd64083_id[] = {
};
MODULE_DEVICE_TABLE(i2c, upd64083_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "upd64083",
- .probe = upd64083_probe,
- .remove = upd64083_remove,
- .id_table = upd64083_id,
+static struct i2c_driver upd64083_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "upd64083",
+ },
+ .probe = upd64083_probe,
+ .remove = upd64083_remove,
+ .id_table = upd64083_id,
};
+
+static __init int init_upd64083(void)
+{
+ return i2c_add_driver(&upd64083_driver);
+}
+
+static __exit void exit_upd64083(void)
+{
+ i2c_del_driver(&upd64083_driver);
+}
+
+module_init(init_upd64083);
+module_exit(exit_upd64083);
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index d6e16959f78b..dfa7fc68a657 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -12,10 +12,13 @@ config USB_VICAM
module will be called vicam.
config USB_IBMCAM
- tristate "USB IBM (Xirlink) C-it Camera support"
+ tristate "USB IBM (Xirlink) C-it Camera support (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED; please use the gspca xirlink_cit module
+ instead.
+
Say Y here if you want to connect an IBM "C-It" camera, also known as
"Xirlink PC Camera" to your computer's USB port.
@@ -27,10 +30,13 @@ config USB_IBMCAM
<file:Documentation/video4linux/ibmcam.txt> to learn more.
config USB_KONICAWC
- tristate "USB Konica Webcam support"
+ tristate "USB Konica Webcam support (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED (and known to crash); please use the
+ gspca konica module instead.
+
Say Y here if you want support for webcams based on a Konica
chipset. This is known to work with the Intel YC76 webcam.
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 42ba28785750..e3bbae26e3ce 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -211,6 +211,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
I2C_CLIENT_END };
+ if (usbvision->registered_i2c)
+ return 0;
+
memcpy(&usbvision->i2c_adap, &i2c_adap_template,
sizeof(struct i2c_adapter));
@@ -248,7 +251,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
hit-and-miss. */
mdelay(10);
v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "saa7115",
+ &usbvision->i2c_adap, NULL,
"saa7115_auto", 0, saa711x_addrs);
break;
}
@@ -258,16 +261,18 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
struct tuner_setup tun_setup;
sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "tuner",
+ &usbvision->i2c_adap, NULL,
"tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
/* depending on whether we found a demod or not, select
the tuner type. */
type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap, "tuner",
+ &usbvision->i2c_adap, NULL,
"tuner", 0, v4l2_i2c_tuner_addrs(type));
+ if (sd == NULL)
+ return -ENODEV;
if (usbvision->tuner_type != -1) {
tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
tun_setup.type = usbvision->tuner_type;
@@ -275,14 +280,18 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
call_all(usbvision, tuner, s_type_addr, &tun_setup);
}
}
+ usbvision->registered_i2c = 1;
return 0;
}
int usbvision_i2c_unregister(struct usb_usbvision *usbvision)
{
+ if (!usbvision->registered_i2c)
+ return 0;
i2c_del_adapter(&(usbvision->i2c_adap));
+ usbvision->registered_i2c = 0;
PDEBUG(DBG_I2C,"i2c bus for %s unregistered", usbvision->i2c_adap.name);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index c2690df33438..db6b828594f5 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -357,7 +357,7 @@ static int usbvision_v4l2_open(struct file *file)
PDEBUG(DBG_IO, "open");
- lock_kernel();
+ mutex_lock(&usbvision->lock);
usbvision_reset_powerOffTimer(usbvision);
if (usbvision->user)
@@ -379,7 +379,6 @@ static int usbvision_v4l2_open(struct file *file)
/* If so far no errors then we shall start the camera */
if (!errCode) {
- mutex_lock(&usbvision->lock);
if (usbvision->power == 0) {
usbvision_power_on(usbvision);
usbvision_i2c_register(usbvision);
@@ -408,14 +407,13 @@ static int usbvision_v4l2_open(struct file *file)
usbvision->initialized = 0;
}
}
- mutex_unlock(&usbvision->lock);
}
/* prepare queues */
usbvision_empty_framequeues(usbvision);
PDEBUG(DBG_IO, "success");
- unlock_kernel();
+ mutex_unlock(&usbvision->lock);
return errCode;
}
@@ -1645,8 +1643,8 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
usbvision->usb_bandwidth = 0;
usbvision->user = 0;
usbvision->streaming = Stream_Off;
- usbvision_register_video(usbvision);
usbvision_configure_video(usbvision);
+ usbvision_register_video(usbvision);
mutex_unlock(&usbvision->lock);
usbvision_create_sysfs(usbvision->vdev);
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index d1b3cc0cd87f..cc4e96c8cd6c 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -363,6 +363,7 @@ struct usb_usbvision {
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
+ int registered_i2c;
struct urb *ctrlUrb;
unsigned char ctrlUrbBuffer[8];
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index a350fad0db43..f169f7736677 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1,8 +1,8 @@
/*
* uvc_ctrl.c -- USB Video Class driver - Controls
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -643,7 +643,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
static inline __u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id)
{
- return ctrl->uvc_data + id * ctrl->info->size;
+ return ctrl->uvc_data + id * ctrl->info.size;
}
static inline int uvc_test_bit(const __u8 *data, int bit)
@@ -727,7 +727,8 @@ static const __u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
static const __u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
-static int uvc_entity_match_guid(struct uvc_entity *entity, __u8 guid[16])
+static int uvc_entity_match_guid(const struct uvc_entity *entity,
+ const __u8 guid[16])
{
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_ITT_CAMERA:
@@ -765,10 +766,10 @@ static void __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id,
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
+ if (!ctrl->initialized)
continue;
- list_for_each_entry(map, &ctrl->info->mappings, list) {
+ list_for_each_entry(map, &ctrl->info.mappings, list) {
if ((map->id == v4l2_id) && !next) {
*control = ctrl;
*mapping = map;
@@ -815,36 +816,36 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
{
int ret;
- if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_DEF) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_DEF, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MIN) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_MIN) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MIN, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MAX) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_MAX) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_MAX, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_RES) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_RES) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_RES, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
@@ -862,9 +863,15 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
unsigned int i;
int ret;
+ ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
- if (ctrl == NULL)
- return -EINVAL;
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
@@ -872,18 +879,18 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
- if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR))
+ if (!(ctrl->info.flags & UVC_CONTROL_GET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR))
+ if (!(ctrl->info.flags & UVC_CONTROL_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
- return ret;
+ goto done;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_DEF) {
+ if (ctrl->info.flags & UVC_CONTROL_GET_DEF) {
v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
}
@@ -902,37 +909,39 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
}
- return 0;
+ goto done;
case V4L2_CTRL_TYPE_BOOLEAN:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 1;
v4l2_ctrl->step = 1;
- return 0;
+ goto done;
case V4L2_CTRL_TYPE_BUTTON:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 0;
v4l2_ctrl->step = 0;
- return 0;
+ goto done;
default:
break;
}
- if (ctrl->info->flags & UVC_CONTROL_GET_MIN)
+ if (ctrl->info.flags & UVC_CONTROL_GET_MIN)
v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
- if (ctrl->info->flags & UVC_CONTROL_GET_MAX)
+ if (ctrl->info.flags & UVC_CONTROL_GET_MAX)
v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
- if (ctrl->info->flags & UVC_CONTROL_GET_RES)
+ if (ctrl->info.flags & UVC_CONTROL_GET_RES)
v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- return 0;
+done:
+ mutex_unlock(&chain->ctrl_mutex);
+ return ret;
}
@@ -977,14 +986,14 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
+ if (!ctrl->initialized)
continue;
/* Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
* uvc_ctrl_get from using the cached value.
*/
- if (ctrl->info->flags & UVC_CONTROL_AUTO_UPDATE)
+ if (ctrl->info.flags & UVC_CONTROL_AUTO_UPDATE)
ctrl->loaded = 0;
if (!ctrl->dirty)
@@ -992,16 +1001,16 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
if (!rollback)
ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id,
- dev->intfnum, ctrl->info->selector,
+ dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
else
ret = 0;
if (rollback || ret < 0)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
- ctrl->info->size);
+ ctrl->info.size);
ctrl->dirty = 0;
@@ -1039,14 +1048,14 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info->flags & UVC_CONTROL_GET_CUR) == 0)
+ if (ctrl == NULL || (ctrl->info.flags & UVC_CONTROL_GET_CUR) == 0)
return -EINVAL;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info->selector,
+ chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
@@ -1081,7 +1090,7 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info->flags & UVC_CONTROL_SET_CUR) == 0)
+ if (ctrl == NULL || (ctrl->info.flags & UVC_CONTROL_SET_CUR) == 0)
return -EINVAL;
/* Clamp out of range values. */
@@ -1127,16 +1136,16 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
* needs to be loaded from the device to perform the read-modify-write
* operation.
*/
- if (!ctrl->loaded && (ctrl->info->size * 8) != mapping->size) {
- if ((ctrl->info->flags & UVC_CONTROL_GET_CUR) == 0) {
+ if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) {
+ if ((ctrl->info.flags & UVC_CONTROL_GET_CUR) == 0) {
memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- 0, ctrl->info->size);
+ 0, ctrl->info.size);
} else {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
ctrl->entity->id, chain->dev->intfnum,
- ctrl->info->selector,
+ ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
if (ret < 0)
return ret;
}
@@ -1148,7 +1157,7 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
if (!ctrl->dirty) {
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info->size);
+ ctrl->info.size);
}
mapping->set(mapping, value,
@@ -1163,12 +1172,138 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
* Dynamic controls
*/
+static void uvc_ctrl_fixup_xu_info(struct uvc_device *dev,
+ const struct uvc_control *ctrl, struct uvc_control_info *info)
+{
+ struct uvc_ctrl_fixup {
+ struct usb_device_id id;
+ u8 entity;
+ u8 selector;
+ u8 flags;
+ };
+
+ static const struct uvc_ctrl_fixup fixups[] = {
+ { { USB_DEVICE(0x046d, 0x08c2) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ { { USB_DEVICE(0x046d, 0x08cc) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ { { USB_DEVICE(0x046d, 0x0994) }, 9, 1,
+ UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
+ UVC_CONTROL_GET_DEF | UVC_CONTROL_SET_CUR |
+ UVC_CONTROL_AUTO_UPDATE },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fixups); ++i) {
+ if (!usb_match_one_id(dev->intf, &fixups[i].id))
+ continue;
+
+ if (fixups[i].entity == ctrl->entity->id &&
+ fixups[i].selector == info->selector) {
+ info->flags = fixups[i].flags;
+ return;
+ }
+ }
+}
+
+/*
+ * Query control information (size and flags) for XU controls.
+ */
+static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
+ const struct uvc_control *ctrl, struct uvc_control_info *info)
+{
+ u8 *data;
+ int ret;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ memcpy(info->entity, ctrl->entity->extension.guidExtensionCode,
+ sizeof(info->entity));
+ info->index = ctrl->index;
+ info->selector = ctrl->index + 1;
+
+ /* Query and verify the control length (GET_LEN) */
+ ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum,
+ info->selector, data, 2);
+ if (ret < 0) {
+ uvc_trace(UVC_TRACE_CONTROL,
+ "GET_LEN failed on control %pUl/%u (%d).\n",
+ info->entity, info->selector, ret);
+ goto done;
+ }
+
+ info->size = le16_to_cpup((__le16 *)data);
+
+ /* Query the control information (GET_INFO) */
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
+ info->selector, data, 1);
+ if (ret < 0) {
+ uvc_trace(UVC_TRACE_CONTROL,
+ "GET_INFO failed on control %pUl/%u (%d).\n",
+ info->entity, info->selector, ret);
+ goto done;
+ }
+
+ info->flags = UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX
+ | UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF
+ | (data[0] & UVC_CONTROL_CAP_GET ? UVC_CONTROL_GET_CUR : 0)
+ | (data[0] & UVC_CONTROL_CAP_SET ? UVC_CONTROL_SET_CUR : 0)
+ | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
+ UVC_CONTROL_AUTO_UPDATE : 0);
+
+ uvc_ctrl_fixup_xu_info(dev, ctrl, info);
+
+ uvc_trace(UVC_TRACE_CONTROL, "XU control %pUl/%u queried: len %u, "
+ "flags { get %u set %u auto %u }.\n",
+ info->entity, info->selector, info->size,
+ (info->flags & UVC_CONTROL_GET_CUR) ? 1 : 0,
+ (info->flags & UVC_CONTROL_SET_CUR) ? 1 : 0,
+ (info->flags & UVC_CONTROL_AUTO_UPDATE) ? 1 : 0);
+
+done:
+ kfree(data);
+ return ret;
+}
+
+static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
+ const struct uvc_control_info *info);
+
+static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
+ struct uvc_control *ctrl)
+{
+ struct uvc_control_info info;
+ int ret;
+
+ if (ctrl->initialized)
+ return 0;
+
+ ret = uvc_ctrl_fill_xu_info(dev, ctrl, &info);
+ if (ret < 0)
+ return ret;
+
+ ret = uvc_ctrl_add_info(dev, ctrl, &info);
+ if (ret < 0)
+ uvc_trace(UVC_TRACE_CONTROL, "Failed to initialize control "
+ "%pUl/%u on device %s entity %u\n", info.entity,
+ info.selector, dev->udev->devpath, ctrl->entity->id);
+
+ return ret;
+}
+
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control *xctrl, int set)
{
struct uvc_entity *entity;
struct uvc_control *ctrl = NULL;
unsigned int i, found = 0;
+ int restore = 0;
__u8 *data;
int ret;
@@ -1185,13 +1320,10 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Find the control. */
+ /* Find the control and perform delayed initialization if needed. */
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL)
- continue;
-
- if (ctrl->info->selector == xctrl->selector) {
+ if (ctrl->index == xctrl->selector - 1) {
found = 1;
break;
}
@@ -1203,40 +1335,48 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Validate control data size. */
- if (ctrl->info->size != xctrl->size)
- return -EINVAL;
-
- if ((set && !(ctrl->info->flags & UVC_CONTROL_SET_CUR)) ||
- (!set && !(ctrl->info->flags & UVC_CONTROL_GET_CUR)))
- return -EINVAL;
-
if (mutex_lock_interruptible(&chain->ctrl_mutex))
return -ERESTARTSYS;
+ ret = uvc_ctrl_init_xu_ctrl(chain->dev, ctrl);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ /* Validate control data size. */
+ if (ctrl->info.size != xctrl->size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((set && !(ctrl->info.flags & UVC_CONTROL_SET_CUR)) ||
+ (!set && !(ctrl->info.flags & UVC_CONTROL_GET_CUR))) {
+ ret = -EINVAL;
+ goto done;
+ }
+
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- xctrl->size);
+ ctrl->info.size);
data = uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT);
+ restore = set;
if (set && copy_from_user(data, xctrl->data, xctrl->size)) {
ret = -EFAULT;
- goto out;
+ goto done;
}
ret = uvc_query_ctrl(chain->dev, set ? UVC_SET_CUR : UVC_GET_CUR,
xctrl->unit, chain->dev->intfnum, xctrl->selector,
data, xctrl->size);
if (ret < 0)
- goto out;
+ goto done;
- if (!set && copy_to_user(xctrl->data, data, xctrl->size)) {
+ if (!set && copy_to_user(xctrl->data, data, xctrl->size))
ret = -EFAULT;
- goto out;
- }
-
-out:
- if (ret)
+done:
+ if (ret && restore)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
xctrl->size);
@@ -1271,13 +1411,13 @@ int uvc_ctrl_resume_device(struct uvc_device *dev)
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
- if (ctrl->info == NULL || !ctrl->modified ||
- (ctrl->info->flags & UVC_CONTROL_RESTORE) == 0)
+ if (!ctrl->initialized || !ctrl->modified ||
+ (ctrl->info.flags & UVC_CONTROL_RESTORE) == 0)
continue;
printk(KERN_INFO "restoring control %pUl/%u/%u\n",
- ctrl->info->entity, ctrl->info->index,
- ctrl->info->selector);
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
@@ -1293,201 +1433,150 @@ int uvc_ctrl_resume_device(struct uvc_device *dev)
* Control and mapping handling
*/
-static int uvc_ctrl_add_ctrl(struct uvc_device *dev,
- struct uvc_control_info *info)
+/*
+ * Add control information to a given control.
+ */
+static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
+ const struct uvc_control_info *info)
{
- struct uvc_entity *entity;
- struct uvc_control *ctrl = NULL;
- int ret = 0, found = 0;
- unsigned int i;
- u8 *uvc_info;
- u8 *uvc_data;
-
- list_for_each_entry(entity, &dev->entities, list) {
- if (!uvc_entity_match_guid(entity, info->entity))
- continue;
-
- for (i = 0; i < entity->ncontrols; ++i) {
- ctrl = &entity->controls[i];
- if (ctrl->index == info->index) {
- found = 1;
- break;
- }
- }
-
- if (found)
- break;
- }
-
- if (!found)
- return 0;
-
- uvc_data = kmalloc(info->size * UVC_CTRL_DATA_LAST + 1, GFP_KERNEL);
- if (uvc_data == NULL)
- return -ENOMEM;
-
- uvc_info = uvc_data + info->size * UVC_CTRL_DATA_LAST;
-
- if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
- /* Check if the device control information and length match
- * the user supplied information.
- */
- ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id,
- dev->intfnum, info->selector, uvc_data, 2);
- if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "GET_LEN failed on control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
- goto done;
- }
-
- if (info->size != le16_to_cpu(*(__le16 *)uvc_data)) {
- uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u size "
- "doesn't match user supplied value.\n",
- info->entity, info->selector);
- ret = -EINVAL;
- goto done;
- }
+ int ret = 0;
- ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
- dev->intfnum, info->selector, uvc_info, 1);
- if (ret < 0) {
- uvc_trace(UVC_TRACE_CONTROL,
- "GET_INFO failed on control %pUl/%u (%d).\n",
- info->entity, info->selector, ret);
- goto done;
- }
+ memcpy(&ctrl->info, info, sizeof(*info));
+ INIT_LIST_HEAD(&ctrl->info.mappings);
- if (((info->flags & UVC_CONTROL_GET_CUR) &&
- !(*uvc_info & UVC_CONTROL_CAP_GET)) ||
- ((info->flags & UVC_CONTROL_SET_CUR) &&
- !(*uvc_info & UVC_CONTROL_CAP_SET))) {
- uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u flags "
- "don't match supported operations.\n",
- info->entity, info->selector);
- ret = -EINVAL;
- goto done;
- }
+ /* Allocate an array to save control values (cur, def, max, etc.) */
+ ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1,
+ GFP_KERNEL);
+ if (ctrl->uvc_data == NULL) {
+ ret = -ENOMEM;
+ goto done;
}
- ctrl->info = info;
- ctrl->uvc_data = uvc_data;
- ctrl->uvc_info = uvc_info;
+ ctrl->initialized = 1;
uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
- "entity %u\n", ctrl->info->entity, ctrl->info->selector,
- dev->udev->devpath, entity->id);
+ "entity %u\n", ctrl->info.entity, ctrl->info.selector,
+ dev->udev->devpath, ctrl->entity->id);
done:
if (ret < 0)
- kfree(uvc_data);
-
+ kfree(ctrl->uvc_data);
return ret;
}
/*
- * Add an item to the UVC control information list, and instantiate a control
- * structure for each device that supports the control.
+ * Add a control mapping to a given control.
*/
-int uvc_ctrl_add_info(struct uvc_control_info *info)
+static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
+ struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
- struct uvc_control_info *ctrl;
- struct uvc_device *dev;
- int ret = 0;
-
- /* Find matching controls by walking the devices, entities and
- * controls list.
- */
- mutex_lock(&uvc_driver.ctrl_mutex);
+ struct uvc_control_mapping *map;
+ unsigned int size;
- /* First check if the list contains a control matching the new one.
- * Bail out if it does.
+ /* Most mappings come from static kernel data and need to be duplicated.
+ * Mappings that come from userspace will be unnecessarily duplicated;
+ * this could be optimized.
*/
- list_for_each_entry(ctrl, &uvc_driver.controls, list) {
- if (memcmp(ctrl->entity, info->entity, 16))
- continue;
+ map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
- if (ctrl->selector == info->selector) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Control %pUl/%u is already defined.\n",
- info->entity, info->selector);
- ret = -EEXIST;
- goto end;
- }
- if (ctrl->index == info->index) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Control %pUl/%u would overwrite index %d.\n",
- info->entity, info->selector, info->index);
- ret = -EEXIST;
- goto end;
- }
+ size = sizeof(*mapping->menu_info) * mapping->menu_count;
+ map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
+ if (map->menu_info == NULL) {
+ kfree(map);
+ return -ENOMEM;
}
- list_for_each_entry(dev, &uvc_driver.devices, list)
- uvc_ctrl_add_ctrl(dev, info);
+ if (map->get == NULL)
+ map->get = uvc_get_le_value;
+ if (map->set == NULL)
+ map->set = uvc_set_le_value;
- INIT_LIST_HEAD(&info->mappings);
- list_add_tail(&info->list, &uvc_driver.controls);
-end:
- mutex_unlock(&uvc_driver.ctrl_mutex);
- return ret;
+ map->ctrl = &ctrl->info;
+ list_add_tail(&map->list, &ctrl->info.mappings);
+ uvc_trace(UVC_TRACE_CONTROL,
+ "Adding mapping '%s' to control %pUl/%u.\n",
+ map->name, ctrl->info.entity, ctrl->info.selector);
+
+ return 0;
}
-int uvc_ctrl_add_mapping(struct uvc_control_mapping *mapping)
+int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ const struct uvc_control_mapping *mapping)
{
- struct uvc_control_info *info;
+ struct uvc_device *dev = chain->dev;
struct uvc_control_mapping *map;
- int ret = -EINVAL;
-
- if (mapping->get == NULL)
- mapping->get = uvc_get_le_value;
- if (mapping->set == NULL)
- mapping->set = uvc_set_le_value;
+ struct uvc_entity *entity;
+ struct uvc_control *ctrl;
+ int found = 0;
+ int ret;
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
- uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s' with "
- "invalid control id 0x%08x\n", mapping->name,
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control "
+ "id 0x%08x is invalid.\n", mapping->name,
mapping->id);
return -EINVAL;
}
- mutex_lock(&uvc_driver.ctrl_mutex);
- list_for_each_entry(info, &uvc_driver.controls, list) {
- if (memcmp(info->entity, mapping->entity, 16) ||
- info->selector != mapping->selector)
- continue;
+ /* Search for the matching (GUID/CS) control in the given device */
+ list_for_each_entry(entity, &dev->entities, list) {
+ unsigned int i;
- if (info->size * 8 < mapping->size + mapping->offset) {
- uvc_trace(UVC_TRACE_CONTROL,
- "Mapping '%s' would overflow control %pUl/%u\n",
- mapping->name, info->entity, info->selector);
- ret = -EOVERFLOW;
- goto end;
- }
+ if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
+ !uvc_entity_match_guid(entity, mapping->entity))
+ continue;
- /* Check if the list contains a mapping matching the new one.
- * Bail out if it does.
- */
- list_for_each_entry(map, &info->mappings, list) {
- if (map->id == mapping->id) {
- uvc_trace(UVC_TRACE_CONTROL, "Mapping '%s' is "
- "already defined.\n", mapping->name);
- ret = -EEXIST;
- goto end;
+ for (i = 0; i < entity->ncontrols; ++i) {
+ ctrl = &entity->controls[i];
+ if (ctrl->index == mapping->selector - 1) {
+ found = 1;
+ break;
}
}
- mapping->ctrl = info;
- list_add_tail(&mapping->list, &info->mappings);
- uvc_trace(UVC_TRACE_CONTROL,
- "Adding mapping %s to control %pUl/%u.\n",
- mapping->name, info->entity, info->selector);
+ if (found)
+ break;
+ }
+ if (!found)
+ return -ENOENT;
- ret = 0;
- break;
+ if (mutex_lock_interruptible(&chain->ctrl_mutex))
+ return -ERESTARTSYS;
+
+ /* Perform delayed initialization of XU controls */
+ ret = uvc_ctrl_init_xu_ctrl(dev, ctrl);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto done;
}
-end:
- mutex_unlock(&uvc_driver.ctrl_mutex);
+
+ list_for_each_entry(map, &ctrl->info.mappings, list) {
+ if (mapping->id == map->id) {
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
+ "control id 0x%08x already exists.\n",
+ mapping->name, mapping->id);
+ ret = -EEXIST;
+ goto done;
+ }
+ }
+
+ /* Prevent excess memory consumption */
+ if (atomic_inc_return(&dev->nmappings) > UVC_MAX_CONTROL_MAPPINGS) {
+ atomic_dec(&dev->nmappings);
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum "
+ "mappings count (%u) exceeded.\n", mapping->name,
+ UVC_MAX_CONTROL_MAPPINGS);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ if (ret < 0)
+ atomic_dec(&dev->nmappings);
+
+done:
+ mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1496,29 +1585,49 @@ end:
* are currently the ones that crash the camera or unconditionally return an
* error when queried.
*/
-static void
-uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
+static void uvc_ctrl_prune_entity(struct uvc_device *dev,
+ struct uvc_entity *entity)
{
- static const struct {
+ struct uvc_ctrl_blacklist {
struct usb_device_id id;
u8 index;
- } blacklist[] = {
+ };
+
+ static const struct uvc_ctrl_blacklist processing_blacklist[] = {
{ { USB_DEVICE(0x13d3, 0x509b) }, 9 }, /* Gain */
{ { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */
{ { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */
};
+ static const struct uvc_ctrl_blacklist camera_blacklist[] = {
+ { { USB_DEVICE(0x06f8, 0x3005) }, 9 }, /* Zoom, Absolute */
+ };
- u8 *controls;
+ const struct uvc_ctrl_blacklist *blacklist;
unsigned int size;
+ unsigned int count;
unsigned int i;
+ u8 *controls;
- if (UVC_ENTITY_TYPE(entity) != UVC_VC_PROCESSING_UNIT)
- return;
+ switch (UVC_ENTITY_TYPE(entity)) {
+ case UVC_VC_PROCESSING_UNIT:
+ blacklist = processing_blacklist;
+ count = ARRAY_SIZE(processing_blacklist);
+ controls = entity->processing.bmControls;
+ size = entity->processing.bControlSize;
+ break;
- controls = entity->processing.bmControls;
- size = entity->processing.bControlSize;
+ case UVC_ITT_CAMERA:
+ blacklist = camera_blacklist;
+ count = ARRAY_SIZE(camera_blacklist);
+ controls = entity->camera.bmControls;
+ size = entity->camera.bControlSize;
+ break;
- for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
+ default:
+ return;
+ }
+
+ for (i = 0; i < count; ++i) {
if (!usb_match_one_id(dev->intf, &blacklist[i].id))
continue;
@@ -1534,17 +1643,54 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
}
/*
+ * Add control information and hardcoded stock control mappings to the given
+ * device.
+ */
+static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+{
+ const struct uvc_control_info *info = uvc_ctrls;
+ const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
+ const struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
+ const struct uvc_control_mapping *mend =
+ mapping + ARRAY_SIZE(uvc_ctrl_mappings);
+
+ /* XU controls initialization requires querying the device for control
+ * information. As some buggy UVC devices will crash when queried
+ * repeatedly in a tight loop, delay XU controls initialization until
+ * first use.
+ */
+ if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
+ return;
+
+ for (; info < iend; ++info) {
+ if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
+ ctrl->index == info->index) {
+ uvc_ctrl_add_info(dev, ctrl, info);
+ break;
+ }
+ }
+
+ if (!ctrl->initialized)
+ return;
+
+ for (; mapping < mend; ++mapping) {
+ if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ ctrl->info.selector == mapping->selector)
+ __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ }
+}
+
+/*
* Initialize device controls.
*/
int uvc_ctrl_init_device(struct uvc_device *dev)
{
- struct uvc_control_info *info;
- struct uvc_control *ctrl;
struct uvc_entity *entity;
unsigned int i;
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
+ struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols = 0;
__u8 *bmControls = NULL;
@@ -1559,20 +1705,22 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
bControlSize = entity->camera.bControlSize;
}
+ /* Remove bogus/blacklisted controls */
uvc_ctrl_prune_entity(dev, entity);
+ /* Count supported controls and allocate the controls array */
for (i = 0; i < bControlSize; ++i)
ncontrols += hweight8(bmControls[i]);
-
if (ncontrols == 0)
continue;
- entity->controls = kzalloc(ncontrols*sizeof *ctrl, GFP_KERNEL);
+ entity->controls = kzalloc(ncontrols * sizeof(*ctrl),
+ GFP_KERNEL);
if (entity->controls == NULL)
return -ENOMEM;
-
entity->ncontrols = ncontrols;
+ /* Initialize all supported controls */
ctrl = entity->controls;
for (i = 0; i < bControlSize * 8; ++i) {
if (uvc_test_bit(bmControls, i) == 0)
@@ -1580,81 +1728,47 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
ctrl->entity = entity;
ctrl->index = i;
+
+ uvc_ctrl_init_ctrl(dev, ctrl);
ctrl++;
}
}
- /* Walk the controls info list and associate them with the device
- * controls, then add the device to the global device list. This has
- * to be done while holding the controls lock, to make sure
- * uvc_ctrl_add_info() will not get called in-between.
- */
- mutex_lock(&uvc_driver.ctrl_mutex);
- list_for_each_entry(info, &uvc_driver.controls, list)
- uvc_ctrl_add_ctrl(dev, info);
-
- list_add_tail(&dev->list, &uvc_driver.devices);
- mutex_unlock(&uvc_driver.ctrl_mutex);
-
return 0;
}
/*
* Cleanup device controls.
*/
-void uvc_ctrl_cleanup_device(struct uvc_device *dev)
+static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
+ struct uvc_control *ctrl)
{
- struct uvc_entity *entity;
- unsigned int i;
+ struct uvc_control_mapping *mapping, *nm;
- /* Remove the device from the global devices list */
- mutex_lock(&uvc_driver.ctrl_mutex);
- if (dev->list.next != NULL)
- list_del(&dev->list);
- mutex_unlock(&uvc_driver.ctrl_mutex);
-
- list_for_each_entry(entity, &dev->entities, list) {
- for (i = 0; i < entity->ncontrols; ++i)
- kfree(entity->controls[i].uvc_data);
-
- kfree(entity->controls);
+ list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
+ list_del(&mapping->list);
+ kfree(mapping->menu_info);
+ kfree(mapping);
}
}
-void uvc_ctrl_cleanup(void)
+void uvc_ctrl_cleanup_device(struct uvc_device *dev)
{
- struct uvc_control_info *info;
- struct uvc_control_info *ni;
- struct uvc_control_mapping *mapping;
- struct uvc_control_mapping *nm;
+ struct uvc_entity *entity;
+ unsigned int i;
- list_for_each_entry_safe(info, ni, &uvc_driver.controls, list) {
- if (!(info->flags & UVC_CONTROL_EXTENSION))
- continue;
+ /* Free controls and control mappings for all entities. */
+ list_for_each_entry(entity, &dev->entities, list) {
+ for (i = 0; i < entity->ncontrols; ++i) {
+ struct uvc_control *ctrl = &entity->controls[i];
- list_for_each_entry_safe(mapping, nm, &info->mappings, list) {
- list_del(&mapping->list);
- kfree(mapping->menu_info);
- kfree(mapping);
+ if (!ctrl->initialized)
+ continue;
+
+ uvc_ctrl_cleanup_mappings(dev, ctrl);
+ kfree(ctrl->uvc_data);
}
- list_del(&info->list);
- kfree(info);
+ kfree(entity->controls);
}
}
-
-void uvc_ctrl_init(void)
-{
- struct uvc_control_info *ctrl = uvc_ctrls;
- struct uvc_control_info *cend = ctrl + ARRAY_SIZE(uvc_ctrls);
- struct uvc_control_mapping *mapping = uvc_ctrl_mappings;
- struct uvc_control_mapping *mend =
- mapping + ARRAY_SIZE(uvc_ctrl_mappings);
-
- for (; ctrl < cend; ++ctrl)
- uvc_ctrl_add_info(ctrl);
-
- for (; mapping < mend; ++mapping)
- uvc_ctrl_add_mapping(mapping);
-}
-
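The rework above bounds per-device memory use by charging each new mapping against an atomic counter and rolling the charge back when the limit is hit or the add fails. A minimal sketch of that idiom, detached from the driver (all names below are illustrative, not part of uvcvideo):

#include <linux/atomic.h>
#include <linux/errno.h>

#define DEMO_MAX_ITEMS	1024

static atomic_t demo_nitems = ATOMIC_INIT(0);

/* Optimistically charge the counter; undo the charge if the limit is
 * exceeded or if the real add operation fails. */
static int demo_add_item(int (*do_add)(void))
{
	int ret;

	if (atomic_inc_return(&demo_nitems) > DEMO_MAX_ITEMS) {
		atomic_dec(&demo_nitems);
		return -ENOMEM;
	}

	ret = do_add();
	if (ret < 0)
		atomic_dec(&demo_nitems);

	return ret;
}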
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 2ac85d8984f0..a1e9dfb52f69 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1,8 +1,8 @@
/*
* uvc_driver.c -- USB Video Class driver
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,11 +38,9 @@
#include "uvcvideo.h"
-#define DRIVER_AUTHOR "Laurent Pinchart <laurent.pinchart@skynet.be>"
+#define DRIVER_AUTHOR "Laurent Pinchart " \
+ "<laurent.pinchart@ideasonboard.com>"
#define DRIVER_DESC "USB Video Class driver"
-#ifndef DRIVER_VERSION
-#define DRIVER_VERSION "v0.1.0"
-#endif
unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_no_drop_param;
@@ -1762,6 +1760,7 @@ static int uvc_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&dev->streams);
atomic_set(&dev->nstreams, 0);
atomic_set(&dev->users, 0);
+ atomic_set(&dev->nmappings, 0);
dev->udev = usb_get_dev(udev);
dev->intf = usb_get_intf(intf);
@@ -1820,6 +1819,7 @@ static int uvc_probe(struct usb_interface *intf,
}
uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ usb_enable_autosuspend(udev);
return 0;
error:
@@ -2287,12 +2287,6 @@ static int __init uvc_init(void)
{
int result;
- INIT_LIST_HEAD(&uvc_driver.devices);
- INIT_LIST_HEAD(&uvc_driver.controls);
- mutex_init(&uvc_driver.ctrl_mutex);
-
- uvc_ctrl_init();
-
result = usb_register(&uvc_driver.driver);
if (result == 0)
printk(KERN_INFO DRIVER_DESC " (" DRIVER_VERSION ")\n");
@@ -2302,7 +2296,6 @@ static int __init uvc_init(void)
static void __exit uvc_cleanup(void)
{
usb_deregister(&uvc_driver.driver);
- uvc_ctrl_cleanup();
}
module_init(uvc_init);
diff --git a/drivers/media/video/uvc/uvc_isight.c b/drivers/media/video/uvc/uvc_isight.c
index a9285b570dbe..74bbe8f18f3e 100644
--- a/drivers/media/video/uvc/uvc_isight.c
+++ b/drivers/media/video/uvc/uvc_isight.c
@@ -4,7 +4,7 @@
* Copyright (C) 2006-2007
* Ivan N. Zlatev <contact@i-nz.net>
* Copyright (C) 2008-2009
- * Laurent Pinchart <laurent.pinchart@skynet.be>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index e9928a415086..ed6d5449741c 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -1,8 +1,8 @@
/*
* uvc_queue.c -- USB Video Class driver - Buffers management
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -135,7 +135,6 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
queue->buffer[i].buf.m.offset = i * bufsize;
queue->buffer[i].buf.length = buflength;
queue->buffer[i].buf.type = queue->type;
- queue->buffer[i].buf.sequence = 0;
queue->buffer[i].buf.field = V4L2_FIELD_NONE;
queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
queue->buffer[i].buf.flags = 0;
@@ -410,8 +409,7 @@ done:
* state can be properly initialized before buffers are accessed from the
* interrupt handler.
*
- * Enabling the video queue initializes parameters (such as sequence number,
- * sync pattern, ...). If the queue is already enabled, return -EBUSY.
+ * Enabling the video queue returns -EBUSY if the queue is already enabled.
*
* Disabling the video queue cancels the queue and removes all buffers from
* the main queue.
@@ -430,7 +428,6 @@ int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
ret = -EBUSY;
goto done;
}
- queue->sequence = 0;
queue->flags |= UVC_QUEUE_STREAMING;
queue->buf_used = 0;
} else {
@@ -510,8 +507,6 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
nextbuf = NULL;
spin_unlock_irqrestore(&queue->irqlock, flags);
- buf->buf.sequence = queue->sequence++;
-
wake_up(&buf->wait);
return nextbuf;
}
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index 85019bdacdf7..b7492775e6ae 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -1,8 +1,8 @@
/*
* uvc_status.c -- USB Video Class driver - Status endpoint
*
- * Copyright (C) 2007-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2009
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 86db32697b80..6d15de9b5204 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1,8 +1,8 @@
/*
* uvc_v4l2.c -- USB Video Class driver - V4L2 API
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,8 @@
/* ------------------------------------------------------------------------
* UVC ioctls
*/
-static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
+static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ struct uvc_xu_control_mapping *xmap, int old)
{
struct uvc_control_mapping *map;
unsigned int size;
@@ -58,6 +59,8 @@ static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
case V4L2_CTRL_TYPE_MENU:
if (old) {
+ uvc_trace(UVC_TRACE_CONTROL, "V4L2_CTRL_TYPE_MENU not "
+ "supported for UVCIOC_CTRL_MAP_OLD.\n");
ret = -EINVAL;
goto done;
}
@@ -78,17 +81,17 @@ static int uvc_ioctl_ctrl_map(struct uvc_xu_control_mapping *xmap, int old)
break;
default:
+ uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
+ "%u.\n", xmap->v4l2_type);
ret = -EINVAL;
goto done;
}
- ret = uvc_ctrl_add_mapping(map);
+ ret = uvc_ctrl_add_mapping(chain, map);
done:
- if (ret < 0) {
- kfree(map->menu_info);
- kfree(map);
- }
+ kfree(map->menu_info);
+ kfree(map);
return ret;
}
@@ -1021,42 +1024,13 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Dynamic controls. */
case UVCIOC_CTRL_ADD:
- {
- struct uvc_xu_control_info *xinfo = arg;
- struct uvc_control_info *info;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (xinfo->size == 0)
- return -EINVAL;
-
- info = kzalloc(sizeof *info, GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- memcpy(info->entity, xinfo->entity, sizeof info->entity);
- info->index = xinfo->index;
- info->selector = xinfo->selector;
- info->size = xinfo->size;
- info->flags = xinfo->flags;
-
- info->flags |= UVC_CONTROL_GET_MIN | UVC_CONTROL_GET_MAX |
- UVC_CONTROL_GET_RES | UVC_CONTROL_GET_DEF |
- UVC_CONTROL_EXTENSION;
-
- ret = uvc_ctrl_add_info(info);
- if (ret < 0)
- kfree(info);
- break;
- }
+ /* Legacy ioctl, kept for API compatibility reasons */
+ return -EEXIST;
case UVCIOC_CTRL_MAP_OLD:
case UVCIOC_CTRL_MAP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return uvc_ioctl_ctrl_map(arg, cmd == UVCIOC_CTRL_MAP_OLD);
+ return uvc_ioctl_ctrl_map(chain, arg,
+ cmd == UVCIOC_CTRL_MAP_OLD);
case UVCIOC_CTRL_GET:
return uvc_xu_ctrl_query(chain, arg, 0);
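Because uvc_ctrl_add_mapping() now copies the mapping it is given, the ioctl handler above frees its temporary unconditionally in the done path. A hedged sketch of the resulting ownership rule, using the new prototype visible in this patch (the helper name and the kmemdup-based setup are illustrative, and the driver-internal uvcvideo.h header is assumed):

static int demo_map_control(struct uvc_video_chain *chain,
			    const struct uvc_control_mapping *tmpl)
{
	struct uvc_control_mapping *map;
	int ret;

	map = kmemdup(tmpl, sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = uvc_ctrl_add_mapping(chain, map);	/* copies what it needs */
	kfree(map);				/* caller always frees */
	return ret;
}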
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index e27cf0d3b6d9..5555f0102838 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1,8 +1,8 @@
/*
* uvc_video.c -- USB Video Class driver - Video handling
*
- * Copyright (C) 2005-2009
- * Laurent Pinchart (laurent.pinchart@skynet.be)
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -45,6 +45,30 @@ static int __uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
unit << 8 | intfnum, data, size, timeout);
}
+static const char *uvc_query_name(__u8 query)
+{
+ switch (query) {
+ case UVC_SET_CUR:
+ return "SET_CUR";
+ case UVC_GET_CUR:
+ return "GET_CUR";
+ case UVC_GET_MIN:
+ return "GET_MIN";
+ case UVC_GET_MAX:
+ return "GET_MAX";
+ case UVC_GET_RES:
+ return "GET_RES";
+ case UVC_GET_LEN:
+ return "GET_LEN";
+ case UVC_GET_INFO:
+ return "GET_INFO";
+ case UVC_GET_DEF:
+ return "GET_DEF";
+ default:
+ return "<invalid>";
+ }
+}
+
int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
__u8 intfnum, __u8 cs, void *data, __u16 size)
{
@@ -53,9 +77,9 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
UVC_CTRL_CONTROL_TIMEOUT);
if (ret != size) {
- uvc_printk(KERN_ERR, "Failed to query (%u) UVC control %u "
- "(unit %u) : %d (exp. %u).\n", query, cs, unit, ret,
- size);
+ uvc_printk(KERN_ERR, "Failed to query (%s) UVC control %u on "
+ "unit %u: %d (exp. %u).\n", uvc_query_name(query), cs,
+ unit, ret, size);
return -EIO;
}
@@ -114,6 +138,15 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
bandwidth /= 8;
bandwidth += 12;
+ /* The bandwidth estimate is too low for many cameras. Don't use
+ * maximum packet sizes lower than 1024 bytes to try and work
+ * around the problem. According to measurements done on two
+ * different camera models, the value is high enough to get most
+ * resolutions working while not preventing two simultaneous
+ * VGA streams at 15 fps.
+ */
+ bandwidth = max_t(u32, bandwidth, 1024);
+
ctrl->dwMaxPayloadTransferSize = bandwidth;
}
}
@@ -394,6 +427,12 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
fid = data[1] & UVC_STREAM_FID;
+ /* Increase the sequence number regardless of any buffer states, so
+ * that discontinuous sequence numbers always indicate lost frames.
+ */
+ if (stream->last_fid != fid)
+ stream->sequence++;
+
/* Store the payload FID bit and return immediately when the buffer is
* NULL.
*/
@@ -427,6 +466,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
else
ktime_get_real_ts(&ts);
+ buf->buf.sequence = stream->sequence;
buf->buf.timestamp.tv_sec = ts.tv_sec;
buf->buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -688,6 +728,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
if (buf->buf.bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_READY;
+ buf->buf.sequence = ++stream->sequence;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -946,6 +987,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
unsigned int i;
int ret;
+ stream->sequence = -1;
stream->last_fid = -1;
stream->bulk.header_size = 0;
stream->bulk.skip_payload = 0;
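The per-stream sequence counter introduced above is exported to user space through v4l2_buffer.sequence, so a gap between dequeued buffers now indicates lost frames. A small user-space sketch of how an application might check for that (assumes buffers are dequeued in order from a single stream):

#include <stdint.h>
#include <stdio.h>
#include <linux/videodev2.h>

static int have_last;
static uint32_t last_seq;

/* Call with each buffer returned by VIDIOC_DQBUF. */
static void check_sequence(const struct v4l2_buffer *buf)
{
	if (have_last && buf->sequence != last_seq + 1)
		fprintf(stderr, "lost %u frame(s)\n",
			buf->sequence - last_seq - 1);

	last_seq = buf->sequence;
	have_last = 1;
}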
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 892e0e51916c..d97cf6d6a4f9 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -27,8 +27,6 @@
#define UVC_CONTROL_RESTORE (1 << 6)
/* Control can be updated by the camera. */
#define UVC_CONTROL_AUTO_UPDATE (1 << 7)
-/* Control is an extension unit control. */
-#define UVC_CONTROL_EXTENSION (1 << 8)
#define UVC_CONTROL_GET_RANGE (UVC_CONTROL_GET_CUR | UVC_CONTROL_GET_MIN | \
UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES | \
@@ -159,7 +157,8 @@ struct uvc_xu_control {
* Driver specific constants.
*/
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(0, 1, 0)
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(1, 0, 0)
+#define DRIVER_VERSION "v1.0.0"
/* Number of isochronous URBs. */
#define UVC_URBS 5
@@ -173,6 +172,9 @@ struct uvc_xu_control {
#define UVC_CTRL_CONTROL_TIMEOUT 300
#define UVC_CTRL_STREAMING_TIMEOUT 5000
+/* Maximum allowed number of control mappings per device */
+#define UVC_MAX_CONTROL_MAPPINGS 1024
+
/* Devices quirks */
#define UVC_QUIRK_STATUS_INTERVAL 0x00000001
#define UVC_QUIRK_PROBE_MINMAX 0x00000002
@@ -198,11 +200,10 @@ struct uvc_device;
* structures to maximize cache efficiency.
*/
struct uvc_control_info {
- struct list_head list;
struct list_head mappings;
__u8 entity[16];
- __u8 index;
+ __u8 index; /* Bit index in bmControls */
__u8 selector;
__u16 size;
@@ -235,17 +236,17 @@ struct uvc_control_mapping {
struct uvc_control {
struct uvc_entity *entity;
- struct uvc_control_info *info;
+ struct uvc_control_info info;
__u8 index; /* Used to match the uvc_control entry with a
uvc_control_info. */
- __u8 dirty : 1,
- loaded : 1,
- modified : 1,
- cached : 1;
+ __u8 dirty:1,
+ loaded:1,
+ modified:1,
+ cached:1,
+ initialized:1;
__u8 *uvc_data;
- __u8 *uvc_info;
};
struct uvc_format_desc {
@@ -392,7 +393,6 @@ struct uvc_video_queue {
void *mem;
unsigned int flags;
- __u32 sequence;
unsigned int count;
unsigned int buf_size;
@@ -413,7 +413,7 @@ struct uvc_video_chain {
struct uvc_entity *processing; /* Processing unit */
struct uvc_entity *selector; /* Selector unit */
- struct mutex ctrl_mutex;
+ struct mutex ctrl_mutex; /* Protects ctrl.info */
};
struct uvc_streaming {
@@ -458,6 +458,7 @@ struct uvc_streaming {
dma_addr_t urb_dma[UVC_URBS];
unsigned int urb_size;
+ __u32 sequence;
__u8 last_fid;
};
@@ -474,8 +475,8 @@ struct uvc_device {
char name[32];
enum uvc_device_state state;
- struct list_head list;
atomic_t users;
+ atomic_t nmappings;
/* Video control interface */
__u16 uvc_version;
@@ -509,11 +510,6 @@ struct uvc_fh {
struct uvc_driver {
struct usb_driver driver;
-
- struct list_head devices; /* struct uvc_device list */
- struct list_head controls; /* struct uvc_control_info list */
- struct mutex ctrl_mutex; /* protects controls and devices
- lists */
};
/* ------------------------------------------------------------------------
@@ -615,13 +611,11 @@ extern struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct v4l2_queryctrl *v4l2_ctrl);
-extern int uvc_ctrl_add_info(struct uvc_control_info *info);
-extern int uvc_ctrl_add_mapping(struct uvc_control_mapping *mapping);
+extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ const struct uvc_control_mapping *mapping);
extern int uvc_ctrl_init_device(struct uvc_device *dev);
extern void uvc_ctrl_cleanup_device(struct uvc_device *dev);
extern int uvc_ctrl_resume_device(struct uvc_device *dev);
-extern void uvc_ctrl_init(void);
-extern void uvc_ctrl_cleanup(void);
extern int uvc_ctrl_begin(struct uvc_video_chain *chain);
extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback);
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 0c2105ca611e..d4ac751036a2 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -645,9 +645,16 @@ static noinline long v4l1_compat_get_picture(
goto done;
}
- pict->depth = ((fmt->fmt.pix.bytesperline << 3)
- + (fmt->fmt.pix.width - 1))
- / fmt->fmt.pix.width;
+ if (fmt->fmt.pix.width) {
+ pict->depth = ((fmt->fmt.pix.bytesperline << 3)
+ + (fmt->fmt.pix.width - 1))
+ / fmt->fmt.pix.width;
+ } else {
+ err = -EINVAL;
+ goto done;
+ }
+
pict->palette = pixelformat_to_palette(
fmt->fmt.pix.pixelformat);
done:
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 8ee1179be926..9294282b5add 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -378,6 +378,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
if (module_name)
request_module(module_name);
+ else
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
/* Create the i2c client */
if (info->addr == 0 && probe_addrs)
@@ -676,3 +678,28 @@ int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
return 0;
}
EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
+
+const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
+ const struct v4l2_discrete_probe *probe,
+ s32 width, s32 height)
+{
+ int i;
+ u32 error, min_error = UINT_MAX;
+ const struct v4l2_frmsize_discrete *size, *best = NULL;
+
+ if (!probe)
+ return best;
+
+ for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
+ error = abs(size->width - width) + abs(size->height - height);
+ if (error < min_error) {
+ min_error = error;
+ best = size;
+ }
+ if (!error)
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
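A hedged sketch of how a driver could use the new helper in its try_fmt path. The size table and wrapper function are made up for illustration; only v4l2_find_nearest_format() and the two structures it operates on come from this patch, and the declaration is assumed to live in media/v4l2-common.h:

#include <linux/kernel.h>
#include <media/v4l2-common.h>

static const struct v4l2_frmsize_discrete demo_sizes[] = {
	{  320,  240 },
	{  640,  480 },
	{ 1280,  720 },
};

static const struct v4l2_discrete_probe demo_probe = {
	.sizes		= demo_sizes,
	.num_sizes	= ARRAY_SIZE(demo_sizes),
};

/* Clamp a requested format to the closest supported frame size. */
static void demo_try_fmt(struct v4l2_pix_format *pix)
{
	const struct v4l2_frmsize_discrete *best =
		v4l2_find_nearest_format(&demo_probe, pix->width, pix->height);

	if (best) {
		pix->width = best->width;
		pix->height = best->height;
	}
}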
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index ea8d32cd425d..9d2502cd03ff 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -305,6 +305,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_ROTATE: return "Rotate";
case V4L2_CID_BG_COLOR: return "Background Color";
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -419,6 +421,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_AUDIO_LIMITER_ENABLED:
case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index cb77197d480e..0ca7978654b5 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -81,7 +81,7 @@ static inline unsigned long *devnode_bits(int vfl_type)
/* Any types not assigned to fixed minor ranges must be mapped to
one single bitmap for the purposes of finding a free node number
since all those unassigned types use the same minor range. */
- int idx = (vfl_type > VFL_TYPE_VTX) ? VFL_TYPE_MAX - 1 : vfl_type;
+ int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;
return devnode_nums[idx];
}
@@ -187,48 +187,69 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -EIO;
if (!vdev->fops->read)
return -EINVAL;
- if (!video_is_registered(vdev))
- return -EIO;
- return vdev->fops->read(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->read(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static ssize_t v4l2_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -EIO;
if (!vdev->fops->write)
return -EINVAL;
- if (!video_is_registered(vdev))
- return -EIO;
- return vdev->fops->write(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->write(filp, buf, sz, off);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = DEFAULT_POLLMASK;
- if (!vdev->fops->poll || !video_is_registered(vdev))
- return DEFAULT_POLLMASK;
- return vdev->fops->poll(filp, poll);
+ if (!vdev->fops->poll)
+ return ret;
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->poll(filp, poll);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = video_devdata(filp);
- int ret;
+ int ret = -ENODEV;
- /* Allow ioctl to continue even if the device was unregistered.
- Things like dequeueing buffers might still be useful. */
if (vdev->fops->unlocked_ioctl) {
- ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
} else if (vdev->fops->ioctl) {
/* TODO: convert all drivers to unlocked_ioctl */
lock_kernel();
- ret = vdev->fops->ioctl(filp, cmd, arg);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->ioctl(filp, cmd, arg);
unlock_kernel();
} else
ret = -ENOTTY;
@@ -236,30 +257,20 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
-#ifdef CONFIG_MMU
-#define v4l2_get_unmapped_area NULL
-#else
-static unsigned long v4l2_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct video_device *vdev = video_devdata(filp);
-
- if (!vdev->fops->get_unmapped_area)
- return -ENOSYS;
- if (!video_is_registered(vdev))
- return -ENODEV;
- return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-#endif
-
static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
{
struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
- if (!vdev->fops->mmap || !video_is_registered(vdev))
- return -ENODEV;
- return vdev->fops->mmap(filp, vm);
+ if (!vdev->fops->mmap)
+ return ret;
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->mmap(filp, vm);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ return ret;
}
/* Override for the open function */
@@ -271,17 +282,24 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* Check if the video device is available */
mutex_lock(&videodev_lock);
vdev = video_devdata(filp);
- /* return ENODEV if the video device has been removed
- already or if it is not registered anymore. */
- if (vdev == NULL || !video_is_registered(vdev)) {
+ /* return ENODEV if the video device has already been removed. */
+ if (vdev == NULL) {
mutex_unlock(&videodev_lock);
return -ENODEV;
}
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
- if (vdev->fops->open)
- ret = vdev->fops->open(filp);
+ if (vdev->fops->open) {
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
+ if (video_is_registered(vdev))
+ ret = vdev->fops->open(filp);
+ else
+ ret = -ENODEV;
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ }
/* decrease the refcount in case of an error */
if (ret)
@@ -295,8 +313,13 @@ static int v4l2_release(struct inode *inode, struct file *filp)
struct video_device *vdev = video_devdata(filp);
int ret = 0;
- if (vdev->fops->release)
+ if (vdev->fops->release) {
+ if (vdev->lock)
+ mutex_lock(vdev->lock);
vdev->fops->release(filp);
+ if (vdev->lock)
+ mutex_unlock(vdev->lock);
+ }
/* decrease the refcount unconditionally since the release()
return value is ignored. */
@@ -309,7 +332,6 @@ static const struct file_operations v4l2_fops = {
.read = v4l2_read,
.write = v4l2_write,
.open = v4l2_open,
- .get_unmapped_area = v4l2_get_unmapped_area,
.mmap = v4l2_mmap,
.unlocked_ioctl = v4l2_ioctl,
#ifdef CONFIG_COMPAT
@@ -377,8 +399,6 @@ static int get_index(struct video_device *vdev)
*
* %VFL_TYPE_GRABBER - A frame grabber
*
- * %VFL_TYPE_VTX - A teletext device
- *
* %VFL_TYPE_VBI - Vertical blank data (undecoded)
*
* %VFL_TYPE_RADIO - A radio card
@@ -411,9 +431,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
case VFL_TYPE_GRABBER:
name_base = "video";
break;
- case VFL_TYPE_VTX:
- name_base = "vtx";
- break;
case VFL_TYPE_VBI:
name_base = "vbi";
break;
@@ -451,10 +468,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
minor_offset = 64;
minor_cnt = 64;
break;
- case VFL_TYPE_VTX:
- minor_offset = 192;
- minor_cnt = 32;
- break;
case VFL_TYPE_VBI:
minor_offset = 224;
minor_cnt = 32;
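The serialization added above only takes effect when a driver opts in by setting video_device::lock before registration; a minimal sketch of that, with hypothetical structure and function names:

#include <linux/mutex.h>
#include <media/v4l2-dev.h>

struct demo_dev {
	struct mutex lock;
	struct video_device vdev;
};

static int demo_register(struct demo_dev *dev)
{
	mutex_init(&dev->lock);
	/* The V4L2 core now takes this mutex around all file operations. */
	dev->vdev.lock = &dev->lock;
	return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
}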
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index de74ce07b5e2..69fd343d4774 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -134,15 +134,22 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
if (nonblocking)
return __v4l2_event_dequeue(fh, event);
+ /* Release the vdev lock while waiting */
+ if (fh->vdev->lock)
+ mutex_unlock(fh->vdev->lock);
+
do {
ret = wait_event_interruptible(events->wait,
events->navailable != 0);
if (ret < 0)
- return ret;
+ break;
ret = __v4l2_event_dequeue(fh, event);
} while (ret == -ENOENT);
+ if (fh->vdev->lock)
+ mutex_lock(fh->vdev->lock);
+
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
index f45f9405ea39..ac832a28e18e 100644
--- a/drivers/media/video/v4l2-mem2mem.c
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -421,8 +421,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
- mutex_lock(&src_q->vb_lock);
- mutex_lock(&dst_q->vb_lock);
+ videobuf_queue_lock(src_q);
+ videobuf_queue_lock(dst_q);
if (src_q->streaming && !list_empty(&src_q->stream))
src_vb = list_first_entry(&src_q->stream,
@@ -450,8 +450,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
end:
- mutex_unlock(&dst_q->vb_lock);
- mutex_unlock(&src_q->vb_lock);
+ videobuf_queue_unlock(dst_q);
+ videobuf_queue_unlock(src_q);
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
new file mode 100644
index 000000000000..02a21bccae18
--- /dev/null
+++ b/drivers/media/video/via-camera.c
@@ -0,0 +1,1474 @@
+/*
+ * Driver for the VIA Chrome integrated camera controller.
+ *
+ * Copyright 2009,2010 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under the terms of the GNU General Public License, version 2
+ *
+ * This work was supported by the One Laptop Per Child project
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/videobuf-dma-sg.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos_params.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+#include <linux/via_i2c.h>
+
+#include "via-camera.h"
+
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
+MODULE_LICENSE("GPL");
+
+static int flip_image;
+module_param(flip_image, bool, 0444);
+MODULE_PARM_DESC(flip_image,
+ "If set, the sensor will be instructed to flip the image "
+ "vertically.");
+
+#ifdef CONFIG_OLPC_XO_1_5
+static int override_serial;
+module_param(override_serial, bool, 0444);
+MODULE_PARM_DESC(override_serial,
+ "The camera driver will normally refuse to load if "
+ "the XO 1.5 serial port is enabled. Set this option "
+ "to force the issue.");
+#endif
+
+/*
+ * Basic window sizes.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+#define QCIF_WIDTH 176
+#define QCIF_HEIGHT 144
+
+/*
+ * The structure describing our camera.
+ */
+enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
+
+struct via_camera {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ struct platform_device *platdev;
+ struct viafb_dev *viadev;
+ struct mutex lock;
+ enum viacam_opstate opstate;
+ unsigned long flags;
+ struct pm_qos_request_list qos_request;
+ /*
+ * GPIO info for power/reset management
+ */
+ int power_gpio;
+ int reset_gpio;
+ /*
+ * I/O memory stuff.
+ */
+ void __iomem *mmio; /* Where the registers live */
+ void __iomem *fbmem; /* Frame buffer memory */
+ u32 fb_offset; /* Reserved memory offset (FB) */
+ /*
+ * Capture buffers and related. The controller supports
+ * up to three, so that's what we have here. These buffers
+ * live in frame buffer memory, so we don't call them "DMA".
+ */
+ unsigned int cb_offsets[3]; /* offsets into fb mem */
+ u8 *cb_addrs[3]; /* Kernel-space addresses */
+ int n_cap_bufs; /* How many are we using? */
+ int next_buf;
+ struct videobuf_queue vb_queue;
+ struct list_head buffer_queue; /* prot. by reg_lock */
+ /*
+ * User tracking.
+ */
+ int users;
+ struct file *owner;
+ /*
+ * Video format information. sensor_format is kept in a form
+ * that we can use to pass to the sensor. We always run the
+ * sensor in VGA resolution, though, and let the controller
+ * downscale things if need be. So we keep the "real"
+ * dimensions separately.
+ */
+ struct v4l2_pix_format sensor_format;
+ struct v4l2_pix_format user_format;
+ enum v4l2_mbus_pixelcode mbus_code;
+};
+
+/*
+ * Yes, this is a hack, but there's only going to be one of these
+ * on any system we know of.
+ */
+static struct via_camera *via_cam_info;
+
+/*
+ * Flag values, manipulated with bitops
+ */
+#define CF_DMA_ACTIVE 0 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 1 /* Must configure hardware */
+
+
+/*
+ * Nasty ugly v4l2 boilerplate.
+ */
+#define sensor_call(cam, optype, func, args...) \
+ v4l2_subdev_call(cam->sensor, optype, func, ##args)
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+
+/*
+ * Format handling. This is ripped almost directly from Hans's changes
+ * to cafe_ccic.c. It's a little unfortunate; until this change, we
+ * didn't need to know anything about the format except its byte depth;
+ * now this information must be managed at this level too.
+ */
+static struct via_format {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} via_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ /* RGB444 and Bayer should be doable, but have never been
+ tested with this driver. */
+};
+#define N_VIA_FMTS ARRAY_SIZE(via_formats)
+
+static struct via_format *via_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_VIA_FMTS; i++)
+ if (via_formats[i].pixelformat == pixelformat)
+ return via_formats + i;
+ /* Not found? Then return the first format. */
+ return via_formats;
+}
+
+
+/*--------------------------------------------------------------------------*/
+/*
+ * Sensor power/reset management. This piece is OLPC-specific for
+ * sure; other configurations will have things connected differently.
+ */
+static int via_sensor_power_setup(struct via_camera *cam)
+{
+ int ret;
+
+ cam->power_gpio = viafb_gpio_lookup("VGPIO3");
+ cam->reset_gpio = viafb_gpio_lookup("VGPIO2");
+ if (cam->power_gpio < 0 || cam->reset_gpio < 0) {
+ dev_err(&cam->platdev->dev, "Unable to find GPIO lines\n");
+ return -EINVAL;
+ }
+ ret = gpio_request(cam->power_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request power GPIO\n");
+ return ret;
+ }
+ ret = gpio_request(cam->reset_gpio, "viafb-camera");
+ if (ret) {
+ dev_err(&cam->platdev->dev, "Unable to request reset GPIO\n");
+ gpio_free(cam->power_gpio);
+ return ret;
+ }
+ gpio_direction_output(cam->power_gpio, 0);
+ gpio_direction_output(cam->reset_gpio, 0);
+ return 0;
+}
+
+/*
+ * Power up the sensor and perform the reset dance.
+ */
+static void via_sensor_power_up(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 1);
+ gpio_set_value(cam->reset_gpio, 0);
+ msleep(20); /* Probably excessive */
+ gpio_set_value(cam->reset_gpio, 1);
+ msleep(20);
+}
+
+static void via_sensor_power_down(struct via_camera *cam)
+{
+ gpio_set_value(cam->power_gpio, 0);
+ gpio_set_value(cam->reset_gpio, 0);
+}
+
+
+static void via_sensor_power_release(struct via_camera *cam)
+{
+ via_sensor_power_down(cam);
+ gpio_free(cam->power_gpio);
+ gpio_free(cam->reset_gpio);
+}
+
+/* --------------------------------------------------------------------------*/
+/* Sensor ops */
+
+/*
+ * Manage the ov7670 "flip" bit, which needs special help.
+ */
+static int viacam_set_flip(struct via_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip_image;
+ return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+/*
+ * Configure the sensor. It's up to the caller to ensure
+ * that the camera is in the correct operating state.
+ */
+static int viacam_configure_sensor(struct via_camera *cam)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->sensor_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ if (ret == 0)
+ ret = viacam_set_flip(cam);
+ return ret;
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/*
+ * Some simple register accessors; they assume that the lock is held.
+ *
+ * Should we want to support the second capture engine, we could
+ * hide the register difference by adding 0x1000 to registers in the
+ * 0x300-350 range.
+ */
+static inline void viacam_write_reg(struct via_camera *cam,
+ int reg, int value)
+{
+ iowrite32(value, cam->mmio + reg);
+}
+
+static inline int viacam_read_reg(struct via_camera *cam, int reg)
+{
+ return ioread32(cam->mmio + reg);
+}
+
+static inline void viacam_write_reg_mask(struct via_camera *cam,
+ int reg, int value, int mask)
+{
+ int tmp = viacam_read_reg(cam, reg);
+
+ tmp = (tmp & ~mask) | (value & mask);
+ viacam_write_reg(cam, reg, tmp);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Interrupt management and handling */
+
+static irqreturn_t viacam_quick_irq(int irq, void *data)
+{
+ struct via_camera *cam = data;
+ irqreturn_t ret = IRQ_NONE;
+ int icv;
+
+ /*
+ * All we do here is to clear the interrupts and tell
+ * the handler thread to wake up.
+ */
+ spin_lock(&cam->viadev->reg_lock);
+ icv = viacam_read_reg(cam, VCR_INTCTRL);
+ if (icv & VCR_IC_EAV) {
+ icv |= VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL;
+ viacam_write_reg(cam, VCR_INTCTRL, icv);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&cam->viadev->reg_lock);
+ return ret;
+}
+
+/*
+ * Find the next videobuf buffer which has somebody waiting on it.
+ */
+static struct videobuf_buffer *viacam_next_buffer(struct via_camera *cam)
+{
+ unsigned long flags;
+ struct videobuf_buffer *buf = NULL;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ if (cam->opstate != S_RUNNING)
+ goto out;
+ if (list_empty(&cam->buffer_queue))
+ goto out;
+ buf = list_entry(cam->buffer_queue.next, struct videobuf_buffer, queue);
+ if (!waitqueue_active(&buf->done)) {/* Nobody waiting */
+ buf = NULL;
+ goto out;
+ }
+ list_del(&buf->queue);
+ buf->state = VIDEOBUF_ACTIVE;
+out:
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ return buf;
+}
+
+/*
+ * The threaded IRQ handler.
+ */
+static irqreturn_t viacam_irq(int irq, void *data)
+{
+ int bufn;
+ struct videobuf_buffer *vb;
+ struct via_camera *cam = data;
+ struct videobuf_dmabuf *vdma;
+
+ /*
+ * If there is no place to put the data frame, don't bother
+ * with anything else.
+ */
+ vb = viacam_next_buffer(cam);
+ if (vb == NULL)
+ goto done;
+ /*
+ * Figure out which buffer we just completed.
+ */
+ bufn = (viacam_read_reg(cam, VCR_INTCTRL) & VCR_IC_ACTBUF) >> 3;
+ bufn -= 1;
+ if (bufn < 0)
+ bufn = cam->n_cap_bufs - 1;
+ /*
+ * Copy over the data and let any waiters know.
+ */
+ vdma = videobuf_to_dma(vb);
+ viafb_dma_copy_out_sg(cam->cb_offsets[bufn], vdma->sglist, vdma->sglen);
+ vb->state = VIDEOBUF_DONE;
+ vb->size = cam->user_format.sizeimage;
+ wake_up(&vb->done);
+done:
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * These functions must mess around with the general interrupt
+ * control register, which is relevant to much more than just the
+ * camera. Nothing else uses interrupts, though, as of this writing.
+ * Should that situation change, we'll have to improve support at
+ * the via-core level.
+ */
+static void viacam_int_enable(struct via_camera *cam)
+{
+ viacam_write_reg(cam, VCR_INTCTRL,
+ VCR_IC_INTEN|VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL);
+ viafb_irq_enable(VDE_I_C0AVEN);
+}
+
+static void viacam_int_disable(struct via_camera *cam)
+{
+ viafb_irq_disable(VDE_I_C0AVEN);
+ viacam_write_reg(cam, VCR_INTCTRL, 0);
+}
+
+
+
+/* --------------------------------------------------------------------------*/
+/* Controller operations */
+
+/*
+ * Set up our capture buffers in framebuffer memory.
+ */
+static int viacam_ctlr_cbufs(struct via_camera *cam)
+{
+ int nbuf = cam->viadev->camera_fbmem_size/cam->sensor_format.sizeimage;
+ int i;
+ unsigned int offset;
+
+ /*
+ * See how many buffers we can work with.
+ */
+ if (nbuf >= 3) {
+ cam->n_cap_bufs = 3;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_3BUFS,
+ VCR_CI_3BUFS);
+ } else if (nbuf == 2) {
+ cam->n_cap_bufs = 2;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_3BUFS);
+ } else {
+ cam_warn(cam, "Insufficient frame buffer memory\n");
+ return -ENOMEM;
+ }
+ /*
+ * Set them up.
+ */
+ offset = cam->fb_offset;
+ for (i = 0; i < cam->n_cap_bufs; i++) {
+ cam->cb_offsets[i] = offset;
+ cam->cb_addrs[i] = cam->fbmem + offset;
+ viacam_write_reg(cam, VCR_VBUF1 + i*4, offset & VCR_VBUF_MASK);
+ offset += cam->sensor_format.sizeimage;
+ }
+ return 0;
+}
+
+/*
+ * Set the scaling register for downscaling the image.
+ *
+ * This register works like this... Vertical scaling is enabled
+ * by bit 26; if that bit is set, downscaling is controlled by the
+ * value in bits 16:25. Those bits are divided by 1024 to get
+ * the scaling factor; setting just bit 25 thus cuts the height
+ * in half.
+ *
+ * Horizontal scaling works about the same, but it's enabled by
+ * bit 11, with bits 0:10 giving the numerator of a fraction
+ * (over 2048) for the scaling value.
+ *
+ * This function is naive in that, if the user departs from
+ * the 3x4 VGA scaling factor, the image will distort. We
+ * could work around that if it really seemed important.
+ */
+static void viacam_set_scale(struct via_camera *cam)
+{
+ unsigned int avscale;
+ int sf;
+
+ if (cam->user_format.width == VGA_WIDTH)
+ avscale = 0;
+ else {
+ sf = (cam->user_format.width*2048)/VGA_WIDTH;
+ avscale = VCR_AVS_HEN | sf;
+ }
+ if (cam->user_format.height < VGA_HEIGHT) {
+ sf = (1024*cam->user_format.height)/VGA_HEIGHT;
+ avscale |= VCR_AVS_VEN | (sf << 16);
+ }
+ viacam_write_reg(cam, VCR_AVSCALE, avscale);
+}
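A quick numeric check of the comment above, for a hypothetical 320x240 user format downscaled from the VGA sensor frame (the target size is made up; the formulas and register bits are the ones used in viacam_set_scale()):

static unsigned int demo_avscale_320x240(void)
{
	unsigned int avscale;
	int sf;

	sf = (320 * 2048) / VGA_WIDTH;		/* = 1024: halve the width */
	avscale = VCR_AVS_HEN | sf;
	sf = (1024 * 240) / VGA_HEIGHT;		/* = 512: halve the height */
	avscale |= VCR_AVS_VEN | (sf << 16);
	return avscale;
}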
+
+
+/*
+ * Configure image-related information into the capture engine.
+ */
+static void viacam_ctlr_image(struct via_camera *cam)
+{
+ int cicreg;
+
+ /*
+ * Disable clock before messing with stuff - from the via
+ * sample driver.
+ */
+ viacam_write_reg(cam, VCR_CAPINTC, ~(VCR_CI_ENABLE|VCR_CI_CLKEN));
+ /*
+ * Set up the controller for VGA resolution, modulo magic
+ * offsets from the via sample driver.
+ */
+ viacam_write_reg(cam, VCR_HORRANGE, 0x06200120);
+ viacam_write_reg(cam, VCR_VERTRANGE, 0x01de0000);
+ viacam_set_scale(cam);
+ /*
+ * Image size info.
+ */
+ viacam_write_reg(cam, VCR_MAXDATA,
+ (cam->sensor_format.height << 16) |
+ (cam->sensor_format.bytesperline >> 3));
+ viacam_write_reg(cam, VCR_MAXVBI, 0);
+ viacam_write_reg(cam, VCR_VSTRIDE,
+ cam->user_format.bytesperline & VCR_VS_STRIDE);
+ /*
+ * Set up the capture interface control register,
+ * everything but the "go" bit.
+ *
+ * The FIFO threshold is a bit of a magic number; 8 is what
+ * VIA's sample code uses.
+ */
+ cicreg = VCR_CI_CLKEN |
+ 0x08000000 | /* FIFO threshold */
+ VCR_CI_FLDINV | /* OLPC-specific? */
+ VCR_CI_VREFINV | /* OLPC-specific? */
+ VCR_CI_DIBOTH | /* Capture both fields */
+ VCR_CI_CCIR601_8;
+ if (cam->n_cap_bufs == 3)
+ cicreg |= VCR_CI_3BUFS;
+ /*
+ * YUV formats need different byte swapping than RGB.
+ */
+ if (cam->user_format.pixelformat == V4L2_PIX_FMT_YUYV)
+ cicreg |= VCR_CI_YUYV;
+ else
+ cicreg |= VCR_CI_UYVY;
+ viacam_write_reg(cam, VCR_CAPINTC, cicreg);
+}
+
+
+static int viacam_config_controller(struct via_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->viadev->reg_lock, flags);
+ ret = viacam_ctlr_cbufs(cam);
+ if (!ret)
+ viacam_ctlr_image(cam);
+ spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+ return ret;
+}
+
+/*
+ * Make it start grabbing data.
+ */
+static void viacam_start_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ cam->next_buf = 0;
+ viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
+ viacam_int_enable(cam);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_RUNNING;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+static void viacam_stop_engine(struct via_camera *cam)
+{
+ spin_lock_irq(&cam->viadev->reg_lock);
+ viacam_int_disable(cam);
+ viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_ENABLE);
+ (void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
+ cam->opstate = S_IDLE;
+ spin_unlock_irq(&cam->viadev->reg_lock);
+}
+
+
+/* --------------------------------------------------------------------------*/
+/* Videobuf callback ops */
+
+/*
+ * buffer_setup. The purpose of this one would appear to be to tell
+ * videobuf how big a single image is. It's also evidently up to us
+ * to put some sort of limit on the maximum number of buffers allowed.
+ */
+static int viacam_vb_buf_setup(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size)
+{
+ struct via_camera *cam = q->priv_data;
+
+ *size = cam->user_format.sizeimage;
+ if (*count == 0 || *count > 6) /* Arbitrary number */
+ *count = 6;
+ return 0;
+}
+
+/*
+ * Prepare a buffer.
+ */
+static int viacam_vb_buf_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb, enum v4l2_field field)
+{
+ struct via_camera *cam = q->priv_data;
+
+ vb->size = cam->user_format.sizeimage;
+ vb->width = cam->user_format.width; /* bytesperline???? */
+ vb->height = cam->user_format.height;
+ vb->field = field;
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ int ret = videobuf_iolock(q, vb, NULL);
+ if (ret)
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ return 0;
+}
+
+/*
+ * We've got a buffer to put data into.
+ *
+ * FIXME: check for a running engine and valid buffers?
+ */
+static void viacam_vb_buf_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ /*
+ * Note that videobuf holds the lock when it calls
+ * us, so we need not (indeed, cannot) take it here.
+ */
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &cam->buffer_queue);
+}
+
+/*
+ * Free a buffer.
+ */
+static void viacam_vb_buf_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct via_camera *cam = q->priv_data;
+
+ videobuf_dma_unmap(&cam->platdev->dev, videobuf_to_dma(vb));
+ videobuf_dma_free(videobuf_to_dma(vb));
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static const struct videobuf_queue_ops viacam_vb_ops = {
+ .buf_setup = viacam_vb_buf_setup,
+ .buf_prepare = viacam_vb_buf_prepare,
+ .buf_queue = viacam_vb_buf_queue,
+ .buf_release = viacam_vb_buf_release,
+};
+
+/* --------------------------------------------------------------------------*/
+/* File operations */
+
+static int viacam_open(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ filp->private_data = cam;
+ /*
+ * Note the new user. If this is the first one, we'll also
+ * need to power up the sensor.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->users == 0) {
+ int ret = viafb_request_dma();
+
+ if (ret) {
+ mutex_unlock(&cam->lock);
+ return ret;
+ }
+ via_sensor_power_up(cam);
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ /*
+ * Hook into videobuf. Evidently this cannot fail.
+ */
+ videobuf_queue_sg_init(&cam->vb_queue, &viacam_vb_ops,
+ &cam->platdev->dev, &cam->viadev->reg_lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), cam, NULL);
+ }
+ (cam->users)++;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_release(struct file *filp)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ mutex_lock(&cam->lock);
+ (cam->users)--;
+ /*
+ * If the "owner" is closing, shut down any ongoing
+ * operations.
+ */
+ if (filp == cam->owner) {
+ videobuf_stop(&cam->vb_queue);
+ /*
+ * We don't hold the spinlock here, but, if release()
+ * is being called by the owner, nobody else will
+ * be changing the state. And an extra stop would
+ * not hurt anyway.
+ */
+ if (cam->opstate != S_IDLE)
+ viacam_stop_engine(cam);
+ cam->owner = NULL;
+ }
+ /*
+ * Last one out needs to turn out the lights.
+ */
+ if (cam->users == 0) {
+ videobuf_mmap_free(&cam->vb_queue);
+ via_sensor_power_down(cam);
+ viafb_release_dma();
+ }
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+/*
+ * Read a frame from the device.
+ */
+static ssize_t viacam_read(struct file *filp, char __user *buffer,
+ size_t len, loff_t *pos)
+{
+ struct via_camera *cam = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&cam->lock);
+ /*
+ * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ cam->owner = filp;
+ /*
+ * Do we need to configure the hardware?
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out_unlock;
+ }
+ /*
+ * Fire up the capture engine, then have videobuf do
+ * the heavy lifting. Someday it would be good to avoid
+ * stopping and restarting the engine each time.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ viacam_start_engine(cam);
+ ret = videobuf_read_stream(&cam->vb_queue, buffer, len, pos, 0,
+ filp->f_flags & O_NONBLOCK);
+ viacam_stop_engine(cam);
+ /* videobuf_stop() ?? */
+
+out_unlock:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_poll_stream(filp, &cam->vb_queue, pt);
+}
+
+
+static int viacam_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct via_camera *cam = video_drvdata(filp);
+
+ return videobuf_mmap_mapper(&cam->vb_queue, vma);
+}
+
+
+
+static const struct v4l2_file_operations viacam_fops = {
+ .owner = THIS_MODULE,
+ .open = viacam_open,
+ .release = viacam_release,
+ .read = viacam_read,
+ .poll = viacam_poll,
+ .mmap = viacam_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * The long list of v4l2 ioctl ops
+ */
+
+static int viacam_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *ident)
+{
+ struct via_camera *cam = priv;
+
+ ident->ident = V4L2_IDENT_NONE;
+ ident->revision = 0;
+ if (v4l2_chip_match_host(&ident->match)) {
+ ident->ident = V4L2_IDENT_VIA_VX855;
+ return 0;
+ }
+ return sensor_call(cam, core, g_chip_ident, ident);
+}
+
+/*
+ * Control ops are passed through to the sensor.
+ */
+static int viacam_queryctrl(struct file *filp, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, queryctrl, qc);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_g_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, g_ctrl, ctrl);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_s_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, core, s_ctrl, ctrl);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+/*
+ * Only one input.
+ */
+static int viacam_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int viacam_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int viacam_s_std(struct file *filp, void *priv, v4l2_std_id *std)
+{
+ return 0;
+}
+
+/*
+ * Video format stuff. Here is our default format until
+ * user space messes with things.
+ */
+static const struct v4l2_pix_format viacam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH * 2,
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+};
+
+static const enum v4l2_mbus_pixelcode via_def_mbus_code = V4L2_MBUS_FMT_YUYV8_2X8;
+
+static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_VIA_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, via_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = via_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+/*
+ * Figure out proper image dimensions, but always force the
+ * sensor to VGA.
+ */
+static void viacam_fmt_pre(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ *sensorfmt = *userfmt;
+ if (userfmt->width < QCIF_WIDTH || userfmt->height < QCIF_HEIGHT) {
+ userfmt->width = QCIF_WIDTH;
+ userfmt->height = QCIF_HEIGHT;
+ }
+ if (userfmt->width > VGA_WIDTH || userfmt->height > VGA_HEIGHT) {
+ userfmt->width = VGA_WIDTH;
+ userfmt->height = VGA_HEIGHT;
+ }
+ sensorfmt->width = VGA_WIDTH;
+ sensorfmt->height = VGA_HEIGHT;
+}
+
+static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
+ struct v4l2_pix_format *sensorfmt)
+{
+ struct via_format *f = via_find_format(userfmt->pixelformat);
+
+ sensorfmt->bytesperline = sensorfmt->width * f->bpp;
+ sensorfmt->sizeimage = sensorfmt->height * sensorfmt->bytesperline;
+ userfmt->pixelformat = sensorfmt->pixelformat;
+ userfmt->field = sensorfmt->field;
+ userfmt->bytesperline = 2 * userfmt->width;
+ userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
+}
+
+
+/*
+ * The real work of figuring out a workable format.
+ */
+static int viacam_do_try_fmt(struct via_camera *cam,
+ struct v4l2_pix_format *upix, struct v4l2_pix_format *spix)
+{
+ int ret;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct via_format *f = via_find_format(upix->pixelformat);
+
+ upix->pixelformat = f->pixelformat;
+ viacam_fmt_pre(upix, spix);
+ v4l2_fill_mbus_format(&mbus_fmt, upix, f->mbus_code);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+ v4l2_fill_pix_format(spix, &mbus_fmt);
+ viacam_fmt_post(upix, spix);
+ return ret;
+}
+
+
+
+static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ struct v4l2_format sfmt;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+
+ mutex_lock(&cam->lock);
+ fmt->fmt.pix = cam->user_format;
+ mutex_unlock(&cam->lock);
+ return 0;
+}
+
+static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct via_camera *cam = priv;
+ int ret;
+ struct v4l2_format sfmt;
+ struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * Camera must be idle or we can't mess with the
+ * video setup.
+ */
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Let the sensor code look over and tweak the
+ * requested formatting.
+ */
+ ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
+ if (ret)
+ goto out;
+ /*
+ * OK, let's commit to the new format.
+ */
+ cam->user_format = fmt->fmt.pix;
+ cam->sensor_format = sfmt.fmt.pix;
+ cam->mbus_code = f->mbus_code;
+ ret = viacam_configure_sensor(cam);
+ if (!ret)
+ ret = viacam_config_controller(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_querycap(struct file *filp, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "via-camera");
+ strcpy(cap->card, "via-camera");
+ cap->version = 1;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/*
+ * Streaming operations - pure videobuf stuff.
+ */
+static int viacam_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_reqbufs(&cam->vb_queue, rb);
+}
+
+static int viacam_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_querybuf(&cam->vb_queue, buf);
+}
+
+static int viacam_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_qbuf(&cam->vb_queue, buf);
+}
+
+static int viacam_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+}
+
+static int viacam_streamon(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret = 0;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+	 * Enforce the V4L2 "only one owner gets to read data" rule.
+ */
+ if (cam->owner && cam->owner != filp) {
+ ret = -EBUSY;
+ goto out;
+ }
+ cam->owner = filp;
+ /*
+ * Configure things if need be.
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (ret)
+ goto out;
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out;
+ }
+ /*
+ * If the CPU goes into C3, the DMA transfer gets corrupted and
+ * users start filing unsightly bug reports. Put in a "latency"
+ * requirement which will keep the CPU out of the deeper sleep
+ * states.
+ */
+ pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ /*
+ * Fire things up.
+ */
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ ret = videobuf_streamon(&cam->vb_queue);
+ if (!ret)
+ viacam_start_engine(cam);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+static int viacam_streamoff(struct file *filp, void *priv, enum v4l2_buf_type t)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ mutex_lock(&cam->lock);
+ if (cam->opstate != S_RUNNING) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pm_qos_remove_request(&cam->qos_request);
+ viacam_stop_engine(cam);
+ /*
+ * Videobuf will recycle all of the outstanding buffers, but
+ * we should be sure we don't retain any references to
+ * any of them.
+ */
+ ret = videobuf_streamoff(&cam->vb_queue);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+out:
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+static int viacam_vidiocgmbuf(struct file *filp, void *priv,
+ struct video_mbuf *mbuf)
+{
+ struct via_camera *cam = priv;
+
+ return videobuf_cgmbuf(&cam->vb_queue, mbuf, 6);
+}
+#endif
+
+/* G/S_PARM */
+
+static int viacam_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, g_parm, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, s_parm, parm);
+ mutex_unlock(&cam->lock);
+ parm->parm.capture.readbuffers = cam->n_cap_bufs;
+ return ret;
+}
+
+static int viacam_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ if (sizes->index != 0)
+ return -EINVAL;
+ sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ sizes->stepwise.min_width = QCIF_WIDTH;
+ sizes->stepwise.min_height = QCIF_HEIGHT;
+ sizes->stepwise.max_width = VGA_WIDTH;
+ sizes->stepwise.max_height = VGA_HEIGHT;
+ sizes->stepwise.step_width = sizes->stepwise.step_height = 1;
+ return 0;
+}
+
+static int viacam_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct via_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->lock);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
+
+
+
+static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
+ .vidioc_g_chip_ident = viacam_g_chip_ident,
+ .vidioc_queryctrl = viacam_queryctrl,
+ .vidioc_g_ctrl = viacam_g_ctrl,
+ .vidioc_s_ctrl = viacam_s_ctrl,
+ .vidioc_enum_input = viacam_enum_input,
+ .vidioc_g_input = viacam_g_input,
+ .vidioc_s_input = viacam_s_input,
+ .vidioc_s_std = viacam_s_std,
+ .vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = viacam_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = viacam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = viacam_s_fmt_vid_cap,
+ .vidioc_querycap = viacam_querycap,
+ .vidioc_reqbufs = viacam_reqbufs,
+ .vidioc_querybuf = viacam_querybuf,
+ .vidioc_qbuf = viacam_qbuf,
+ .vidioc_dqbuf = viacam_dqbuf,
+ .vidioc_streamon = viacam_streamon,
+ .vidioc_streamoff = viacam_streamoff,
+ .vidioc_g_parm = viacam_g_parm,
+ .vidioc_s_parm = viacam_s_parm,
+ .vidioc_enum_framesizes = viacam_enum_framesizes,
+ .vidioc_enum_frameintervals = viacam_enum_frameintervals,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = viacam_vidiocgmbuf,
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Power management.
+ */
+
+/*
+ * Setup stuff.
+ */
+
+static struct video_device viacam_v4l_template = {
+ .name = "via-camera",
+ .minor = -1,
+ .tvnorms = V4L2_STD_NTSC_M,
+ .current_norm = V4L2_STD_NTSC_M,
+ .fops = &viacam_fops,
+ .ioctl_ops = &viacam_ioctl_ops,
+ .release = video_device_release_empty, /* Check this */
+};
+
+
+static __devinit int viacam_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct i2c_adapter *sensor_adapter;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+
+ /*
+ * Note that there are actually two capture channels on
+ * the device. We only deal with one for now. That
+ * is encoded here; nothing else assumes it's dealing with
+ * a unique capture device.
+ */
+ struct via_camera *cam;
+
+ /*
+ * Ensure that frame buffer memory has been set aside for
+ * this purpose. As an arbitrary limit, refuse to work
+ * with less than two frames of VGA 16-bit data.
+ *
+ * If we ever support the second port, we'll need to set
+ * aside more memory.
+ */
+ if (viadev->camera_fbmem_size < (VGA_HEIGHT*VGA_WIDTH*4)) {
+ printk(KERN_ERR "viacam: insufficient FB memory reserved\n");
+ return -ENOMEM;
+ }
+ if (viadev->engine_mmio == NULL) {
+ printk(KERN_ERR "viacam: No I/O memory, so no pictures\n");
+ return -ENOMEM;
+ }
+ /*
+ * Basic structure initialization.
+ */
+	cam = kzalloc(sizeof(struct via_camera), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ via_cam_info = cam;
+ cam->platdev = pdev;
+ cam->viadev = viadev;
+ cam->users = 0;
+ cam->owner = NULL;
+ cam->opstate = S_IDLE;
+ cam->user_format = cam->sensor_format = viacam_def_pix_format;
+ mutex_init(&cam->lock);
+ INIT_LIST_HEAD(&cam->buffer_queue);
+ cam->mmio = viadev->engine_mmio;
+ cam->fbmem = viadev->fbmem;
+ cam->fb_offset = viadev->camera_fbmem_offset;
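+	/* Sensor/controller programming is deferred until the device is used */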
+ cam->flags = 1 << CF_CONFIG_NEEDED;
+ cam->mbus_code = via_def_mbus_code;
+ /*
+ * Tell V4L that we exist.
+ */
+ ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register v4l2 device\n");
+ return ret;
+ }
+ /*
+ * Convince the system that we can do DMA.
+ */
+ pdev->dev.dma_mask = &viadev->pdev->dma_mask;
+ dma_set_mask(&pdev->dev, 0xffffffff);
+ /*
+ * Fire up the capture port. The write to 0x78 looks purely
+ * OLPCish; any system will need to tweak 0x1e.
+ */
+ via_write_reg_mask(VIASR, 0x78, 0, 0x80);
+ via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
+ /*
+ * Get the sensor powered up.
+ */
+ ret = via_sensor_power_setup(cam);
+ if (ret)
+ goto out_unregister;
+ via_sensor_power_up(cam);
+
+ /*
+	 * See if we can find the sensor on the bus.  The VIA_PORT_31 assumption
+	 * is OLPC-specific; the 0x42 address assumption is ov7670-specific.
+ */
+ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
+ cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter,
+ "ov7670", "ov7670", 0x42 >> 1, NULL);
+ if (cam->sensor == NULL) {
+ dev_err(&pdev->dev, "Unable to find the sensor!\n");
+ ret = -ENODEV;
+ goto out_power_down;
+ }
+ /*
+ * Get the IRQ.
+ */
+ viacam_int_disable(cam);
+ ret = request_threaded_irq(viadev->pdev->irq, viacam_quick_irq,
+ viacam_irq, IRQF_SHARED, "via-camera", cam);
+ if (ret)
+ goto out_power_down;
+ /*
+	 * Register the video device with V4L2.
+ */
+ cam->vdev = viacam_v4l_template;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out_irq;
+ video_set_drvdata(&cam->vdev, cam);
+
+ /* Power the sensor down until somebody opens the device */
+ via_sensor_power_down(cam);
+ return 0;
+
+out_irq:
+ free_irq(viadev->pdev->irq, cam);
+out_power_down:
+ via_sensor_power_release(cam);
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
+
+static __devexit int viacam_remove(struct platform_device *pdev)
+{
+ struct via_camera *cam = via_cam_info;
+ struct viafb_dev *viadev = pdev->dev.platform_data;
+
+ video_unregister_device(&cam->vdev);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ free_irq(viadev->pdev->irq, cam);
+ via_sensor_power_release(cam);
+ via_cam_info = NULL;
+ return 0;
+}
+
+
+static struct platform_driver viacam_driver = {
+ .driver = {
+ .name = "viafb-camera",
+ },
+ .probe = viacam_probe,
+ .remove = viacam_remove,
+};
+
+
+#ifdef CONFIG_OLPC_XO_1_5
+/*
+ * The OLPC folks put the serial port on the same pin as
+ * the camera. They also get grumpy if we break the
+ * serial port and keep them from using it. So we have
+ * to check the serial enable bit and not step on it.
+ */
+#define VIACAM_SERIAL_DEVFN 0x88
+#define VIACAM_SERIAL_CREG 0x46
+#define VIACAM_SERIAL_BIT 0x40
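+/* (VIACAM_SERIAL_DEVFN 0x88 decodes to PCI device 17, function 0, i.e. PCI_DEVFN(17, 0)) */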
+
+static __devinit int viacam_check_serial_port(void)
+{
+ struct pci_bus *pbus = pci_find_bus(0, 0);
+ u8 cbyte;
+
+ pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, &cbyte);
+ if ((cbyte & VIACAM_SERIAL_BIT) == 0)
+ return 0; /* Not enabled */
+ if (override_serial == 0) {
+ printk(KERN_NOTICE "Via camera: serial port is enabled, " \
+ "refusing to load.\n");
+ printk(KERN_NOTICE "Specify override_serial=1 to force " \
+ "module loading.\n");
+ return -EBUSY;
+ }
+ printk(KERN_NOTICE "Via camera: overriding serial port\n");
+ pci_bus_write_config_byte(pbus, VIACAM_SERIAL_DEVFN,
+ VIACAM_SERIAL_CREG, cbyte & ~VIACAM_SERIAL_BIT);
+ return 0;
+}
+#endif
+
+
+
+
+static int viacam_init(void)
+{
+#ifdef CONFIG_OLPC_XO_1_5
+ if (viacam_check_serial_port())
+ return -EBUSY;
+#endif
+ return platform_driver_register(&viacam_driver);
+}
+module_init(viacam_init);
+
+static void viacam_exit(void)
+{
+ platform_driver_unregister(&viacam_driver);
+}
+module_exit(viacam_exit);
diff --git a/drivers/media/video/via-camera.h b/drivers/media/video/via-camera.h
new file mode 100644
index 000000000000..b12a4b3d616f
--- /dev/null
+++ b/drivers/media/video/via-camera.h
@@ -0,0 +1,93 @@
+/*
+ * VIA Camera register definitions.
+ */
+#define VCR_INTCTRL 0x300 /* Capture interrupt control */
+#define VCR_IC_EAV 0x0001 /* End of active video status */
+#define VCR_IC_EVBI 0x0002 /* End of VBI status */
+#define VCR_IC_FBOTFLD 0x0004 /* "flipping" Bottom field is active */
+#define VCR_IC_ACTBUF 0x0018 /* Active video buffer */
+#define VCR_IC_VSYNC 0x0020 /* 0 = VB, 1 = active video */
+#define VCR_IC_BOTFLD 0x0040 /* Bottom field is active */
+#define VCR_IC_FFULL 0x0080 /* FIFO full */
+#define VCR_IC_INTEN 0x0100 /* End of active video int. enable */
+#define VCR_IC_VBIINT 0x0200 /* End of VBI int enable */
+#define VCR_IC_VBIBUF 0x0400 /* Current VBI buffer */
+
+#define VCR_TSC 0x308 /* Transport stream control */
+#define VCR_TSC_ENABLE 0x000001 /* Transport stream input enable */
+#define VCR_TSC_DROPERR 0x000002 /* Drop error packets */
+#define VCR_TSC_METHOD 0x00000c /* DMA method (non-functional) */
+#define VCR_TSC_COUNT 0x07fff0 /* KByte or packet count */
+#define VCR_TSC_CBMODE 0x080000 /* Change buffer by byte count */
+#define VCR_TSC_PSSIG 0x100000 /* Packet starting signal disable */
+#define VCR_TSC_BE 0x200000 /* MSB first (serial mode) */
+#define VCR_TSC_SERIAL 0x400000 /* Serial input (0 = parallel) */
+
+#define VCR_CAPINTC 0x310 /* Capture interface control */
+#define VCR_CI_ENABLE 0x00000001 /* Capture enable */
+#define VCR_CI_BSS 0x00000002 /* WTF "bit stream selection" */
+#define VCR_CI_3BUFS 0x00000004 /* 1 = 3 buffers, 0 = 2 buffers */
+#define VCR_CI_VIPEN 0x00000008 /* VIP enable */
+#define VCR_CI_CCIR601_8 0 /* CCIR601 input stream, 8 bit */
+#define VCR_CI_CCIR656_8 0x00000010 /* ... CCIR656, 8 bit */
+#define VCR_CI_CCIR601_16 0x00000020 /* ... CCIR601, 16 bit */
+#define VCR_CI_CCIR656_16 0x00000030 /* ... CCIR656, 16 bit */
+#define VCR_CI_HDMODE 0x00000040 /* CCIR656-16 hdr decode mode; 1=16b */
+#define VCR_CI_BSWAP 0x00000080 /* Swap bytes (16-bit) */
+#define VCR_CI_YUYV 0 /* Byte order 0123 */
+#define VCR_CI_UYVY 0x00000100 /* Byte order 1032 */
+#define VCR_CI_YVYU 0x00000200 /* Byte order 0321 */
+#define VCR_CI_VYUY 0x00000300 /* Byte order 3012 */
+#define VCR_CI_VIPTYPE 0x00000400 /* VIP type */
+#define VCR_CI_IFSEN 0x00000800 /* Input field signal enable */
+#define VCR_CI_DIODD 0 /* De-interlace odd, 30fps */
+#define VCR_CI_DIEVEN 0x00001000 /* ...even field, 30fps */
+#define VCR_CI_DIBOTH 0x00002000 /* ...both fields, 60fps */
+#define VCR_CI_DIBOTH30 0x00003000 /* ...both fields, 30fps interlace */
+#define VCR_CI_CONVTYPE 0x00004000 /* 4:2:2 to 4:4:4; 1 = interpolate */
+#define VCR_CI_CFC 0x00008000 /* Capture flipping control */
+#define VCR_CI_FILTER 0x00070000 /* Horiz filter mode select
+ 000 = none
+ 001 = 2 tap
+ 010 = 3 tap
+ 011 = 4 tap
+ 100 = 5 tap */
+#define VCR_CI_CLKINV 0x00080000 /* Input CLK inverted */
+#define VCR_CI_VREFINV 0x00100000 /* VREF inverted */
+#define VCR_CI_HREFINV 0x00200000 /* HREF inverted */
+#define VCR_CI_FLDINV 0x00400000 /* Field inverted */
+#define VCR_CI_CLKPIN 0x00800000 /* Capture clock pin */
+#define VCR_CI_THRESH 0x0f000000 /* Capture fifo threshold */
+#define VCR_CI_HRLE 0x10000000 /* Positive edge of HREF */
+#define VCR_CI_VRLE 0x20000000 /* Positive edge of VREF */
+#define VCR_CI_OFLDINV 0x40000000 /* Field output inverted */
+#define VCR_CI_CLKEN 0x80000000 /* Capture clock enable */
+
+#define VCR_HORRANGE 0x314 /* Active video horizontal range */
+#define VCR_VERTRANGE 0x318 /* Active video vertical range */
+#define VCR_AVSCALE 0x31c /* Active video scaling control */
+#define VCR_AVS_HEN 0x00000800 /* Horizontal scale enable */
+#define VCR_AVS_VEN 0x04000000 /* Vertical enable */
+#define VCR_VBIHOR 0x320 /* VBI Data horizontal range */
+#define VCR_VBIVERT 0x324 /* VBI data vertical range */
+#define VCR_VBIBUF1 0x328 /* First VBI buffer */
+#define VCR_VBISTRIDE 0x32c /* VBI stride */
+#define VCR_ANCDATACNT 0x330 /* Ancillary data count setting */
+#define VCR_MAXDATA 0x334 /* Active data count of active video */
+#define VCR_MAXVBI 0x338 /* Maximum data count of VBI */
+#define VCR_CAPDATA 0x33c /* Capture data count */
+#define VCR_VBUF1 0x340 /* First video buffer */
+#define VCR_VBUF2 0x344 /* Second video buffer */
+#define VCR_VBUF3 0x348 /* Third video buffer */
+#define VCR_VBUF_MASK 0x1ffffff0 /* Bits 28:4 */
+#define VCR_VBIBUF2 0x34c /* Second VBI buffer */
+#define VCR_VSTRIDE 0x350 /* Stride of video + coring control */
+#define VCR_VS_STRIDE_SHIFT 4
+#define VCR_VS_STRIDE 0x00001ff0 /* Stride (8-byte units) */
+#define VCR_VS_CCD 0x007f0000 /* Coring compare data */
+#define VCR_VS_COREEN 0x00800000 /* Coring enable */
+#define VCR_TS0ERR 0x354 /* TS buffer 0 error indicator */
+#define VCR_TS1ERR	0x358	/* TS buffer 1 error indicator */
+#define VCR_TS2ERR	0x35c	/* TS buffer 2 error indicator */
+
+/* Add 0x1000 for the second capture engine registers */
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index ce1595bef629..8979f91fa8e5 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -73,25 +73,46 @@ struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
- vb->state != VIDEOBUF_QUEUED)
-int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
+static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
+ unsigned long flags;
+ bool rc;
+
+ spin_lock_irqsave(q->irqlock, flags);
+ rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(q->irqlock, flags);
+ return rc;
+};
+
+int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ int non_blocking, int intr)
+{
+ bool is_ext_locked;
+ int ret = 0;
+
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
if (non_blocking) {
- if (WAITON_CONDITION)
+ if (is_state_active_or_queued(q, vb))
return 0;
- else
- return -EAGAIN;
+ return -EAGAIN;
}
+ is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
+
+	/* Release the vdev lock so that this wait does not block other
+	   users' access to the device. */
+ if (is_ext_locked)
+ mutex_unlock(q->ext_lock);
if (intr)
- return wait_event_interruptible(vb->done, WAITON_CONDITION);
+ ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
else
- wait_event(vb->done, WAITON_CONDITION);
+ wait_event(vb->done, is_state_active_or_queued(q, vb));
+ /* Relock */
+ if (is_ext_locked)
+ mutex_lock(q->ext_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
@@ -125,11 +146,13 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
enum v4l2_field field,
unsigned int msize,
void *priv,
- struct videobuf_qtype_ops *int_ops)
+ struct videobuf_qtype_ops *int_ops,
+ struct mutex *ext_lock)
{
BUG_ON(!q);
memset(q, 0, sizeof(*q));
q->irqlock = irqlock;
+ q->ext_lock = ext_lock;
q->dev = dev;
q->type = type;
q->field = field;
@@ -350,9 +373,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
int videobuf_mmap_free(struct videobuf_queue *q)
{
int ret;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
ret = __videobuf_free(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
@@ -407,9 +430,9 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
enum v4l2_memory memory)
{
int ret;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
@@ -432,7 +455,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
return -EINVAL;
}
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (req->type != q->type) {
dprintk(1, "reqbufs: queue type invalid\n");
retval = -EINVAL;
@@ -469,7 +492,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
retval = 0;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
@@ -478,7 +501,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
int ret = -EINVAL;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (unlikely(b->type != q->type)) {
dprintk(1, "querybuf: Wrong type.\n");
goto done;
@@ -496,7 +519,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
ret = 0;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
@@ -513,7 +536,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
if (b->memory == V4L2_MEMORY_MMAP)
down_read(&current->mm->mmap_sem);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading) {
dprintk(1, "qbuf: Reading running...\n");
@@ -605,7 +628,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
wake_up_interruptible_sync(&q->wait);
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
up_read(&current->mm->mmap_sem);
@@ -635,14 +658,14 @@ checks:
dprintk(2, "next_buffer: waiting on buffer\n");
/* Drop lock to avoid deadlock with qbuf */
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
/* Checking list_empty and streaming is safe without
* locks because we goto checks to validate while
* holding locks before proceeding */
retval = wait_event_interruptible(q->wait,
!list_empty(&q->stream) || !q->streaming);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (retval)
goto done;
@@ -669,7 +692,7 @@ static int stream_next_buffer(struct videobuf_queue *q,
goto done;
buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
- retval = videobuf_waiton(buf, nonblocking, 1);
+ retval = videobuf_waiton(q, buf, nonblocking, 1);
if (retval < 0)
goto done;
@@ -687,7 +710,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
memset(b, 0, sizeof(*b));
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = stream_next_buffer(q, &buf, nonblocking);
if (retval < 0) {
@@ -713,7 +736,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
buf->state = VIDEOBUF_IDLE;
b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
@@ -724,7 +747,7 @@ int videobuf_streamon(struct videobuf_queue *q)
unsigned long flags = 0;
int retval;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->reading)
goto done;
@@ -740,7 +763,7 @@ int videobuf_streamon(struct videobuf_queue *q)
wake_up_interruptible_sync(&q->wait);
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
@@ -760,9 +783,9 @@ int videobuf_streamoff(struct videobuf_queue *q)
{
int retval;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = __videobuf_streamoff(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
@@ -797,7 +820,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
spin_lock_irqsave(q->irqlock, flags);
q->ops->buf_queue(q, q->read_buf);
spin_unlock_irqrestore(q->irqlock, flags);
- retval = videobuf_waiton(q->read_buf, 0, 0);
+ retval = videobuf_waiton(q, q->read_buf, 0, 0);
if (0 == retval) {
CALL(q, sync, q, q->read_buf);
if (VIDEOBUF_ERROR == q->read_buf->state)
@@ -868,7 +891,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
q->ops->buf_setup(q, &nbufs, &size);
@@ -909,7 +932,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
/* wait until capture is done */
- retval = videobuf_waiton(q->read_buf, nonblocking, 1);
+ retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (0 != retval)
goto done;
@@ -938,7 +961,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
@@ -999,9 +1022,9 @@ int videobuf_read_start(struct videobuf_queue *q)
{
int rc;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
rc = __videobuf_read_start(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
@@ -1009,15 +1032,15 @@ EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
__videobuf_read_stop(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (q->streaming)
__videobuf_streamoff(q);
@@ -1025,7 +1048,7 @@ void videobuf_stop(struct videobuf_queue *q)
if (q->reading)
__videobuf_read_stop(q);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
@@ -1039,7 +1062,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
dprintk(2, "%s\n", __func__);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
retval = -EBUSY;
if (q->streaming)
goto done;
@@ -1059,7 +1082,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
list_del(&q->read_buf->stream);
q->read_off = 0;
}
- rc = videobuf_waiton(q->read_buf, nonblocking, 1);
+ rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
if (rc < 0) {
if (0 == retval)
retval = rc;
@@ -1097,7 +1120,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
}
done:
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
@@ -1109,7 +1132,7 @@ unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
if (q->streaming) {
if (!list_empty(&q->stream))
buf = list_entry(q->stream.next,
@@ -1147,7 +1170,7 @@ unsigned int videobuf_poll_stream(struct file *file,
}
}
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
@@ -1164,7 +1187,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
struct videobuf_buffer *buf = q->bufs[i];
@@ -1174,7 +1197,7 @@ int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
break;
}
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
return rc;
}
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 6ff9e4bac3ea..c9691115f2d2 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -28,7 +28,6 @@ struct videobuf_dma_contig_memory {
void *vaddr;
dma_addr_t dma_handle;
unsigned long size;
- int is_userptr;
};
#define MAGIC_DC_MEM 0x0733ac61
@@ -63,7 +62,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -103,7 +102,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
}
@@ -120,7 +119,6 @@ static const struct vm_operations_struct videobuf_vm_ops = {
*/
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
- mem->is_userptr = 0;
mem->dma_handle = 0;
mem->size = 0;
}
@@ -147,7 +145,6 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
offset = vb->baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
- mem->is_userptr = 0;
ret = -EINVAL;
down_read(&mm->mmap_sem);
@@ -181,9 +178,6 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
pages_done++;
}
- if (!ret)
- mem->is_userptr = 1;
-
out_up:
up_read(&current->mm->mmap_sem);
@@ -349,10 +343,11 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops);
+ priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 2ad0bc252b0e..20f227ee2b3e 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -116,8 +116,8 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
- sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
- size -= min(PAGE_SIZE, size);
+ sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
+ size -= min_t(size_t, PAGE_SIZE, size);
}
return sglist;
@@ -358,7 +358,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
map->count--;
if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -374,7 +374,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
kfree(map);
}
return;
@@ -654,10 +654,11 @@ void videobuf_queue_sg_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &sg_ops);
+ priv, &sg_ops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 3f76398968b8..3de7c7e4402d 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -57,7 +57,7 @@ static int videobuf_dvb_thread(void *data)
buf = list_entry(dvb->dvbq.stream.next,
struct videobuf_buffer, stream);
list_del(&buf->stream);
- err = videobuf_waiton(buf,0,1);
+ err = videobuf_waiton(&dvb->dvbq, buf, 0, 1);
/* no more feeds left or stop_feed() asked us to quit */
if (0 == dvb->nfeeds)
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index e7fe31d54f07..df142580e44c 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -75,7 +75,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
- mutex_lock(&q->vb_lock);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -114,7 +114,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
- mutex_unlock(&q->vb_lock);
+ videobuf_queue_unlock(q);
}
return;
@@ -304,10 +304,11 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv)
+ void *priv,
+ struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops);
+ priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 3eb15f72ac09..e5e005dc1554 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4334,10 +4334,10 @@ static int __init vino_module_init(void)
vino_drvdata->decoder =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "saa7191", "saa7191", 0, I2C_ADDRS(0x45));
+ NULL, "saa7191", 0, I2C_ADDRS(0x45));
vino_drvdata->camera =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
- "indycam", "indycam", 0, I2C_ADDRS(0x2b));
+ NULL, "indycam", 0, I2C_ADDRS(0x2b));
dprintk("init complete!\n");
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index e17b6fee046b..9797e5a69265 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -820,14 +820,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vivi_dev *dev = video_drvdata(file);
- struct videobuf_queue *q = &dev->vb_vidq;
int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
- mutex_lock(&q->vb_lock);
-
if (vivi_is_generating(dev)) {
dprintk(dev, 1, "%s device busy\n", __func__);
ret = -EBUSY;
@@ -840,7 +837,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dev->vb_vidq.field = f->fmt.pix.field;
ret = 0;
out:
- mutex_unlock(&q->vb_lock);
return ret;
}
@@ -1086,7 +1082,7 @@ static const struct v4l2_file_operations vivi_fops = {
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
- .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = vivi_mmap,
};
@@ -1173,19 +1169,19 @@ static int __init vivi_create_instance(int inst)
dev->saturation = 127;
dev->hue = 0;
+ /* initialize locks */
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), dev);
+ sizeof(struct vivi_buffer), dev, &dev->mutex);
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
- /* initialize locks */
- spin_lock_init(&dev->slock);
- mutex_init(&dev->mutex);
-
ret = -ENOMEM;
vfd = video_device_alloc();
if (!vfd)
@@ -1194,6 +1190,7 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
diff --git a/drivers/media/video/vp27smpx.c b/drivers/media/video/vp27smpx.c
index ca8303bd2401..c15efb6e7771 100644
--- a/drivers/media/video/vp27smpx.c
+++ b/drivers/media/video/vp27smpx.c
@@ -27,11 +27,9 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("vp27smpx driver");
MODULE_AUTHOR("Hans Verkuil");
@@ -200,9 +198,25 @@ static const struct i2c_device_id vp27smpx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, vp27smpx_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "vp27smpx",
- .probe = vp27smpx_probe,
- .remove = vp27smpx_remove,
- .id_table = vp27smpx_id,
+static struct i2c_driver vp27smpx_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vp27smpx",
+ },
+ .probe = vp27smpx_probe,
+ .remove = vp27smpx_remove,
+ .id_table = vp27smpx_id,
};
+
+static __init int init_vp27smpx(void)
+{
+ return i2c_add_driver(&vp27smpx_driver);
+}
+
+static __exit void exit_vp27smpx(void)
+{
+ i2c_del_driver(&vp27smpx_driver);
+}
+
+module_init(init_vp27smpx);
+module_exit(exit_vp27smpx);
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 77ebcea7c3da..91a01b3cdf8c 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -28,7 +28,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
MODULE_DESCRIPTION("vpx3220a/vpx3216b/vpx3214c video decoder driver");
MODULE_AUTHOR("Laurent Pinchart");
@@ -614,9 +613,25 @@ static const struct i2c_device_id vpx3220_id[] = {
};
MODULE_DEVICE_TABLE(i2c, vpx3220_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "vpx3220",
- .probe = vpx3220_probe,
- .remove = vpx3220_remove,
- .id_table = vpx3220_id,
+static struct i2c_driver vpx3220_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vpx3220",
+ },
+ .probe = vpx3220_probe,
+ .remove = vpx3220_remove,
+ .id_table = vpx3220_id,
};
+
+static __init int init_vpx3220(void)
+{
+ return i2c_add_driver(&vpx3220_driver);
+}
+
+static __exit void exit_vpx3220(void)
+{
+ i2c_del_driver(&vpx3220_driver);
+}
+
+module_init(init_vpx3220);
+module_exit(exit_vpx3220);
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index d5965543ecab..a22f765e968a 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -30,7 +30,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("wm8739 driver");
@@ -282,9 +281,25 @@ static const struct i2c_device_id wm8739_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8739_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "wm8739",
- .probe = wm8739_probe,
- .remove = wm8739_remove,
- .id_table = wm8739_id,
+static struct i2c_driver wm8739_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "wm8739",
+ },
+ .probe = wm8739_probe,
+ .remove = wm8739_remove,
+ .id_table = wm8739_id,
};
+
+static __init int init_wm8739(void)
+{
+ return i2c_add_driver(&wm8739_driver);
+}
+
+static __exit void exit_wm8739(void)
+{
+ i2c_del_driver(&wm8739_driver);
+}
+
+module_init(init_wm8739);
+module_exit(exit_wm8739);
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 23bad3fd6dc5..135525649086 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -31,12 +31,11 @@
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
-#include <linux/i2c-id.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-i2c-drv.h>
+#include <media/wm8775.h>
MODULE_DESCRIPTION("wm8775 driver");
MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
@@ -52,10 +51,16 @@ enum {
TOT_REGS
};
+#define ALC_HOLD 0x85 /* R17: use zero cross detection, ALC hold time 42.6 ms */
+#define ALC_EN 0x100 /* R17: ALC enable */
+
struct wm8775_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *mute;
+ struct v4l2_ctrl *vol;
+ struct v4l2_ctrl *bal;
+ struct v4l2_ctrl *loud;
u8 input; /* Last selected input (0-0xf) */
};
@@ -87,6 +92,30 @@ static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
return -1;
}
+static void wm8775_set_audio(struct v4l2_subdev *sd, int quietly)
+{
+ struct wm8775_state *state = to_state(sd);
+ u8 vol_l, vol_r;
+ int muted = 0 != state->mute->val;
+ u16 volume = (u16)state->vol->val;
+ u16 balance = (u16)state->bal->val;
+
+	/* Normalize the 0-65535 control range to the 0-255 register range (+24dB down to -103dB) */
+ vol_l = (min(65536 - balance, 32768) * volume) >> 23;
+ vol_r = (min(balance, (u16)32768) * volume) >> 23;
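+	/*
+	 * Worked example: with the defaults set below (volume 0xCF00,
+	 * balance centred at 32768) both channels come out as
+	 * (32768 * 0xCF00) >> 23 = 0xCF, i.e. the 0dB register setting.
+	 */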
+
+ /* Mute */
+ if (muted || quietly)
+ wm8775_write(sd, R21, 0x0c0 | state->input);
+
+ wm8775_write(sd, R14, vol_l | 0x100); /* 0x100= Left channel ADC zero cross enable */
+ wm8775_write(sd, R15, vol_r | 0x100); /* 0x100= Right channel ADC zero cross enable */
+
+ /* Un-mute */
+ if (!muted)
+ wm8775_write(sd, R21, state->input);
+}
+
static int wm8775_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -104,25 +133,26 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
state->input = input;
if (!v4l2_ctrl_g_ctrl(state->mute))
return 0;
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- wm8775_write(sd, R21, 0x100 + state->input);
+ if (!v4l2_ctrl_g_ctrl(state->vol))
+ return 0;
+ if (!v4l2_ctrl_g_ctrl(state->bal))
+ return 0;
+ wm8775_set_audio(sd, 1);
return 0;
}
static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
- struct wm8775_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- if (!ctrl->val)
- wm8775_write(sd, R21, 0x100 + state->input);
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ wm8775_set_audio(sd, 0);
+ return 0;
+ case V4L2_CID_AUDIO_LOUDNESS:
+ wm8775_write(sd, R17, (ctrl->val ? ALC_EN : 0) | ALC_HOLD);
return 0;
}
return -EINVAL;
@@ -146,16 +176,7 @@ static int wm8775_log_status(struct v4l2_subdev *sd)
static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
- struct wm8775_state *state = to_state(sd);
-
- /* If I remove this, then it can happen that I have no
- sound the first time I tune from static to a valid channel.
- It's difficult to reproduce and is almost certainly related
- to the zero cross detect circuit. */
- wm8775_write(sd, R21, 0x0c0);
- wm8775_write(sd, R14, 0x1d4);
- wm8775_write(sd, R15, 0x1d4);
- wm8775_write(sd, R21, 0x100 + state->input);
+ wm8775_set_audio(sd, 0);
return 0;
}
@@ -205,6 +226,7 @@ static int wm8775_probe(struct i2c_client *client,
{
struct wm8775_state *state;
struct v4l2_subdev *sd;
+ int err;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -218,15 +240,21 @@ static int wm8775_probe(struct i2c_client *client,
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
+ sd->grp_id = WM8775_GID; /* subdev group id */
state->input = 2;
- v4l2_ctrl_handler_init(&state->hdl, 1);
+ v4l2_ctrl_handler_init(&state->hdl, 4);
state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ state->vol = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 65535, (65535+99)/100, 0xCF00); /* 0dB*/
+ state->bal = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_BALANCE, 0, 65535, (65535+99)/100, 32768);
+ state->loud = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
+ V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 1);
sd->ctrl_handler = &state->hdl;
- if (state->hdl.error) {
- int err = state->hdl.error;
-
+ err = state->hdl.error;
+ if (err) {
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
return err;
@@ -238,29 +266,25 @@ static int wm8775_probe(struct i2c_client *client,
wm8775_write(sd, R23, 0x000);
/* Disable zero cross detect timeout */
wm8775_write(sd, R7, 0x000);
- /* Left justified, 24-bit mode */
- wm8775_write(sd, R11, 0x021);
+ /* HPF enable, I2S mode, 24-bit */
+ wm8775_write(sd, R11, 0x022);
/* Master mode, clock ratio 256fs */
wm8775_write(sd, R12, 0x102);
/* Powered up */
wm8775_write(sd, R13, 0x000);
- /* ADC gain +2.5dB, enable zero cross */
- wm8775_write(sd, R14, 0x1d4);
- /* ADC gain +2.5dB, enable zero cross */
- wm8775_write(sd, R15, 0x1d4);
- /* ALC Stereo, ALC target level -1dB FS max gain +8dB */
- wm8775_write(sd, R16, 0x1bf);
- /* Enable gain control, use zero cross detection,
- ALC hold time 42.6 ms */
- wm8775_write(sd, R17, 0x185);
+ /* ALC stereo, ALC target level -5dB FS, ALC max gain +8dB */
+ wm8775_write(sd, R16, 0x1bb);
+ /* Set ALC mode and hold time */
+ wm8775_write(sd, R17, (state->loud->val ? ALC_EN : 0) | ALC_HOLD);
/* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */
wm8775_write(sd, R18, 0x0a2);
/* Enable noise gate, threshold -72dBfs */
wm8775_write(sd, R19, 0x005);
- /* Transient window 4ms, lower PGA gain limit -1dB */
- wm8775_write(sd, R20, 0x07a);
- /* LRBOTH = 1, use input 2. */
- wm8775_write(sd, R21, 0x102);
+ /* Transient window 4ms, ALC min gain -5dB */
+ wm8775_write(sd, R20, 0x0fb);
+
+ wm8775_set_audio(sd, 1); /* set volume/mute/mux */
+
return 0;
}
@@ -281,9 +305,25 @@ static const struct i2c_device_id wm8775_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8775_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "wm8775",
- .probe = wm8775_probe,
- .remove = wm8775_remove,
- .id_table = wm8775_id,
+static struct i2c_driver wm8775_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "wm8775",
+ },
+ .probe = wm8775_probe,
+ .remove = wm8775_remove,
+ .id_table = wm8775_id,
};
+
+static __init int init_wm8775(void)
+{
+ return i2c_add_driver(&wm8775_driver);
+}
+
+static __exit void exit_wm8775(void)
+{
+ i2c_del_driver(&wm8775_driver);
+}
+
+module_init(init_wm8775);
+module_exit(exit_wm8775);
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index 307e847fe1cd..37fe16181e3c 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -341,10 +341,8 @@ struct card_info {
enum card_type type;
char name[32];
const char *i2c_decoder; /* i2c decoder device */
- const char *mod_decoder; /* i2c decoder module */
const unsigned short *addrs_decoder;
const char *i2c_encoder; /* i2c encoder device */
- const char *mod_encoder; /* i2c encoder module */
const unsigned short *addrs_encoder;
u16 video_vfe, video_codec; /* videocodec types */
u16 audio_chip; /* audio type */
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index bfcd3aef50f9..0aac376c3f7a 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -379,7 +379,6 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10_old,
.name = "DC10(old)",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -409,10 +408,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10_new,
.name = "DC10(new)",
.i2c_decoder = "saa7110",
- .mod_decoder = "saa7110",
.addrs_decoder = saa7110_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -440,10 +437,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC10plus,
.name = "DC10plus",
.i2c_decoder = "saa7110",
- .mod_decoder = "saa7110",
.addrs_decoder = saa7110_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -472,10 +467,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC30,
.name = "DC30",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -505,10 +498,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = DC30plus,
.name = "DC30plus",
.i2c_decoder = "vpx3220a",
- .mod_decoder = "vpx3220",
.addrs_decoder = vpx3220_addrs,
.i2c_encoder = "adv7175",
- .mod_encoder = "adv7175",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36050,
.video_vfe = CODEC_TYPE_ZR36016,
@@ -538,10 +529,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = LML33,
.name = "LML33",
.i2c_decoder = "bt819a",
- .mod_decoder = "bt819",
.addrs_decoder = bt819_addrs,
.i2c_encoder = "bt856",
- .mod_encoder = "bt856",
.addrs_encoder = bt856_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -569,10 +558,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = LML33R10,
.name = "LML33R10",
.i2c_decoder = "saa7114",
- .mod_decoder = "saa7115",
.addrs_decoder = saa7114_addrs,
.i2c_encoder = "adv7170",
- .mod_encoder = "adv7170",
.addrs_encoder = adv717x_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -600,10 +587,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
.type = BUZ,
.name = "Buz",
.i2c_decoder = "saa7111",
- .mod_decoder = "saa7115",
.addrs_decoder = saa7111_addrs,
.i2c_encoder = "saa7185",
- .mod_encoder = "saa7185",
.addrs_encoder = saa7185_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -633,10 +618,8 @@ static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
/* AverMedia chose not to brand the 6-Eyes. Thus it
can't be autodetected, and requires card=x. */
.i2c_decoder = "ks0127",
- .mod_decoder = "ks0127",
.addrs_decoder = ks0127_addrs,
.i2c_encoder = "bt866",
- .mod_encoder = "bt866",
.addrs_encoder = bt866_addrs,
.video_codec = CODEC_TYPE_ZR36060,
@@ -1359,13 +1342,13 @@ static int __devinit zoran_probe(struct pci_dev *pdev,
}
zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
- &zr->i2c_adapter, zr->card.mod_decoder, zr->card.i2c_decoder,
+ &zr->i2c_adapter, NULL, zr->card.i2c_decoder,
0, zr->card.addrs_decoder);
- if (zr->card.mod_encoder)
+ if (zr->card.i2c_encoder)
zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
&zr->i2c_adapter,
- zr->card.mod_encoder, zr->card.i2c_encoder,
+ NULL, zr->card.i2c_encoder,
0, zr->card.addrs_encoder);
dprintk(2,
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index 6f846abee3e4..b02007e42150 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -1470,8 +1470,7 @@ zoran_irq (int irq,
(zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS ||
zr->codec_mode == BUZ_MODE_MOTION_COMPRESS)) {
if (zr36067_debug > 1 && (!zr->frame_num || zr->JPEG_error)) {
- char sc[] = "0000";
- char sv[5];
+ char sv[BUZ_NUM_STAT_COM + 1];
int i;
printk(KERN_INFO
@@ -1481,12 +1480,9 @@ zoran_irq (int irq,
zr->jpg_settings.field_per_buff,
zr->JPEG_missed);
- strcpy(sv, sc);
- for (i = 0; i < 4; i++) {
- if (le32_to_cpu(zr->stat_com[i]) & 1)
- sv[i] = '1';
- }
- sv[4] = 0;
+ for (i = 0; i < BUZ_NUM_STAT_COM; i++)
+ sv[i] = le32_to_cpu(zr->stat_com[i]) & 1 ? '1' : '0';
+ sv[BUZ_NUM_STAT_COM] = 0;
printk(KERN_INFO
"%s: stat_com=%s queue_state=%ld/%ld/%ld/%ld\n",
ZR_DEVNAME(zr), sv,
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 3c471a4e3e4a..401082b853f0 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3322,7 +3322,7 @@ zoran_mmap (struct file *file,
mmap_unlock_and_return:
mutex_unlock(&zr->resource_lock);
- return 0;
+ return res;
}
static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index a82b5bd18d26..7dfb01e9930e 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -572,7 +572,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize)
DBG("wakeup [buf/i] [%p/%d]\n", buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&cam->slock, flags);
- return 0;
+ return rc;
}
/* this function moves the usb stream read pipe data
@@ -1304,7 +1304,7 @@ static int zr364xx_open(struct file *file)
NULL, &cam->slock,
cam->type,
V4L2_FIELD_NONE,
- sizeof(struct zr364xx_buffer), cam);
+ sizeof(struct zr364xx_buffer), cam, NULL);
/* Added some delay here, since opening/closing the camera quickly,
* like Ekiga does during its startup, can crash the webcam
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1f69743b12ec..5a74db75f66f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -4,7 +4,6 @@
menuconfig MISC_DEVICES
bool "Misc devices"
- default y
---help---
Say Y here to get to see options for device drivers from various
different categories. This option alone does not add any kernel code.
@@ -24,7 +23,8 @@ config AD525X_DPOT
AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
- ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
+ ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+ AD5271, AD5272, AD5274
digital potentiometer chips.
See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -284,6 +284,16 @@ config SGI_GRU_DEBUG
This option enables addition debugging code for the SGI GRU driver. If
you are unsure, say N.
+config APDS9802ALS
+ tristate "Medfield Avago APDS9802 ALS Sensor module"
+ depends on I2C
+ help
+ If you say yes here you get support for the ALS APDS9802 ambient
+ light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called apds9802als.
+
config ISL29003
tristate "Intersil ISL29003 ambient light sensor"
depends on I2C && SYSFS
@@ -294,6 +304,16 @@ config ISL29003
This driver can also be built as a module. If so, the module
will be called isl29003.
+config ISL29020
+ tristate "Intersil ISL29020 ambient light sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the Intersil ISL29020
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called isl29020.
+
config SENSORS_TSL2550
tristate "Taos TSL2550 ambient light sensor"
depends on I2C && SYSFS
@@ -314,6 +334,27 @@ config SENSORS_BH1780
This driver can also be built as a module. If so, the module
will be called bh1780gli.
+config SENSORS_BH1770
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+ tristate "APDS990X combined als and proximity sensors"
+ depends on I2C
+ default n
+ ---help---
+ Say Y here if you want to build a driver for Avago APDS990x
+ combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds990x. If unsure, say N here.
+
config HMC6352
tristate "Honeywell HMC6352 compass"
depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9f2986b4da2f..4be5c6fc5ef4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
@@ -23,7 +25,9 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
+obj-$(CONFIG_APDS9802ALS) += apds9802als.o
obj-$(CONFIG_ISL29003) += isl29003.o
+obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 374352af7979..4ff73c215746 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -102,6 +102,8 @@ static const struct i2c_device_id ad_dpot_id[] = {
{"ad5170", AD5170_ID},
{"ad5172", AD5172_ID},
{"ad5173", AD5173_ID},
+ {"ad5272", AD5272_ID},
+ {"ad5274", AD5274_ID},
{}
};
MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index b8c6df9c8437..7f9a55afe05d 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -38,6 +38,8 @@ static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
{.name = "ad8402", .devid = AD8402_ID},
{.name = "ad8403", .devid = AD8403_ID},
{.name = "adn2850", .devid = ADN2850_ID},
+ {.name = "ad5270", .devid = AD5270_ID},
+ {.name = "ad5271", .devid = AD5271_ID},
{}
};
@@ -53,13 +55,13 @@ static int write8(void *client, u8 val)
static int write16(void *client, u8 reg, u8 val)
{
u8 data[2] = {reg, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 2);
}
static int write24(void *client, u8 reg, u16 val)
{
u8 data[3] = {reg, val >> 8, val};
- return spi_write(client, data, 1);
+ return spi_write(client, data, 3);
}
static int read8(void *client)
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 5e6fa8449e8b..7cb911028d09 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -29,9 +29,9 @@
* AD5262 2 256 20, 50, 200
* AD5263 4 256 20, 50, 200
* AD5290 1 256 10, 50, 100
- * AD5291 1 256 20
- * AD5292 1 1024 20
- * AD5293 1 1024 20
+ * AD5291 1 256 20, 50, 100 (20-TP)
+ * AD5292 1 1024 20, 50, 100 (20-TP)
+ * AD5293 1 1024 20, 50, 100
* AD7376 1 128 10, 50, 100, 1M
* AD8400 1 256 1, 10, 50, 100
* AD8402 2 256 1, 10, 50, 100
@@ -52,6 +52,10 @@
* AD5170 1 256 2.5, 10, 50, 100 (OTP)
* AD5172 2 256 2.5, 10, 50, 100 (OTP)
* AD5173 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5270 1 1024 20, 50, 100 (50-TP)
+ * AD5271 1 256 20, 50, 100 (50-TP)
+ * AD5272 1 1024 20, 50, 100 (50-TP)
+ * AD5274 1 256 20, 50, 100 (50-TP)
*
* See Documentation/misc-devices/ad525x_dpot.txt for more info.
*
@@ -126,18 +130,38 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
{
unsigned ctrl = 0;
+ int value;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
if (dpot->feat & F_RDACS_WONLY)
return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
-
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
- dpot->uid == DPOT_UID(AD5293_ID))
- return dpot_read_r8d8(dpot,
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
+ value = dpot_read_r8d8(dpot,
DPOT_AD5291_READ_RDAC << 2);
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value >> 2;
+
+ return value;
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+
+ value = dpot_read_r8d8(dpot,
+ DPOT_AD5270_1_2_4_READ_RDAC << 2);
+
+ if (value < 0)
+ return value;
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value >> 2;
+
+ return value;
+ }
+
ctrl = DPOT_SPI_READ_RDAC;
} else if (reg & DPOT_ADDR_EEPROM) {
ctrl = DPOT_SPI_READ_EEPROM;
@@ -153,6 +177,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
{
+ int value;
unsigned ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
@@ -166,7 +191,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5291_RDAC_AB;
+ 0 : DPOT_AD5282_RDAC_AB;
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5170_ID):
case DPOT_UID(AD5171_ID):
@@ -175,8 +200,27 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5272_3_A0;
+ 0 : DPOT_AD5172_3_A0;
return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+ value = dpot_read_r8d16(dpot,
+ DPOT_AD5270_1_2_4_RDAC << 2);
+
+ if (value < 0)
+ return value;
+ /*
+		 * AD5272/AD5274 return the high byte first, however the
+		 * underlying SMBus transfer expects the low byte first.
+ */
+ value = swab16(value);
+
+		if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
+ return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
return dpot_read_r8d16(dpot, (reg & 0xF8) |
@@ -198,7 +242,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
{
unsigned val = 0;
- if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
if (dpot->feat & F_RDACS_WONLY)
dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
@@ -219,11 +263,30 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
} else {
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
- dpot->uid == DPOT_UID(AD5293_ID))
+ dpot->uid == DPOT_UID(AD5293_ID)) {
+
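+			/* the RDAC register is write protected until unlocked */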
+ dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+ DPOT_AD5291_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5291_ID))
+ value = value << 2;
+
return dpot_write_r8d8(dpot,
(DPOT_AD5291_RDAC << 2) |
(value >> 8), value & 0xFF);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (dpot->uid == DPOT_UID(AD5271_ID))
+ value = value << 2;
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ }
val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
}
} else if (reg & DPOT_ADDR_EEPROM) {
@@ -243,6 +306,16 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
val = DPOT_SPI_INC_ALL;
break;
}
+ } else if (reg & DPOT_ADDR_OTP) {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5291_STORE_XTPM << 2, 0);
+ } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+ dpot->uid == DPOT_UID(AD5271_ID)) {
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+ }
} else
BUG();
@@ -273,7 +346,7 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5291_RDAC_AB;
+ 0 : DPOT_AD5282_RDAC_AB;
return dpot_write_r8d8(dpot, ctrl, value);
break;
case DPOT_UID(AD5171_ID):
@@ -289,12 +362,12 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
- 0 : DPOT_AD5272_3_A0;
+ 0 : DPOT_AD5172_3_A0;
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_r8d16(dpot, ctrl);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
- ctrl |= DPOT_AD5270_2_3_FUSE;
+ ctrl |= DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
break;
@@ -303,10 +376,25 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
tmp = dpot_read_r8d16(dpot, tmp);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
- ctrl = DPOT_AD5270_2_3_FUSE;
+ ctrl = DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
break;
+ case DPOT_UID(AD5272_ID):
+ case DPOT_UID(AD5274_ID):
+ dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+ DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+ if (reg & DPOT_ADDR_OTP)
+ return dpot_write_r8d8(dpot,
+ DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value << 2;
+
+ return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+ break;
default:
if (reg & DPOT_ADDR_CMD)
return dpot_write_d8(dpot, reg);
@@ -320,7 +408,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
}
}
-
static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
{
if (dpot->feat & F_SPI)
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 78b89fd2e2fd..a662f5987b68 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -47,9 +47,9 @@ enum dpot_devid {
AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
- BRDAC0 | BRDAC3, 6, 2),
+ BRDAC1 | BRDAC3, 6, 2),
AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
- BRDAC0 | BRDAC3, 8, 3),
+ BRDAC1 | BRDAC3, 8, 3),
AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
@@ -93,8 +93,10 @@ enum dpot_devid {
BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 8, 24),
- AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
- AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+ AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 8, 25),
+ AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+ BRDAC0, 10, 26),
AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 7, 28),
@@ -122,6 +124,12 @@ enum dpot_devid {
AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+ AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 10, 48),
+ AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+ BRDAC0, 8, 49),
+ AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50),
+ AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51),
};
#define DPOT_RDAC0 0
@@ -165,15 +173,24 @@ enum dpot_devid {
/* AD5291/2/3 use special commands */
#define DPOT_AD5291_RDAC 0x01
#define DPOT_AD5291_READ_RDAC 0x02
+#define DPOT_AD5291_STORE_XTPM 0x03
+#define DPOT_AD5291_CTRLREG 0x06
+#define DPOT_AD5291_UNLOCK_CMD 0x03
-/* AD524x use special commands */
-#define DPOT_AD5291_RDAC_AB 0x80
+/* AD5270/1/2/4 use special commands */
+#define DPOT_AD5270_1_2_4_RDAC 0x01
+#define DPOT_AD5270_1_2_4_READ_RDAC 0x02
+#define DPOT_AD5270_1_2_4_STORE_XTPM 0x03
+#define DPOT_AD5270_1_2_4_CTRLREG 0x07
+#define DPOT_AD5270_1_2_4_UNLOCK_CMD 0x03
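+
+/*
+ * The driver shifts these command codes left by two bits so that the two
+ * MSBs of a 10-bit RDAC value can share the first byte with the command.
+ */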
+
+#define DPOT_AD5282_RDAC_AB 0x80
#define DPOT_AD5273_FUSE 0x80
-#define DPOT_AD5270_2_3_FUSE 0x20
-#define DPOT_AD5270_2_3_OW 0x08
-#define DPOT_AD5272_3_A0 0x08
-#define DPOT_AD5270_2FUSE 0x80
+#define DPOT_AD5170_2_3_FUSE 0x20
+#define DPOT_AD5170_2_3_OW 0x08
+#define DPOT_AD5172_3_A0 0x08
+#define DPOT_AD5170_2FUSE 0x80
struct dpot_data;
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
new file mode 100644
index 000000000000..f9b91ba8900c
--- /dev/null
+++ b/drivers/misc/apds9802als.c
@@ -0,0 +1,347 @@
+/*
+ * apds9802als.c - apds9802 ALS Driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define ALS_MIN_RANGE_VAL 1
+#define ALS_MAX_RANGE_VAL 2
+#define POWER_STA_ENABLE 1
+#define POWER_STA_DISABLE 0
+
+#define DRIVER_NAME "apds9802als"
+
+struct als_data {
+ struct mutex mutex;
+};
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x81);
+ if (val < 0)
+ return val;
+ if (val & 1)
+ return sprintf(buf, "4095\n");
+ else
+ return sprintf(buf, "65535\n");
+}
+
+static int als_wait_for_data_ready(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ int retry = 10;
+
+ do {
+ msleep(30);
+ ret = i2c_smbus_read_byte_data(client, 0x86);
+ } while (!(ret & 0x80) && retry--);
+
+	if (retry < 0) {
+ dev_warn(dev, "timeout waiting for data ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static ssize_t als_lux0_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+ int ret_val;
+ int temp;
+
+ /* Protect against parallel reads */
+ pm_runtime_get_sync(dev);
+ mutex_lock(&data->mutex);
+
+ /* clear EOC interrupt status */
+ i2c_smbus_write_byte(client, 0x40);
+ /* start measurement */
+ temp = i2c_smbus_read_byte_data(client, 0x81);
+ i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
+
+ ret_val = als_wait_for_data_ready(dev);
+ if (ret_val < 0)
+ goto failed;
+
+ temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
+ if (temp < 0) {
+ ret_val = temp;
+ goto failed;
+ }
+ ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
+ if (ret_val < 0)
+ goto failed;
+
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+
+ temp = (ret_val << 8) | temp;
+ return sprintf(buf, "%d\n", temp);
+failed:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct als_data *data = i2c_get_clientdata(client);
+	int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val < 4096)
+ val = 1;
+ else if (val < 65536)
+ val = 2;
+ else
+ return -ERANGE;
+
+ pm_runtime_get_sync(dev);
+
+ /* Make sure nobody else reads/modifies/writes 0x81 while we
+ are active */
+ mutex_lock(&data->mutex);
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x81);
+ if (ret_val < 0)
+ goto fail;
+
+ /* Reset the bits before setting them */
+ ret_val = ret_val & 0xFA;
+
+ if (val == 1) /* Setting detection range up to 4k LUX */
+ ret_val = (ret_val | 0x01);
+ else /* Setting detection range up to 64k LUX*/
+ ret_val = (ret_val | 0x00);
+
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
+
+ if (ret_val >= 0) {
+ /* All OK */
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return count;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ pm_runtime_put_sync(dev);
+ return ret_val;
+}
+
+static int als_set_power_state(struct i2c_client *client, bool on_off)
+{
+ int ret_val;
+ struct als_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->mutex);
+ ret_val = i2c_smbus_read_byte_data(client, 0x80);
+ if (ret_val < 0)
+ goto fail;
+ if (on_off)
+ ret_val = ret_val | 0x01;
+ else
+ ret_val = ret_val & 0xFE;
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
+fail:
+ mutex_unlock(&data->mutex);
+ return ret_val;
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "apds9802als",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int ret_val;
+ /* Write the command and then switch on */
+ ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "failed default switch on write\n");
+ return ret_val;
+ }
+	/* detection range: 1~64K Lux, manual measurement */
+ ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
+ if (ret_val < 0)
+ dev_err(&client->dev, "failed default LUX on write\n");
+
+ /* We always get 0 for the 1st measurement after system power on,
+ * so make sure it is finished before user asks for data.
+ */
+ als_wait_for_data_ready(&client->dev);
+
+ return ret_val;
+}
+
+static int apds9802als_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct als_data *data;
+
+ data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+	i2c_set_clientdata(client, data);
+	/* init the mutex before the sysfs files that take it are created */
+	mutex_init(&data->mutex);
+
+	res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+	if (res) {
+		dev_err(&client->dev, "device create file failed\n");
+		goto als_error1;
+	}
+	dev_info(&client->dev, "ALS chip found\n");
+	als_set_default_config(client);
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_get(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ return res;
+als_error1:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+ return res;
+}
+
+static int apds9802als_remove(struct i2c_client *client)
+{
+ struct als_data *data = i2c_get_clientdata(client);
+
+ als_set_power_state(client, false);
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+ kfree(data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_resume(struct i2c_client *client)
+{
+ als_set_default_config(client);
+
+ pm_runtime_get(&client->dev);
+ pm_runtime_put(&client->dev);
+ return 0;
+}
+
+static int apds9802als_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, false);
+ return 0;
+}
+
+static int apds9802als_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ als_set_power_state(client, true);
+ return 0;
+}
+
+static const struct dev_pm_ops apds9802als_pm_ops = {
+ .runtime_suspend = apds9802als_runtime_suspend,
+ .runtime_resume = apds9802als_runtime_resume,
+};
+
+#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
+
+#else /* CONFIG_PM */
+#define apds9802als_suspend NULL
+#define apds9802als_resume NULL
+#define APDS9802ALS_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id apds9802als_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9802als_id);
+
+static struct i2c_driver apds9802als_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = APDS9802ALS_PM_OPS,
+ },
+ .probe = apds9802als_probe,
+ .remove = apds9802als_remove,
+ .suspend = apds9802als_suspend,
+ .resume = apds9802als_resume,
+ .id_table = apds9802als_id,
+};
+
+static int __init sensor_apds9802als_init(void)
+{
+ return i2c_add_driver(&apds9802als_driver);
+}
+
+static void __exit sensor_apds9802als_exit(void)
+{
+ i2c_del_driver(&apds9802als_driver);
+}
+module_init(sensor_apds9802als_init);
+module_exit(sensor_apds9802als_exit);
+
+MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com");
+MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
new file mode 100644
index 000000000000..200311fea369
--- /dev/null
+++ b/drivers/misc/apds990x.c
@@ -0,0 +1,1295 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/i2c/apds990x.h>
+
+/* Register map */
+#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
+#define APDS990X_ATIME 0x01 /* ALS ADC time */
+#define APDS990X_PTIME 0x02 /* Proximity ADC time */
+#define APDS990X_WTIME 0x03 /* Wait time */
+#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */
+#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */
+#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */
+#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */
+#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */
+#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */
+#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */
+#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */
+#define APDS990X_PERS 0x0c /* Interrupt persistence filters */
+#define APDS990X_CONFIG 0x0d /* Configuration */
+#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
+#define APDS990X_CONTROL 0x0f /* Gain control register */
+#define APDS990X_REV 0x11 /* Revision Number */
+#define APDS990X_ID 0x12 /* Device ID */
+#define APDS990X_STATUS 0x13 /* Device status */
+#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */
+#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */
+#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
+#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
+#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */
+#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */
+
+/* Control */
+#define APDS990X_MAX_AGAIN 3
+
+/* Enable register */
+#define APDS990X_EN_PIEN (0x1 << 5)
+#define APDS990X_EN_AIEN (0x1 << 4)
+#define APDS990X_EN_WEN (0x1 << 3)
+#define APDS990X_EN_PEN (0x1 << 2)
+#define APDS990X_EN_AEN (0x1 << 1)
+#define APDS990X_EN_PON (0x1 << 0)
+#define APDS990X_EN_DISABLE_ALL 0
+
+/* Status register */
+#define APDS990X_ST_PINT (0x1 << 5)
+#define APDS990X_ST_AINT (0x1 << 4)
+
+/* I2C access types */
+#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
+#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */
+#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */
+#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */
+
+#define APDS990x_ADDR_SHIFT 0
+#define APDS990x_CMD 0x80
+
+/* Interrupt ack commands */
+#define APDS990X_INT_ACK_ALS 0x6
+#define APDS990X_INT_ACK_PS 0x5
+#define APDS990X_INT_ACK_BOTH 0x7
+
+/* ptime */
+#define APDS990X_PTIME_DEFAULT	0xff /* Recommended conversion time 2.7 ms */
+
+/* wtime */
+#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
+
+#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */
+
+/* Persistence */
+#define APDS990X_APERS_SHIFT 0
+#define APDS990X_PPERS_SHIFT 4
+
+/* Supported ID:s */
+#define APDS990X_ID_0 0x0
+#define APDS990X_ID_4 0x4
+#define APDS990X_ID_29 0x29
+
+/* pgain and pdiode settings */
+#define APDS_PGAIN_1X 0x0
+#define APDS_PDIODE_IR 0x2
+
+#define APDS990X_LUX_OUTPUT_SCALE 10
+
+/* Reverse chip factors for threshold calculation */
+struct reverse_factors {
+ u32 afactor;
+ int cf1;
+ int irf1;
+ int cf2;
+ int irf2;
+};
+
+struct apds990x_chip {
+ struct apds990x_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex mutex; /* avoid parallel access */
+ struct regulator_bulk_data regs[2];
+ wait_queue_head_t wait;
+
+ int prox_en;
+ bool prox_continuous_mode;
+ bool lux_wait_fresh_res;
+
+ /* Chip parameters */
+ struct apds990x_chip_factors cf;
+ struct reverse_factors rcf;
+ u16 atime; /* als integration time */
+ u16 arate; /* als reporting rate */
+ u16 a_max_result; /* Max possible ADC value with current atime */
+ u8 again_meas; /* Gain used in last measurement */
+ u8 again_next; /* Next calculated gain */
+ u8 pgain;
+ u8 pdiode;
+ u8 pdrive;
+ u8 lux_persistence;
+ u8 prox_persistence;
+
+ u32 lux_raw;
+ u32 lux;
+ u16 lux_clear;
+ u16 lux_ir;
+ u16 lux_calib;
+ u32 lux_thres_hi;
+ u32 lux_thres_lo;
+
+ u32 prox_thres;
+ u16 prox_data;
+ u16 prox_calib;
+
+ char chipname[10];
+ u8 revision;
+};
+
+#define APDS_CALIB_SCALER 8192
+#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
+
+#define APDS_PROX_DEF_THRES 600
+#define APDS_PROX_HYSTERESIS 50
+#define APDS_LUX_DEF_THRES_HI 101
+#define APDS_LUX_DEF_THRES_LO 100
+#define APDS_DEFAULT_PROX_PERS 1
+
+#define APDS_TIMEOUT 2000
+#define APDS_STARTUP_DELAY 25000 /* us */
+#define APDS_RANGE 65535
+#define APDS_PROX_RANGE 1023
+#define APDS_LUX_GAIN_LO_LIMIT 100
+#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25
+
+#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */
+#define TIME_STEP_SCALER 32
+
+#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */
+#define APDS_LUX_DEFAULT_RATE 200
+
+static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
+static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
+
+/* The two tables below must match, i.e. a 10 Hz rate maps to persistence 1 */
+static const u16 arates_hz[] = {10, 5, 2, 1};
+static const u8 apersis[] = {1, 2, 4, 5};
+
+/* Regulators */
+static const char reg_vcc[] = "Vdd";
+static const char reg_vled[] = "Vled";
+
+static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ *data = ret;
+ return (int)ret;
+}
+
+static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+
+ reg &= ~APDS990x_CMD_TYPE_MASK;
+ reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+ ret = i2c_smbus_write_word_data(client, reg, data);
+ return (int)ret;
+}
+
+static int apds990x_mode_on(struct apds990x_chip *chip)
+{
+ /* ALS is mandatory, proximity optional */
+ u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
+ APDS990X_EN_WEN;
+
+ if (chip->prox_en)
+ reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
+
+ return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
+}
+
+static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
+{
+ u32 thres;
+ u32 cpl;
+ u32 ir;
+
+ if (lux == 0)
+ return 0;
+ else if (lux == APDS_RANGE)
+ return APDS_RANGE;
+
+ /*
+ * Reported LUX value is a combination of the IR and CLEAR channel
+ * values. However, interrupt threshold is only for clear channel.
+ * This function approximates needed HW threshold value for a given
+	 * LUX value under the current lighting conditions.
+	 * The IR level compared to visible light varies heavily depending
+	 * on the source of the light.
+ *
+ * Calculate threshold value for the next measurement period.
+ * Math: threshold = lux * cpl where
+ * cpl = atime * again / (glass_attenuation * device_factor)
+ * (count-per-lux)
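+	 *
+	 * For example, with the default uncovered-sensor factors set in
+	 * apds990x_probe() (ga = 1966, df = 52, APDS_PARAM_SCALE = 4096),
+	 * neutral calibration, atime = 50 and gain step 8, cpl works out
+	 * to about 1025, so a 100 lux target maps to roughly 1600 clear
+	 * channel counts before IR compensation.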
+ *
+ * First remove calibration. Division by four is to avoid overflow
+ */
+ lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
+
+ /* Multiplication by 64 is to increase accuracy */
+ cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
+ APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
+
+ thres = lux * cpl / 64;
+ /*
+ * Convert IR light from the latest result to match with
+ * new gain step. This helps to adapt with the current
+ * source of light.
+ */
+ ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
+ (u32)again[chip->again_meas];
+
+ /*
+ * Compensate count with IR light impact
+ * IAC1 > IAC2 (see apds990x_get_lux for formulas)
+ */
+ if (chip->lux_clear * APDS_PARAM_SCALE >=
+ chip->rcf.afactor * chip->lux_ir)
+ thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
+ APDS_PARAM_SCALE;
+ else
+ thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
+ APDS_PARAM_SCALE;
+
+ if (thres >= chip->a_max_result)
+ thres = chip->a_max_result - 1;
+ return thres;
+}
+
+static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
+{
+ u8 reg_value;
+
+ chip->atime = time_ms;
+ /* Formula is specified in the data sheet */
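+	/*
+	 * e.g. time_ms = 50: reg_value = 256 - (50 * 32) / 87 = 238,
+	 * giving a_max_result = 18 * 1024 = 18432 counts.
+	 */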
+ reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
+ /* Calculate max ADC value for given integration time */
+ chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
+ return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
+{
+ int ret, lo, hi;
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ if (data < chip->prox_thres) {
+ lo = 0;
+ hi = chip->prox_thres;
+ } else {
+ lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
+ if (chip->prox_continuous_mode)
+ hi = chip->prox_thres;
+ else
+ hi = APDS_RANGE;
+ }
+
+ ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
+ ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_athres(struct apds990x_chip *chip)
+{
+ int ret;
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ ret = apds990x_write_word(chip, APDS990X_AILTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
+ ret |= apds990x_write_word(chip, APDS990X_AIHTL,
+ apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_a_refresh(struct apds990x_chip *chip)
+{
+ /* This will force ALS interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
+ apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_p_refresh(struct apds990x_chip *chip)
+{
+ /* This will force proximity interrupt after the next measurement. */
+ apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
+ apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
+}
+
+/* Called always with mutex locked */
+static int apds990x_calc_again(struct apds990x_chip *chip)
+{
+ int curr_again = chip->again_meas;
+ int next_again = chip->again_meas;
+ int ret = 0;
+
+ /* Calculate suitable als gain */
+ if (chip->lux_clear == chip->a_max_result)
+ next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
+ else if (chip->lux_clear > chip->a_max_result / 2)
+ next_again--;
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ next_again += 2; /* Too dark. Increase gain by 2 steps */
+ else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+ next_again++;
+
+ /* Limit gain to available range */
+ if (next_again < 0)
+ next_again = 0;
+ else if (next_again > APDS990X_MAX_AGAIN)
+ next_again = APDS990X_MAX_AGAIN;
+
+	/* Check whether we can trust the measured result */
+ if (chip->lux_clear == chip->a_max_result)
+ /* Result can be totally garbage due to saturation */
+ ret = -ERANGE;
+ else if (next_again != curr_again &&
+ chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+ /*
+ * Gain is changed and measurement result is very small.
+ * Result can be totally garbage due to underflow
+ */
+ ret = -ERANGE;
+
+ chip->again_next = next_again;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+
+ /*
+ * Error means bad result -> re-measurement is needed. The forced
+ * refresh uses fastest possible persistence setting to get result
+ * as soon as possible.
+ */
+ if (ret < 0)
+ apds990x_force_a_refresh(chip);
+ else
+ apds990x_refresh_athres(chip);
+
+ return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+ int iac, iac1, iac2; /* IR adjusted counts */
+ u32 lpc; /* Lux per count */
+
+ /* Formulas:
+ * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+ * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
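+	 * lux = max(iac1, iac2, 0) * lpc / APDS_PARAM_SCALE, where
+	 * lpc = LUX_OUTPUT_SCALE * DF * GA / (ALS gain * atime)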
+ */
+ iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+ iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+ iac = max(iac1, iac2);
+ iac = max(iac, 0);
+
+ lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+ (u32)(again[chip->again_meas] * (u32)chip->atime);
+
+ return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+ switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+ case APDS990X_ST_AINT:
+ reg |= APDS990X_INT_ACK_ALS;
+ break;
+ case APDS990X_ST_PINT:
+ reg |= APDS990X_INT_ACK_PS;
+ break;
+ default:
+ reg |= APDS990X_INT_ACK_BOTH;
+ break;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+ struct apds990x_chip *chip = data;
+ u8 status;
+
+ apds990x_read_byte(chip, APDS990X_STATUS, &status);
+ apds990x_ack_int(chip, status);
+
+ mutex_lock(&chip->mutex);
+ if (!pm_runtime_suspended(&chip->client->dev)) {
+ if (status & APDS990X_ST_AINT) {
+ apds990x_read_word(chip, APDS990X_CDATAL,
+ &chip->lux_clear);
+ apds990x_read_word(chip, APDS990X_IRDATAL,
+ &chip->lux_ir);
+ /* Store used gain for calculations */
+ chip->again_meas = chip->again_next;
+
+ chip->lux_raw = apds990x_get_lux(chip,
+ chip->lux_clear,
+ chip->lux_ir);
+
+ if (apds990x_calc_again(chip) == 0) {
+ /* Result is valid */
+ chip->lux = chip->lux_raw;
+ chip->lux_wait_fresh_res = false;
+ wake_up(&chip->wait);
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "lux0_input");
+ }
+ }
+
+ if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+ u16 clr_ch;
+
+ apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+ /*
+ * If ALS channel is saturated at min gain,
+			 * proximity gives false positive values.
+ * Just ignore them.
+ */
+ if (chip->again_meas == 0 &&
+ clr_ch == chip->a_max_result)
+ chip->prox_data = 0;
+ else
+ apds990x_read_word(chip,
+ APDS990X_PDATAL,
+ &chip->prox_data);
+
+ apds990x_refresh_pthres(chip, chip->prox_data);
+ if (chip->prox_data < chip->prox_thres)
+ chip->prox_data = 0;
+ else if (!chip->prox_continuous_mode)
+ chip->prox_data = APDS_PROX_RANGE;
+ sysfs_notify(&chip->client->dev.kobj,
+ NULL, "prox0_raw");
+ }
+ }
+ mutex_unlock(&chip->mutex);
+ return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+ /* It is recommended to use disabled mode during these operations */
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+	/* conversion and wait times for different state machine states */
+ apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+ apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+ apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+ apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+ /* Persistence levels */
+ apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+ apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+ /* Start with relatively small gain */
+ chip->again_meas = 1;
+ chip->again_next = 1;
+ apds990x_write_byte(chip, APDS990X_CONTROL,
+ (chip->pdrive << 6) |
+ (chip->pdiode << 4) |
+ (chip->pgain << 2) |
+ (chip->again_next << 0));
+ return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u8 id;
+
+ ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+ if (ret < 0) {
+ dev_err(&client->dev, "REV read failed\n");
+ return ret;
+ }
+
+ switch (id) {
+ case APDS990X_ID_0:
+ case APDS990X_ID_4:
+ case APDS990X_ID_29:
+ snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+ int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0)
+ return err;
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+	/* Refresh all configs in case the regulators were off */
+ chip->prox_data = 0;
+ apds990x_configure(chip);
+ apds990x_mode_on(chip);
+ return 0;
+}
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+ apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 result;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO;
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_fresh_res,
+ msecs_to_jiffies(APDS_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+ if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+ result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
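+	/* APDS990X_LUX_OUTPUT_SCALE is 10, so this prints one decimal place */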
+ ret = sprintf(buf, "%d.%d\n",
+ result / APDS990X_LUX_OUTPUT_SCALE,
+ result % APDS990X_LUX_OUTPUT_SCALE);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
+
+static ssize_t apds990x_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_RANGE);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
+
+static ssize_t apds990x_lux_calib_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
+}
+
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ apds990x_lux_calib_format_show, NULL);
+
+static ssize_t apds990x_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->lux_calib);
+}
+
+static ssize_t apds990x_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+	if (value > APDS_RANGE)
+ return -EINVAL;
+
+ chip->lux_calib = value;
+
+ return len;
+}
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
+ apds990x_lux_calib_store);
+
+static ssize_t apds990x_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", arates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t apds990x_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->arate);
+}
+
+static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+ if (rate >= arates_hz[i])
+ break;
+
+ if (i == ARRAY_SIZE(arates_hz))
+ return -EINVAL;
+
+ /* Pick up corresponding persistence value */
+ chip->lux_persistence = apersis[i];
+ chip->arate = arates_hz[i];
+
+ /* If the chip is not in use, don't try to access it */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Persistence levels */
+ return apds990x_write_byte(chip, APDS990X_PERS,
+ (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+ (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+}
+
+static ssize_t apds990x_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = apds990x_set_arate(chip, value);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
+ apds990x_rate_store);
+
+static ssize_t apds990x_prox_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ if (pm_runtime_suspended(dev) || !chip->prox_en)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
+
+static ssize_t apds990x_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", APDS_PROX_RANGE);
+}
+
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
+
+static ssize_t apds990x_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_en);
+}
+
+static ssize_t apds990x_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+
+ if (!chip->prox_en)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_en++;
+ else if (chip->prox_en > 0)
+ chip->prox_en--;
+
+ if (!pm_runtime_suspended(dev))
+ apds990x_mode_on(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
+ apds990x_prox_enable_store);
+
+static const char reporting_modes[][9] = {"trigger", "periodic"};
+
+static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n",
+ reporting_modes[!!chip->prox_continuous_mode]);
+}
+
+static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+ if (sysfs_streq(buf, reporting_modes[0]))
+ chip->prox_continuous_mode = 0;
+ else if (sysfs_streq(buf, reporting_modes[1]))
+ chip->prox_continuous_mode = 1;
+ else
+ return -EINVAL;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
+ apds990x_prox_reporting_mode_show,
+ apds990x_prox_reporting_mode_store);
+
+static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
+}
+
+static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
+ apds990x_prox_reporting_avail_show, NULL);
+
+
+static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_hi);
+}
+
+static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_thres_lo);
+}
+
+static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > APDS_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+ /*
+ * Don't update values in HW if we are still waiting for
+ * first interrupt to come after device handle open call.
+ */
+ if (!chip->lux_wait_fresh_res)
+ apds990x_refresh_athres(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_above_show,
+ apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ apds990x_lux_thresh_below_show,
+ apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if ((value > APDS_RANGE) || (value == 0) ||
+ (value < APDS_PROX_HYSTERESIS))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_thres = value;
+
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+ apds990x_prox_threshold_show,
+ apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value) {
+ pm_runtime_get_sync(dev);
+ mutex_lock(&chip->mutex);
+ chip->lux_wait_fresh_res = true;
+ apds990x_force_a_refresh(chip);
+ apds990x_force_p_refresh(chip);
+ mutex_unlock(&chip->mutex);
+ } else {
+ if (!pm_runtime_suspended(dev))
+ pm_runtime_put(dev);
+ }
+ return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ apds990x_power_state_show,
+ apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct apds990x_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_thresh_above_value.attr,
+ &dev_attr_prox0_reporting_mode.attr,
+ &dev_attr_prox0_reporting_mode_avail.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group apds990x_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+static int __devinit apds990x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct apds990x_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ init_waitqueue_head(&chip->wait);
+ mutex_init(&chip->mutex);
+ chip->pdata = client->dev.platform_data;
+
+ if (chip->pdata == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ if (chip->pdata->cf.ga == 0) {
+ /* set uncovered sensor default parameters */
+ chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
+ chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
+ chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
+ chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
+ chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
+ chip->cf.df = 52;
+ } else {
+ chip->cf = chip->pdata->cf;
+ }
+
+ /* precalculate inverse chip factors for threshold control */
+ chip->rcf.afactor =
+ (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
+ (chip->cf.cf1 - chip->cf.cf2);
+ chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
+ chip->cf.cf1;
+ chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+ chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
+ chip->cf.cf2;
+
+ /* Set something to start with */
+ chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
+ chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
+ chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
+
+ chip->prox_thres = APDS_PROX_DEF_THRES;
+ chip->pdrive = chip->pdata->pdrive;
+ chip->pdiode = APDS_PDIODE_IR;
+ chip->pgain = APDS_PGAIN_1X;
+ chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
+ chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
+ chip->prox_continuous_mode = false;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vled;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+ err = apds990x_detect(chip);
+ if (err < 0) {
+ dev_err(&client->dev, "APDS990X not found\n");
+ goto fail3;
+ }
+
+ pm_runtime_set_active(&client->dev);
+
+ apds990x_configure(chip);
+ apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
+ apds990x_mode_on(chip);
+
+ pm_runtime_enable(&client->dev);
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+ err = request_threaded_irq(client->irq, NULL,
+ apds990x_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT,
+ "apds990x", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &apds990x_attribute_group[0]);
+fail4:
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit apds990x_remove(struct i2c_client *client)
+{
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+ sysfs_remove_group(&chip->client->dev.kobj,
+ apds990x_attribute_group);
+
+ if (chip->pdata && chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ if (!pm_runtime_suspended(&client->dev))
+ apds990x_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds990x_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ /*
+	 * If the chip was enabled at suspend time, everything is expected
+	 * to work nicely and smoothly, so chip_on is enough.
+ */
+ apds990x_chip_on(chip);
+
+ return 0;
+}
+#else
+#define apds990x_suspend NULL
+#define apds990x_resume NULL
+#define apds990x_shutdown NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int apds990x_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_off(chip);
+ return 0;
+}
+
+static int apds990x_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+ apds990x_chip_on(chip);
+ return 0;
+}
+
+#endif
+
+static const struct i2c_device_id apds990x_id[] = {
+ {"apds990x", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, apds990x_id);
+
+static const struct dev_pm_ops apds990x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
+ SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
+ apds990x_runtime_resume,
+ NULL)
+};
+
+static struct i2c_driver apds990x_driver = {
+ .driver = {
+ .name = "apds990x",
+ .owner = THIS_MODULE,
+ .pm = &apds990x_pm_ops,
+ },
+ .probe = apds990x_probe,
+ .remove = __devexit_p(apds990x_remove),
+ .id_table = apds990x_id,
+};
+
+static int __init apds990x_init(void)
+{
+ return i2c_add_driver(&apds990x_driver);
+}
+
+static void __exit apds990x_exit(void)
+{
+ i2c_del_driver(&apds990x_driver);
+}
+
+MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(apds990x_init);
+module_exit(apds990x_exit);
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
new file mode 100644
index 000000000000..cee632e645e1
--- /dev/null
+++ b/drivers/misc/bh1770glc.c
@@ -0,0 +1,1413 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c/bh1770glc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
+#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
+#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
+#define BH1770_I_LED3 0x83 /* LED3 current setting */
+#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
+#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
+#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
+#define BH1770_PART_ID 0x8a /* Part number and revision ID */
+#define BH1770_MANUFACT_ID	0x8b /* Manufacturer ID */
+#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
+#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
+#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
+#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
+#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
+#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
+#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
+#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
+#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
+#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
+#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
+#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
+#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
+#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
+
+/* MANUFACT_ID */
+#define BH1770_MANUFACT_ROHM 0x01
+#define BH1770_MANUFACT_OSRAM 0x03
+
+/* PART_ID */
+#define BH1770_PART 0x90
+#define BH1770_PART_MASK 0xf0
+#define BH1770_REV_MASK 0x0f
+#define BH1770_REV_SHIFT 0
+#define BH1770_REV_0 0x00
+#define BH1770_REV_1 0x01
+
+/* Operating modes for both */
+#define BH1770_STANDBY 0x00
+#define BH1770_FORCED 0x02
+#define BH1770_STANDALONE 0x03
+#define BH1770_SWRESET (0x01 << 2)
+
+#define BH1770_PS_TRIG_MEAS (1 << 0)
+#define BH1770_ALS_TRIG_MEAS (1 << 1)
+
+/* Interrupt control */
+#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
+#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
+#define BH1770_INT_ALS_ENA (1 << 1)
+#define BH1770_INT_PS_ENA (1 << 0)
+
+/* Interrupt status */
+#define BH1770_INT_LED1_DATA (1 << 0)
+#define BH1770_INT_LED1_INT (1 << 1)
+#define BH1770_INT_LED2_DATA (1 << 2)
+#define BH1770_INT_LED2_INT (1 << 3)
+#define BH1770_INT_LED3_DATA (1 << 4)
+#define BH1770_INT_LED3_INT (1 << 5)
+#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
+#define BH1770_INT_ALS_DATA (1 << 6)
+#define BH1770_INT_ALS_INT (1 << 7)
+
+/* Led channels */
+#define BH1770_LED1 0x00
+
+#define BH1770_DISABLE 0
+#define BH1770_ENABLE 1
+#define BH1770_PROX_CHANNELS 1
+
+#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
+#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
+#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
+#define BH1770_STARTUP_DELAY 50
+#define BH1770_RESET_TIME 10
+#define BH1770_TIMEOUT 2100 /* Timeout in ms (2.1 seconds) */
+
+#define BH1770_LUX_RANGE 65535
+#define BH1770_PROX_RANGE 255
+#define BH1770_COEF_SCALER 1024
+#define BH1770_CALIB_SCALER 8192
+#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
+#define BH1770_LUX_DEF_THRES 1000
+#define BH1770_PROX_DEF_THRES 70
+#define BH1770_PROX_DEF_ABS_THRES 100
+#define BH1770_DEFAULT_PERSISTENCE 10
+#define BH1770_PROX_MAX_PERSISTENCE 50
+#define BH1770_LUX_GA_SCALE 16384
+#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
+#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
+#define BH1770_LUX_CORR_SCALE 4096
+
+#define PROX_ABOVE_THRESHOLD 1
+#define PROX_BELOW_THRESHOLD 0
+
+#define PROX_IGNORE_LUX_LIMIT 500
+
+struct bh1770_chip {
+ struct bh1770_platform_data *pdata;
+ char chipname[10];
+ u8 revision;
+ struct i2c_client *client;
+ struct regulator_bulk_data regs[2];
+ struct mutex mutex; /* avoid parallel access */
+ wait_queue_head_t wait;
+
+ bool int_mode_prox;
+ bool int_mode_lux;
+ struct delayed_work prox_work;
+ u32 lux_cf; /* Chip specific factor */
+ u32 lux_ga;
+ u32 lux_calib;
+ int lux_rate_index;
+ u32 lux_corr;
+ u16 lux_data_raw;
+ u16 lux_threshold_hi;
+ u16 lux_threshold_lo;
+ u16 lux_thres_hi_onchip;
+ u16 lux_thres_lo_onchip;
+ bool lux_wait_result;
+
+ int prox_enable_count;
+ u16 prox_coef;
+ u16 prox_const;
+ int prox_rate;
+ int prox_rate_threshold;
+ u8 prox_persistence;
+ u8 prox_persistence_counter;
+ u8 prox_data;
+ u8 prox_threshold;
+ u8 prox_threshold_hw;
+ bool prox_force_update;
+ u8 prox_abs_thres;
+ u8 prox_led;
+};
+
+static const char reg_vcc[] = "Vcc";
+static const char reg_vleds[] = "Vleds";
+
+/*
+ * Supported stand-alone rates in ms from the chip data sheet
+ * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
+ */
+static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
+static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
+
+/* Supported IR-led currents in mA */
+static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
+
+/*
+ * Supported stand-alone rates in ms from the chip data sheet
+ * {100, 200, 500, 1000, 2000};
+ */
+static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
+
+/*
+ * Interrupt control functions are called while holding chip->mutex,
+ * except during module probe / remove.
+ */
+static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
+ int lux)
+{
+ chip->int_mode_lux = lux;
+ /* Set interrupt modes, interrupt active low, latched */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (lux << 1) | chip->int_mode_prox);
+}
+
+static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
+ int ps)
+{
+ chip->int_mode_prox = ps;
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) | (ps << 0));
+}
+
+/* chip->mutex is always held here */
+static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
+{
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /* Proper proximity response needs fastest lux rate (100ms) */
+ if (chip->prox_enable_count)
+ rate_index = 0;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_MEAS_RATE,
+ rate_index);
+}
+
+static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
+{
+ int rate;
+
+ rate = (mode == PROX_ABOVE_THRESHOLD) ?
+ chip->prox_rate_threshold : chip->prox_rate;
+
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_MEAS_RATE,
+ rate);
+}
+
+/* The infrared LED is controlled by the chip during proximity scanning */
+static inline int bh1770_led_cfg(struct bh1770_chip *chip)
+{
+ /* LED cfg, current for leds 1 and 2 */
+ return i2c_smbus_write_byte_data(chip->client,
+ BH1770_I_LED,
+ (BH1770_LED1 << 6) |
+ (BH1770_LED_5mA << 3) |
+ chip->prox_led);
+}
+
+/*
+ * The following two functions convert raw PS values from the HW to
+ * normalized values. The purpose is to compensate for differences between
+ * sensor versions and variants so that the result means about the same
+ * across versions.
+ */
+static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
+{
+ u16 adjusted;
+ adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
+ BH1770_COEF_SCALER);
+ if (adjusted > BH1770_PROX_RANGE)
+ adjusted = BH1770_PROX_RANGE;
+ return adjusted;
+}
+
+static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
+{
+ u16 raw;
+
+ raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
+ if (raw > chip->prox_const)
+ raw = raw - chip->prox_const;
+ else
+ raw = 0;
+ return raw;
+}
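+/*
+ * Illustrative example of the conversion above (not part of the driver):
+ * for the plain BH1770 the defaults set in bh1770_detect() below
+ * (prox_coef = BH1770_COEF_SCALER, prox_const = 0) make both conversions
+ * identities, clamped to BH1770_PROX_RANGE. For the SFH7770 variant
+ * (prox_coef = 819, prox_const = 40) a raw reading of 60 becomes
+ * (60 + 40) * 819 / 1024 = 79 on the normalized scale.
+ */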
+
+/*
+ * The following two functions convert raw lux values from the HW to
+ * normalized values. The purpose is to compensate for differences between
+ * sensor versions and variants so that the result means about the same
+ * across versions. chip->mutex is held when this is called.
+ */
+static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
+{
+ u8 tmp = 0;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
+ chip->prox_threshold_hw = tmp;
+
+ return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
+ tmp);
+}
+
+static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
+{
+ u32 lux;
+ lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
+ return min(lux, (u32)BH1770_LUX_RANGE);
+}
+
+static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
+ u16 adjusted)
+{
+ return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
+ u16 threshold_hi, u16 threshold_lo)
+{
+ u8 data[4];
+ int ret;
+
+ /* sysfs may call this when the chip is powered off */
+ if (pm_runtime_suspended(&chip->client->dev))
+ return 0;
+
+ /*
+ * Compensate threshold values with the correction factors if not
+ * set to minimum or maximum.
+ * Min & max values disable interrupts.
+ */
+ if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
+ threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
+
+ if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
+ threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
+
+ if (chip->lux_thres_hi_onchip == threshold_hi &&
+ chip->lux_thres_lo_onchip == threshold_lo)
+ return 0;
+
+ chip->lux_thres_hi_onchip = threshold_hi;
+ chip->lux_thres_lo_onchip = threshold_lo;
+
+ data[0] = threshold_hi;
+ data[1] = threshold_hi >> 8;
+ data[2] = threshold_lo;
+ data[3] = threshold_lo >> 8;
+
+ ret = i2c_smbus_write_i2c_block_data(chip->client,
+ BH1770_ALS_TH_UP_0,
+ ARRAY_SIZE(data),
+ data);
+ return ret;
+}
+
+static int bh1770_lux_get_result(struct bh1770_chip *chip)
+{
+ u16 data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
+ if (ret < 0)
+ return ret;
+
+ data = ret & 0xff;
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
+ if (ret < 0)
+ return ret;
+
+ chip->lux_data_raw = data | ((ret & 0xff) << 8);
+
+ return 0;
+}
+
+/* Calculate correction value which contains chip and device specific parts */
+static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
+{
+ u32 tmp;
+ /* Impact of glass attenuation correction */
+ tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
+ /* Impact of chip factor correction */
+ tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
+ /* Impact of Device specific calibration correction */
+ tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
+ return tmp;
+}
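+/*
+ * Illustrative example of the scaling above (not part of the driver):
+ * with lux_ga = BH1770_LUX_GA_SCALE, lux_cf = BH1770_NEUTRAL_CF and
+ * lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE, each division cancels its
+ * scaler, the result is BH1770_LUX_CORR_SCALE, and
+ * bh1770_lux_raw_to_adjusted() then passes raw readings through unchanged.
+ */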
+
+static int bh1770_lux_read_result(struct bh1770_chip *chip)
+{
+ bh1770_lux_get_result(chip);
+ return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
+}
+
+/*
+ * Chip on / off functions are called while holding the mutex, except
+ * during the probe or remove phase.
+ */
+static int bh1770_chip_on(struct bh1770_chip *chip)
+{
+ int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+
+ /* Reset the chip */
+ i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
+ BH1770_SWRESET);
+ usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
+
+ /*
+ * ALS is always started since proximity needs ALS results
+ * for reliability estimation.
+ * Let's assume dark until the first ALS measurement is ready.
+ */
+ chip->lux_data_raw = 0;
+ chip->prox_data = 0;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDALONE);
+
+ /* Assume reset defaults */
+ chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
+ chip->lux_thres_lo_onchip = 0;
+
+ return ret;
+}
+
+static void bh1770_chip_off(struct bh1770_chip *chip)
+{
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_INTERRUPT, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_ALS_CONTROL, BH1770_STANDBY);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_prox_mode_control(struct bh1770_chip *chip)
+{
+ if (chip->prox_enable_count) {
+ chip->prox_force_update = true; /* Force immediate update */
+
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_set_threshold(chip);
+ bh1770_led_cfg(chip);
+ bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
+ bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDALONE);
+ } else {
+ chip->prox_data = 0;
+ bh1770_lux_rate(chip, chip->lux_rate_index);
+ bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
+ i2c_smbus_write_byte_data(chip->client,
+ BH1770_PS_CONTROL, BH1770_STANDBY);
+ }
+ return 0;
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_prox_read_result(struct bh1770_chip *chip)
+{
+ int ret;
+ bool above;
+ u8 mode;
+
+ ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > chip->prox_threshold_hw)
+ above = true;
+ else
+ above = false;
+
+ /*
+ * When the ALS level goes above the limit, the proximity result may
+ * be a false positive, so ignore it. With real proximity there is
+ * a shadow causing low ALS levels.
+ */
+ if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
+ ret = 0;
+
+ chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
+
+ /* Strong proximity level or force mode requires immediate response */
+ if (chip->prox_data >= chip->prox_abs_thres ||
+ chip->prox_force_update)
+ chip->prox_persistence_counter = chip->prox_persistence;
+
+ chip->prox_force_update = false;
+
+ /* Persistence filtering to reduce false proximity events */
+ if (likely(above)) {
+ if (chip->prox_persistence_counter < chip->prox_persistence) {
+ chip->prox_persistence_counter++;
+ ret = -ENODATA;
+ } else {
+ mode = PROX_ABOVE_THRESHOLD;
+ ret = 0;
+ }
+ } else {
+ chip->prox_persistence_counter = 0;
+ mode = PROX_BELOW_THRESHOLD;
+ chip->prox_data = 0;
+ ret = 0;
+ }
+
+ /* Set proximity detection rate based on above or below value */
+ if (ret == 0) {
+ bh1770_prox_rate(chip, mode);
+ sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
+ }
+out:
+ return ret;
+}
+
+static int bh1770_detect(struct bh1770_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ s32 ret;
+ u8 manu, part;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
+ if (ret < 0)
+ goto error;
+ manu = (u8)ret;
+
+ ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
+ if (ret < 0)
+ goto error;
+ part = (u8)ret;
+
+ chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
+ chip->prox_coef = BH1770_COEF_SCALER;
+ chip->prox_const = 0;
+ chip->lux_cf = BH1770_NEUTRAL_CF;
+
+ if ((manu == BH1770_MANUFACT_ROHM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
+ return 0;
+ }
+
+ if ((manu == BH1770_MANUFACT_OSRAM) &&
+ ((part & BH1770_PART_MASK) == BH1770_PART)) {
+ snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
+ /* Values selected by comparing different versions */
+ chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
+ chip->prox_const = 40;
+ return 0;
+ }
+
+ ret = -ENODEV;
+error:
+ dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
+
+ return ret;
+}
+
+/*
+ * This work is re-scheduled at every proximity interrupt.
+ * If this work is running, it means that there hasn't been any
+ * proximity interrupt in time. The situation is handled as no-proximity.
+ * It would be nice to have a low-threshold interrupt, or an interrupt
+ * when the measurement and the high threshold are both 0, but neither
+ * of those exists. This is a workaround for the missing HW feature.
+ */
+
+static void bh1770_prox_work(struct work_struct *work)
+{
+ struct bh1770_chip *chip =
+ container_of(work, struct bh1770_chip, prox_work.work);
+
+ mutex_lock(&chip->mutex);
+ bh1770_prox_read_result(chip);
+ mutex_unlock(&chip->mutex);
+}
+
+/* This is the threaded IRQ handler */
+static irqreturn_t bh1770_irq(int irq, void *data)
+{
+ struct bh1770_chip *chip = data;
+ int status;
+ int rate = 0;
+
+ mutex_lock(&chip->mutex);
+ status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
+
+ /* Acknowledge interrupt by reading this register */
+ i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
+
+ /*
+ * Check if there is fresh data available for ALS.
+ * If this is the very first data, update the thresholds after that.
+ */
+ if (status & BH1770_INT_ALS_DATA) {
+ bh1770_lux_get_result(chip);
+ if (unlikely(chip->lux_wait_result)) {
+ chip->lux_wait_result = false;
+ wake_up(&chip->wait);
+ bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ }
+ }
+
+ /* Disable interrupt logic to guarantee acknowledgement */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (0 << 1) | (0 << 0));
+
+ if ((status & BH1770_INT_ALS_INT))
+ sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
+
+ if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
+ rate = prox_rates_ms[chip->prox_rate_threshold];
+ bh1770_prox_read_result(chip);
+ }
+
+ /* Re-enable interrupt logic */
+ i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+ (chip->int_mode_lux << 1) |
+ (chip->int_mode_prox << 0));
+ mutex_unlock(&chip->mutex);
+
+ /*
+ * Can't cancel the work while holding the mutex since the work
+ * uses the same mutex.
+ */
+ if (rate) {
+ /*
+ * Simulate the missing no-proximity interrupt 50 ms after the
+ * next expected interrupt time.
+ */
+ cancel_delayed_work_sync(&chip->prox_work);
+ schedule_delayed_work(&chip->prox_work,
+ msecs_to_jiffies(rate + 50));
+ }
+ return IRQ_HANDLED;
+}
+
+static ssize_t bh1770_power_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ ssize_t ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ if (value) {
+ pm_runtime_get_sync(dev);
+
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+ if (ret < 0) {
+ pm_runtime_put(dev);
+ goto leave;
+ }
+
+ /* This causes an interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ } else if (!pm_runtime_suspended(dev)) {
+ pm_runtime_put(dev);
+ }
+ ret = count;
+leave:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t bh1770_lux_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+ long timeout;
+
+ if (pm_runtime_suspended(dev))
+ return -EIO; /* Chip is not enabled at all */
+
+ timeout = wait_event_interruptible_timeout(chip->wait,
+ !chip->lux_wait_result,
+ msecs_to_jiffies(BH1770_TIMEOUT));
+ if (!timeout)
+ return -EIO;
+
+ mutex_lock(&chip->mutex);
+ ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static ssize_t bh1770_lux_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
+}
+
+static ssize_t bh1770_prox_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ /* Assume no proximity. Sensor will tell real state soon */
+ if (!chip->prox_enable_count)
+ chip->prox_data = 0;
+
+ if (value)
+ chip->prox_enable_count++;
+ else if (chip->prox_enable_count > 0)
+ chip->prox_enable_count--;
+ else
+ goto leave;
+
+ /* Run control only when chip is powered on */
+ if (!pm_runtime_suspended(dev))
+ bh1770_prox_mode_control(chip);
+leave:
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_prox_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%d\n", chip->prox_enable_count);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_prox_result_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ mutex_lock(&chip->mutex);
+ if (chip->prox_enable_count && !pm_runtime_suspended(dev))
+ ret = sprintf(buf, "%d\n", chip->prox_data);
+ else
+ ret = -EIO;
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static ssize_t bh1770_prox_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
+}
+
+static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_prox_rate_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
+}
+
+static ssize_t bh1770_get_prox_rate_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
+}
+
+static int bh1770_prox_rate_validate(int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
+ if (rate >= prox_rates_hz[i])
+ break;
+ return i;
+}
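+/*
+ * Illustrative example of the lookup above (not part of the driver):
+ * with prox_rates_hz = {100, 50, 33, 25, 14, 10, 5, 2}, a requested
+ * rate of 30 Hz maps to index 3 (25 Hz), and anything below 5 Hz falls
+ * through to the last index (2 Hz).
+ */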
+
+static ssize_t bh1770_set_prox_rate_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_set_prox_rate_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_rate = bh1770_prox_rate_validate(value);
+ mutex_unlock(&chip->mutex);
+ return count;
+}
+
+static ssize_t bh1770_get_prox_thres(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->prox_threshold);
+}
+
+static ssize_t bh1770_set_prox_thres(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ chip->prox_threshold = value;
+ ret = bh1770_prox_set_threshold(chip);
+ mutex_unlock(&chip->mutex);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t bh1770_prox_persistence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->prox_persistence);
+}
+
+static ssize_t bh1770_prox_persistence_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_MAX_PERSISTENCE)
+ return -EINVAL;
+
+ chip->prox_persistence = value;
+
+ return len;
+}
+
+static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", chip->prox_abs_thres);
+}
+
+static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > BH1770_PROX_RANGE)
+ return -EINVAL;
+
+ chip->prox_abs_thres = value;
+
+ return len;
+}
+
+static ssize_t bh1770_chip_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
+}
+
+static ssize_t bh1770_lux_calib_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
+}
+
+static ssize_t bh1770_lux_calib_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ ssize_t len;
+
+ mutex_lock(&chip->mutex);
+ len = sprintf(buf, "%u\n", chip->lux_calib);
+ mutex_unlock(&chip->mutex);
+ return len;
+}
+
+static ssize_t bh1770_lux_calib_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long value;
+ u32 old_calib;
+ u32 new_corr;
+
+ if (strict_strtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ old_calib = chip->lux_calib;
+ chip->lux_calib = value;
+ new_corr = bh1770_get_corr_value(chip);
+ if (new_corr == 0) {
+ chip->lux_calib = old_calib;
+ mutex_unlock(&chip->mutex);
+ return -EINVAL;
+ }
+ chip->lux_corr = new_corr;
+ /* Refresh thresholds on HW after changing correction value */
+ bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+
+ mutex_unlock(&chip->mutex);
+
+ return len;
+}
+
+static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+ int pos = 0;
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
+ pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
+ sprintf(buf + pos - 1, "\n");
+ return pos;
+}
+
+static ssize_t bh1770_get_lux_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
+}
+
+static ssize_t bh1770_set_lux_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ unsigned long rate_hz;
+ int ret, i;
+
+ if (strict_strtoul(buf, 0, &rate_hz))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
+ if (rate_hz >= lux_rates_hz[i])
+ break;
+
+ mutex_lock(&chip->mutex);
+ chip->lux_rate_index = i;
+ ret = bh1770_lux_rate(chip, i);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_hi);
+}
+
+static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", chip->lux_threshold_lo);
+}
+
+static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
+ const char *buf)
+{
+ int ret = 0;
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ if (thresh > BH1770_LUX_RANGE)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ *target = thresh;
+ /*
+ * Don't update values in HW if we are still waiting for
+ * first interrupt to come after device handle open call.
+ */
+ if (!chip->lux_wait_result)
+ ret = bh1770_lux_update_thresholds(chip,
+ chip->lux_threshold_hi,
+ chip->lux_threshold_lo);
+ mutex_unlock(&chip->mutex);
+ return ret;
+
+}
+
+static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bh1770_chip *chip = dev_get_drvdata(dev);
+ int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
+ bh1770_prox_enable_store);
+static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
+ bh1770_prox_abs_thres_show,
+ bh1770_prox_abs_thres_store);
+static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_thres,
+ bh1770_set_prox_thres);
+static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
+static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
+ bh1770_prox_persistence_show,
+ bh1770_prox_persistence_store);
+static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_above,
+ bh1770_set_prox_rate_above);
+static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
+ bh1770_get_prox_rate_below,
+ bh1770_set_prox_rate_below);
+static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
+ bh1770_lux_calib_store);
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+ bh1770_lux_calib_default_show,
+ NULL);
+static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
+ bh1770_set_lux_rate);
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_above,
+ bh1770_set_lux_thresh_above);
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+ bh1770_get_lux_thresh_below,
+ bh1770_set_lux_thresh_below);
+static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
+ bh1770_power_state_store);
+
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_lux0_calibscale.attr,
+ &dev_attr_lux0_calibscale_default.attr,
+ &dev_attr_lux0_input.attr,
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_rate.attr,
+ &dev_attr_lux0_rate_avail.attr,
+ &dev_attr_lux0_thresh_above_value.attr,
+ &dev_attr_lux0_thresh_below_value.attr,
+ &dev_attr_prox0_raw.attr,
+ &dev_attr_prox0_sensor_range.attr,
+ &dev_attr_prox0_raw_en.attr,
+ &dev_attr_prox0_thresh_above_count.attr,
+ &dev_attr_prox0_rate_above.attr,
+ &dev_attr_prox0_rate_below.attr,
+ &dev_attr_prox0_rate_avail.attr,
+ &dev_attr_prox0_thresh_above0_value.attr,
+ &dev_attr_prox0_thresh_above1_value.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_power_state.attr,
+ NULL
+};
+
+static struct attribute_group bh1770_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int __devinit bh1770_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bh1770_chip *chip;
+ int err;
+
+ chip = kzalloc(sizeof *chip, GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ mutex_init(&chip->mutex);
+ init_waitqueue_head(&chip->wait);
+ INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ err = -EINVAL;
+ goto fail1;
+ }
+
+ chip->pdata = client->dev.platform_data;
+ chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
+ chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
+ chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
+ chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
+
+ if (chip->pdata->glass_attenuation == 0)
+ chip->lux_ga = BH1770_NEUTRAL_GA;
+ else
+ chip->lux_ga = chip->pdata->glass_attenuation;
+
+ chip->prox_threshold = BH1770_PROX_DEF_THRES;
+ chip->prox_led = chip->pdata->led_def_curr;
+ chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
+ chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
+ chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
+ chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
+ chip->prox_data = 0;
+
+ chip->regs[0].supply = reg_vcc;
+ chip->regs[1].supply = reg_vleds;
+
+ err = regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regs), chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot get regulators\n");
+ goto fail1;
+ }
+
+ err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+ chip->regs);
+ if (err < 0) {
+ dev_err(&client->dev, "Cannot enable regulators\n");
+ goto fail2;
+ }
+
+ usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+ err = bh1770_detect(chip);
+ if (err < 0)
+ goto fail3;
+
+ /* Start chip */
+ bh1770_chip_on(chip);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ chip->lux_corr = bh1770_get_corr_value(chip);
+ if (chip->lux_corr == 0) {
+ dev_err(&client->dev, "Improper correction values\n");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ if (chip->pdata->setup_resources) {
+ err = chip->pdata->setup_resources();
+ if (err) {
+ err = -EINVAL;
+ goto fail3;
+ }
+ }
+
+ err = sysfs_create_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+ if (err < 0) {
+ dev_err(&chip->client->dev, "Sysfs registration failed\n");
+ goto fail4;
+ }
+
+ /*
+ * The chip needs a level-triggered interrupt to work. However,
+ * level triggering doesn't always work correctly with power
+ * management, so select both.
+ */
+ err = request_threaded_irq(client->irq, NULL,
+ bh1770_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_TRIGGER_LOW,
+ "bh1770", chip);
+ if (err) {
+ dev_err(&client->dev, "could not get IRQ %d\n",
+ client->irq);
+ goto fail5;
+ }
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+ return err;
+fail5:
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+fail4:
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+fail3:
+ regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+ kfree(chip);
+ return err;
+}
+
+static int __devexit bh1770_remove(struct i2c_client *client)
+{
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ free_irq(client->irq, chip);
+
+ sysfs_remove_group(&chip->client->dev.kobj,
+ &bh1770_attribute_group);
+
+ if (chip->pdata->release_resources)
+ chip->pdata->release_resources();
+
+ cancel_delayed_work_sync(&chip->prox_work);
+
+ if (!pm_runtime_suspended(&client->dev))
+ bh1770_chip_off(chip);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1770_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+
+ bh1770_chip_on(chip);
+
+ if (!pm_runtime_suspended(dev)) {
+ /*
+ * If we were enabled at suspend time, it is expected that
+ * everything works nicely and smoothly.
+ */
+ ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+ ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+ /* This causes an interrupt after the next measurement cycle */
+ bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+ BH1770_LUX_DEF_THRES);
+ /* Inform that we are waiting for a result from ALS */
+ chip->lux_wait_result = true;
+ bh1770_prox_mode_control(chip);
+ }
+ return ret;
+}
+
+#else
+#define bh1770_suspend NULL
+#define bh1770_shutdown NULL
+#define bh1770_resume NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int bh1770_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_off(chip);
+
+ return 0;
+}
+
+static int bh1770_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+ bh1770_chip_on(chip);
+
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id bh1770_id[] = {
+ {"bh1770glc", 0 },
+ {"sfh7770", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1770_id);
+
+static const struct dev_pm_ops bh1770_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
+ SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
+};
+
+static struct i2c_driver bh1770_driver = {
+ .driver = {
+ .name = "bh1770glc",
+ .owner = THIS_MODULE,
+ .pm = &bh1770_pm_ops,
+ },
+ .probe = bh1770_probe,
+ .remove = __devexit_p(bh1770_remove),
+ .id_table = bh1770_id,
+};
+
+static int __init bh1770_init(void)
+{
+ return i2c_add_driver(&bh1770_driver);
+}
+
+static void __exit bh1770_exit(void)
+{
+ i2c_del_driver(&bh1770_driver);
+}
+
+MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(bh1770_init);
+module_exit(bh1770_exit);
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index af2497ae5fe3..0a53500636c9 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -146,6 +146,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
struct inode *ret = new_inode(sb);
if (ret) {
+ ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
}
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
new file mode 100644
index 000000000000..34fe835921c4
--- /dev/null
+++ b/drivers/misc/isl29020.c
@@ -0,0 +1,248 @@
+/*
+ * isl29020.c - Intersil ALS Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+static DEFINE_MUTEX(mutex);
+
+static ssize_t als_sensing_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, 0x00);
+
+ if (val < 0)
+ return val;
+ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
+
+}
+
+static ssize_t als_lux_input_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val, val;
+ unsigned long int lux;
+ int temp;
+
+ pm_runtime_get_sync(dev);
+ msleep(100);
+
+ mutex_lock(&mutex);
+ temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
+ if (temp < 0) {
+ pm_runtime_put_sync(dev);
+ mutex_unlock(&mutex);
+ return temp;
+ }
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
+ mutex_unlock(&mutex);
+
+ if (ret_val < 0) {
+ pm_runtime_put_sync(dev);
+ return ret_val;
+ }
+
+ ret_val |= temp << 8;
+ val = i2c_smbus_read_byte_data(client, 0x00);
+ pm_runtime_put_sync(dev);
+ if (val < 0)
+ return val;
+ lux = ((1 << (2 * (val & 3))) * 1000 * ret_val) / 65536;
+ return sprintf(buf, "%ld\n", lux);
+}
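+/*
+ * Illustrative example of the lux formula above (not part of the driver):
+ * with the sensing-range bits set to 2, the full scale is
+ * (1 << 4) * 1000 = 16000 lux, so an ADC reading of 32768 (half of the
+ * 16-bit range) reports 16000 * 32768 / 65536 = 8000 lux.
+ */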
+
+static ssize_t als_sensing_range_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned int ret_val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val < 1 || val > 64000)
+ return -EINVAL;
+
+ /* Pick the smallest sensor range that will meet our requirements */
+ if (val <= 1000)
+ val = 1;
+ else if (val <= 4000)
+ val = 2;
+ else if (val <= 16000)
+ val = 3;
+ else
+ val = 4;
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+
+ ret_val &= 0xFC; /* reset the bits before setting them */
+ ret_val |= val - 1;
+ ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
+
+ if (ret_val < 0)
+ return ret_val;
+ return count;
+}
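+/*
+ * Illustrative example of the range selection above (not part of the
+ * driver): a request of 5000 lux selects code 3, so bits [1:0] of
+ * register 0x00 become 2 and the range read back via
+ * als_sensing_range_show() is (1 << 4) * 1000 = 16000 lux.
+ */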
+
+static void als_set_power_state(struct i2c_client *client, int enable)
+{
+ int ret_val;
+
+ ret_val = i2c_smbus_read_byte_data(client, 0x00);
+ if (ret_val < 0)
+ return;
+
+ if (enable)
+ ret_val |= 0x80;
+ else
+ ret_val &= 0x7F;
+
+ i2c_smbus_write_byte_data(client, 0x00, ret_val);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+ als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+ &dev_attr_lux0_sensor_range.attr,
+ &dev_attr_lux0_input.attr,
+ NULL
+};
+
+static struct attribute_group m_als_gr = {
+ .name = "isl29020",
+ .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+ int retval;
+
+ retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
+ if (retval < 0) {
+ dev_err(&client->dev, "default write failed.");
+ return retval;
+ }
+ return 0;
+}
+
+static int isl29020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+
+ res = als_set_default_config(client);
+ if (res < 0)
+ return res;
+
+ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+ if (res) {
+ dev_err(&client->dev, "isl29020: device create file failed\n");
+ return res;
+ }
+ dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
+ als_set_power_state(client, 0);
+ pm_runtime_enable(&client->dev);
+ return res;
+}
+
+static int isl29020_remove(struct i2c_client *client)
+{
+ struct als_data *data = i2c_get_clientdata(client);
+ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+ kfree(data);
+ return 0;
+}
+
+static struct i2c_device_id isl29020_id[] = {
+ { "isl29020", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29020_id);
+
+#ifdef CONFIG_PM
+
+static int isl29020_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 0);
+ return 0;
+}
+
+static int isl29020_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ als_set_power_state(client, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops isl29020_pm_ops = {
+ .runtime_suspend = isl29020_runtime_suspend,
+ .runtime_resume = isl29020_runtime_resume,
+};
+
+#define ISL29020_PM_OPS (&isl29020_pm_ops)
+#else /* CONFIG_PM */
+#define ISL29020_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_driver isl29020_driver = {
+ .driver = {
+ .name = "isl29020",
+ .pm = ISL29020_PM_OPS,
+ },
+ .probe = isl29020_probe,
+ .remove = isl29020_remove,
+ .id_table = isl29020_id,
+};
+
+static int __init sensor_isl29020_init(void)
+{
+ return i2c_add_driver(&isl29020_driver);
+}
+
+static void __exit sensor_isl29020_exit(void)
+{
+ i2c_del_driver(&isl29020_driver);
+}
+
+module_init(sensor_isl29020_init);
+module_exit(sensor_isl29020_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 343b5d8ea697..81d7fa4ec0db 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -52,32 +52,32 @@
#define REC_NUM_DEFAULT 10
enum cname {
- INVALID,
- INT_HARDWARE_ENTRY,
- INT_HW_IRQ_EN,
- INT_TASKLET_ENTRY,
- FS_DEVRW,
- MEM_SWAPOUT,
- TIMERADD,
- SCSI_DISPATCH_CMD,
- IDE_CORE_CP,
- DIRECT,
+ CN_INVALID,
+ CN_INT_HARDWARE_ENTRY,
+ CN_INT_HW_IRQ_EN,
+ CN_INT_TASKLET_ENTRY,
+ CN_FS_DEVRW,
+ CN_MEM_SWAPOUT,
+ CN_TIMERADD,
+ CN_SCSI_DISPATCH_CMD,
+ CN_IDE_CORE_CP,
+ CN_DIRECT,
};
enum ctype {
- NONE,
- PANIC,
- BUG,
- EXCEPTION,
- LOOP,
- OVERFLOW,
- CORRUPT_STACK,
- UNALIGNED_LOAD_STORE_WRITE,
- OVERWRITE_ALLOCATION,
- WRITE_AFTER_FREE,
- SOFTLOCKUP,
- HARDLOCKUP,
- HUNG_TASK,
+ CT_NONE,
+ CT_PANIC,
+ CT_BUG,
+ CT_EXCEPTION,
+ CT_LOOP,
+ CT_OVERFLOW,
+ CT_CORRUPT_STACK,
+ CT_UNALIGNED_LOAD_STORE_WRITE,
+ CT_OVERWRITE_ALLOCATION,
+ CT_WRITE_AFTER_FREE,
+ CT_SOFTLOCKUP,
+ CT_HARDLOCKUP,
+ CT_HUNG_TASK,
};
static char* cp_name[] = {
@@ -117,8 +117,8 @@ static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
-static enum cname cpoint = INVALID;
-static enum ctype cptype = NONE;
+static enum cname cpoint = CN_INVALID;
+static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
module_param(recur_count, int, 0644);
@@ -207,12 +207,12 @@ static enum ctype parse_cp_type(const char *what, size_t count)
return i + 1;
}
- return NONE;
+ return CT_NONE;
}
static const char *cp_type_to_str(enum ctype type)
{
- if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+ if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
return "None";
return cp_type[type - 1];
@@ -220,7 +220,7 @@ static const char *cp_type_to_str(enum ctype type)
static const char *cp_name_to_str(enum cname name)
{
- if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+ if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
return "INVALID";
return cp_name[name - 1];
@@ -245,7 +245,7 @@ static int lkdtm_parse_commandline(void)
return -EINVAL;
cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
- if (cptype == NONE)
+ if (cptype == CT_NONE)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
@@ -274,30 +274,30 @@ static int recursive_loop(int a)
static void lkdtm_do_action(enum ctype which)
{
switch (which) {
- case PANIC:
+ case CT_PANIC:
panic("dumptest");
break;
- case BUG:
+ case CT_BUG:
BUG();
break;
- case EXCEPTION:
+ case CT_EXCEPTION:
*((int *) 0) = 0;
break;
- case LOOP:
+ case CT_LOOP:
for (;;)
;
break;
- case OVERFLOW:
+ case CT_OVERFLOW:
(void) recursive_loop(0);
break;
- case CORRUPT_STACK: {
+ case CT_CORRUPT_STACK: {
volatile u32 data[8];
volatile u32 *p = data;
p[12] = 0x12345678;
break;
}
- case UNALIGNED_LOAD_STORE_WRITE: {
+ case CT_UNALIGNED_LOAD_STORE_WRITE: {
static u8 data[5] __attribute__((aligned(4))) = {1, 2,
3, 4, 5};
u32 *p;
@@ -309,7 +309,7 @@ static void lkdtm_do_action(enum ctype which)
*p = val;
break;
}
- case OVERWRITE_ALLOCATION: {
+ case CT_OVERWRITE_ALLOCATION: {
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -317,7 +317,7 @@ static void lkdtm_do_action(enum ctype which)
kfree(data);
break;
}
- case WRITE_AFTER_FREE: {
+ case CT_WRITE_AFTER_FREE: {
size_t len = 1024;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -326,21 +326,21 @@ static void lkdtm_do_action(enum ctype which)
memset(data, 0x78, len);
break;
}
- case SOFTLOCKUP:
+ case CT_SOFTLOCKUP:
preempt_disable();
for (;;)
cpu_relax();
break;
- case HARDLOCKUP:
+ case CT_HARDLOCKUP:
local_irq_disable();
for (;;)
cpu_relax();
break;
- case HUNG_TASK:
+ case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
break;
- case NONE:
+ case CT_NONE:
default:
break;
}
@@ -363,43 +363,43 @@ static int lkdtm_register_cpoint(enum cname which)
{
int ret;
- cpoint = INVALID;
+ cpoint = CN_INVALID;
if (lkdtm.entry != NULL)
unregister_jprobe(&lkdtm);
switch (which) {
- case DIRECT:
+ case CN_DIRECT:
lkdtm_do_action(cptype);
return 0;
- case INT_HARDWARE_ENTRY:
+ case CN_INT_HARDWARE_ENTRY:
lkdtm.kp.symbol_name = "do_IRQ";
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
break;
- case INT_HW_IRQ_EN:
+ case CN_INT_HW_IRQ_EN:
lkdtm.kp.symbol_name = "handle_IRQ_event";
lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
break;
- case INT_TASKLET_ENTRY:
+ case CN_INT_TASKLET_ENTRY:
lkdtm.kp.symbol_name = "tasklet_action";
lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
break;
- case FS_DEVRW:
+ case CN_FS_DEVRW:
lkdtm.kp.symbol_name = "ll_rw_block";
lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
break;
- case MEM_SWAPOUT:
+ case CN_MEM_SWAPOUT:
lkdtm.kp.symbol_name = "shrink_inactive_list";
lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
break;
- case TIMERADD:
+ case CN_TIMERADD:
lkdtm.kp.symbol_name = "hrtimer_start";
lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
break;
- case SCSI_DISPATCH_CMD:
+ case CN_SCSI_DISPATCH_CMD:
lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
break;
- case IDE_CORE_CP:
+ case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
@@ -416,7 +416,7 @@ static int lkdtm_register_cpoint(enum cname which)
cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
- cpoint = INVALID;
+ cpoint = CN_INVALID;
}
return ret;
@@ -445,7 +445,7 @@ static ssize_t do_register_entry(enum cname which, struct file *f,
cptype = parse_cp_type(buf, count);
free_page((unsigned long) buf);
- if (cptype == NONE)
+ if (cptype == CT_NONE)
return -EINVAL;
err = lkdtm_register_cpoint(which);
@@ -487,49 +487,49 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off);
+ return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}
static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_HW_IRQ_EN, f, buf, count, off);
+ return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}
static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off);
+ return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}
static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(FS_DEVRW, f, buf, count, off);
+ return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}
static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(MEM_SWAPOUT, f, buf, count, off);
+ return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}
static ssize_t timeradd_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(TIMERADD, f, buf, count, off);
+ return do_register_entry(CN_TIMERADD, f, buf, count, off);
}
static ssize_t scsi_dispatch_cmd_entry(struct file *f,
const char __user *buf, size_t count, loff_t *off)
{
- return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off);
+ return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}
static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
- return do_register_entry(IDE_CORE_CP, f, buf, count, off);
+ return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}
/* Special entry to just crash directly. Available without KPROBEs */
@@ -557,7 +557,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
type = parse_cp_type(buf, count);
free_page((unsigned long) buf);
- if (type == NONE)
+ if (type == CT_NONE)
return -EINVAL;
printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
@@ -649,7 +649,7 @@ static int __init lkdtm_module_init(void)
goto out_err;
}
- if (cpoint != INVALID && cptype != NONE) {
+ if (cpoint != CN_INVALID && cptype != CT_NONE) {
ret = lkdtm_register_cpoint(cpoint);
if (ret < 0) {
printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 4197a3cb26ba..b05db55c8c8e 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -343,8 +343,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
int retval;
retval = pci_enable_device(pdev);
- if (retval)
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed!\n");
goto err;
+ }
minor = phantom_get_free();
if (minor == PHANTOM_MAX_MINORS) {
@@ -356,8 +358,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
phantom_devices[minor] = 1;
retval = pci_request_regions(pdev, "phantom");
- if (retval)
+ if (retval) {
+ dev_err(&pdev->dev, "pci_request_regions failed!\n");
goto err_null;
+ }
retval = -ENOMEM;
pht = kzalloc(sizeof(*pht), GFP_KERNEL);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1f59ee2226ca..17bbacb1b4b1 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -417,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
+ int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
@@ -481,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
@@ -497,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
@@ -511,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
@@ -528,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
@@ -545,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+ if (!part_setup)
+ break;
+
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -621,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+ part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 9979f5e9765b..12eef393e216 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -2,9 +2,7 @@
# Makefile for the kernel mmc device drivers.
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
+subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
obj-$(CONFIG_MMC) += card/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3f2a912659af..57e4416b9ef0 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -14,6 +14,23 @@ config MMC_BLOCK
mount the filesystem. Almost everyone wishing MMC support
should say Y or M here.
+config MMC_BLOCK_MINORS
+ int "Number of minors per block device"
+ range 4 256
+ default 8
+ help
+ Number of minors per block device. One is needed for every
+ partition on the disk (plus one for the whole disk).
+
+ The total number of MMC minors available is 256, so the number
+ of supported block devices will be limited to 256 divided
+ by this number.
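+
+ For example, the default of 8 allows 256 / 8 = 32 MMC block devices,
+ each with up to 7 partitions plus the whole-disk node.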
+
+ Default is 8 to be backwards compatible with previous
+ hardwired device numbering.
+
+ If unsure, say 8 here.
+
config MMC_BLOCK_BOUNCE
bool "Use bounce buffer for simple hosts"
depends on MMC_BLOCK
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index 0d407514f67d..c73b406a06cd 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -2,10 +2,6 @@
# Makefile for MMC/SD card drivers
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
obj-$(CONFIG_MMC_TEST) += mmc_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 00073b7c0368..217f82037fc1 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -43,15 +43,27 @@
#include "queue.h"
MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+static DEFINE_MUTEX(block_mutex);
/*
- * max 8 partitions per card
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
*/
-#define MMC_SHIFT 3
-#define MMC_NUM_MINORS (256 >> MMC_SHIFT)
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
-static DEFINE_MUTEX(block_mutex);
-static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to 256 / number of minors per device.
+ */
+static int max_devices;
+
+/* 256 minors, so at most 256 separate devices */
+static DECLARE_BITMAP(dev_use, 256);
/*
* There is one mmc_blk_data per slot.
@@ -67,6 +79,9 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -88,10 +103,10 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devmaj = MAJOR(disk_devt(md->disk));
- int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+ int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
if (!devmaj)
- devidx = md->disk->first_minor >> MMC_SHIFT;
+ devidx = md->disk->first_minor / perdev_minors;
blk_cleanup_queue(md->queue.queue);
@@ -373,7 +388,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
-
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = readcmd;
brq.data.flags |= MMC_DATA_READ;
@@ -567,8 +581,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
struct mmc_blk_data *md;
int devidx, ret;
- devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
- if (devidx >= MMC_NUM_MINORS)
+ devidx = find_first_zero_bit(dev_use, max_devices);
+ if (devidx >= max_devices)
return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);
@@ -585,7 +599,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
*/
md->read_only = mmc_blk_readonly(card);
- md->disk = alloc_disk(1 << MMC_SHIFT);
+ md->disk = alloc_disk(perdev_minors);
if (md->disk == NULL) {
ret = -ENOMEM;
goto err_kfree;
@@ -602,7 +616,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->queue.data = md;
md->disk->major = MMC_BLOCK_MAJOR;
- md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
@@ -620,7 +634,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
* messages to tell when the card is present.
*/
- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+ "mmcblk%d", devidx);
blk_queue_logical_block_size(md->queue.queue, 512);
@@ -651,23 +666,15 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
- struct mmc_command cmd;
int err;
- /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
- if (mmc_card_blockaddr(card))
- return 0;
-
mmc_claim_host(card->host);
- cmd.opcode = MMC_SET_BLOCKLEN;
- cmd.arg = 512;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ err = mmc_set_blocklen(card, 512);
mmc_release_host(card->host);
if (err) {
- printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
- md->disk->disk_name, cmd.arg, err);
+ printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+ md->disk->disk_name, err);
return -EINVAL;
}
@@ -678,7 +685,6 @@ static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int err;
-
char cap_str[10];
/*
@@ -768,6 +774,11 @@ static int __init mmc_blk_init(void)
{
int res;
+ if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+ pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+ max_devices = 256 / perdev_minors;
+
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
goto out;
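With MODULE_PARAM_PREFIX redefined to "mmcblk." above, the config default can also be overridden at boot without rebuilding, for example (illustrative value):

	mmcblk.perdev_minors=16

on the kernel command line, after which mmc_blk_init() prints the override notice and computes max_devices = 256 / 16 = 16.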
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5dd8576b5c18..21adc27f4132 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -17,6 +17,11 @@
#include <linux/scatterlist.h>
#include <linux/swap.h> /* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
#define RESULT_OK 0
#define RESULT_FAIL 1
@@ -56,7 +61,9 @@ struct mmc_test_mem {
* struct mmc_test_area - information for performance tests.
* @max_sz: test area size (in bytes)
* @dev_addr: address on card at which to do performance tests
- * @max_segs: maximum segments in scatterlist @sg
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
* @blocks: number of (512 byte) blocks currently mapped by @sg
* @sg_len: length of currently mapped scatterlist @sg
* @mem: allocated memory
@@ -65,7 +72,9 @@ struct mmc_test_mem {
struct mmc_test_area {
unsigned long max_sz;
unsigned int dev_addr;
+ unsigned int max_tfr;
unsigned int max_segs;
+ unsigned int max_seg_sz;
unsigned int blocks;
unsigned int sg_len;
struct mmc_test_mem *mem;
@@ -73,12 +82,57 @@ struct mmc_test_area {
};
/**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: number of groups of sectors to check
+ * @sectors: number of sectors to check in one group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ */
+struct mmc_test_transfer_result {
+ struct list_head link;
+ unsigned int count;
+ unsigned int sectors;
+ struct timespec ts;
+ unsigned int rate;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: list of transfer measurements (mmc_test_transfer_result), if any
+ */
+struct mmc_test_general_result {
+ struct list_head link;
+ struct mmc_card *card;
+ int testcase;
+ int result;
+ struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+ struct list_head link;
+ struct mmc_card *card;
+ struct dentry *file;
+};
+
+/**
* struct mmc_test_card - test information.
* @card: card under test
* @scratch: transfer buffer
* @buffer: transfer buffer
* @highmem: buffer for highmem tests
* @area: information for performance tests
+ * @gr: pointer to results of current testcase
*/
struct mmc_test_card {
struct mmc_card *card;
@@ -88,7 +142,8 @@ struct mmc_test_card {
#ifdef CONFIG_HIGHMEM
struct page *highmem;
#endif
- struct mmc_test_area area;
+ struct mmc_test_area area;
+ struct mmc_test_general_result *gr;
};
/*******************************************************************/
@@ -100,17 +155,7 @@ struct mmc_test_card {
*/
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
- struct mmc_command cmd;
- int ret;
-
- cmd.opcode = MMC_SET_BLOCKLEN;
- cmd.arg = size;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_set_blocklen(test->card, size);
}
/*
@@ -245,27 +290,38 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
/*
* Allocate a lot of memory, preferably max_sz but at least min_sz. In case
- * there isn't much memory do not exceed 1/16th total lowmem pages.
+ * there isn't much memory, do not exceed 1/16th of total lowmem pages. Also
+ * do not exceed the maximum number of segments, and try not to make segments
+ * much bigger than the maximum segment size.
*/
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
- unsigned long max_sz)
+ unsigned long max_sz,
+ unsigned int max_segs,
+ unsigned int max_seg_sz)
{
unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+ unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
unsigned long page_cnt = 0;
unsigned long limit = nr_free_buffer_pages() >> 4;
struct mmc_test_mem *mem;
if (max_page_cnt > limit)
max_page_cnt = limit;
- if (max_page_cnt < min_page_cnt)
- max_page_cnt = min_page_cnt;
+ if (min_page_cnt > max_page_cnt)
+ min_page_cnt = max_page_cnt;
+
+ if (max_seg_page_cnt > max_page_cnt)
+ max_seg_page_cnt = max_page_cnt;
+
+ if (max_segs > max_page_cnt)
+ max_segs = max_page_cnt;
mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
if (!mem)
return NULL;
- mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_page_cnt,
+ mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
GFP_KERNEL);
if (!mem->arr)
goto out_free;
@@ -276,7 +332,7 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
__GFP_NORETRY;
- order = get_order(max_page_cnt << PAGE_SHIFT);
+ order = get_order(max_seg_page_cnt << PAGE_SHIFT);
while (1) {
page = alloc_pages(flags, order);
if (page || !order)
@@ -295,6 +351,11 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
break;
max_page_cnt -= 1UL << order;
page_cnt += 1UL << order;
+ if (mem->cnt >= max_segs) {
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+ break;
+ }
}
return mem;
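To make the clamping above concrete (illustrative numbers, 4 KiB pages, assuming the order-4 allocations succeed): mmc_test_alloc_mem(64 KiB, 512 KiB, 4, 64 KiB) computes max_seg_page_cnt = 16, allocates 64 KiB chunks, and stops once mem->cnt reaches max_segs = 4; the resulting 256 KiB is accepted because it is not below min_sz.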
@@ -310,7 +371,8 @@ out_free:
*/
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
struct scatterlist *sglist, int repeat,
- unsigned int max_segs, unsigned int *sg_len)
+ unsigned int max_segs, unsigned int max_seg_sz,
+ unsigned int *sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
@@ -322,8 +384,10 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
- if (sz < len)
+ if (len > sz)
len = sz;
+ if (len > max_seg_sz)
+ len = max_seg_sz;
if (sg)
sg = sg_next(sg);
else
@@ -355,6 +419,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
unsigned long sz,
struct scatterlist *sglist,
unsigned int max_segs,
+ unsigned int max_seg_sz,
unsigned int *sg_len)
{
struct scatterlist *sg = NULL;
@@ -365,7 +430,7 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
sg_init_table(sglist, max_segs);
*sg_len = 0;
- while (sz && i) {
+ while (sz) {
base = page_address(mem->arr[--i].page);
cnt = 1 << mem->arr[i].order;
while (sz && cnt) {
@@ -374,7 +439,9 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
continue;
last_addr = addr;
len = PAGE_SIZE;
- if (sz < len)
+ if (len > max_seg_sz)
+ len = max_seg_sz;
+ if (len > sz)
len = sz;
if (sg)
sg = sg_next(sg);
@@ -386,6 +453,8 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
sz -= len;
*sg_len += 1;
}
+ if (i == 0)
+ i = mem->cnt;
}
if (sg)
@@ -421,6 +490,30 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
}
/*
+ * Save transfer results for future usage
+ */
+static void mmc_test_save_transfer_result(struct mmc_test_card *test,
+ unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int rate)
+{
+ struct mmc_test_transfer_result *tr;
+
+ if (!test->gr)
+ return;
+
+ tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+ if (!tr)
+ return;
+
+ tr->count = count;
+ tr->sectors = sectors;
+ tr->ts = ts;
+ tr->rate = rate;
+
+ list_add_tail(&tr->link, &test->gr->tr_lst);
+}
+
+/*
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
@@ -436,8 +529,10 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
"seconds (%u kB/s, %u KiB/s)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+ (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
}
/*
@@ -458,9 +553,11 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
mmc_hostname(test->card->host), count, sectors, count,
- sectors >> 1, (sectors == 1 ? ".5" : ""),
+ sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
rate / 1000, rate / 1024);
+
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate);
}
/*
@@ -1215,16 +1312,22 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
int max_scatter)
{
struct mmc_test_area *t = &test->area;
+ int err;
t->blocks = sz >> 9;
if (max_scatter) {
- return mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
- t->max_segs, &t->sg_len);
- } else {
- return mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+ t->max_segs, t->max_seg_sz,
&t->sg_len);
+ } else {
+ err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+ t->max_seg_sz, &t->sg_len);
}
+ if (err)
+ printk(KERN_INFO "%s: Failed to map sg list\n",
+ mmc_hostname(test->card->host));
+ return err;
}
/*
@@ -1249,6 +1352,22 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
struct timespec ts1, ts2;
int ret;
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ struct mmc_test_area *t = &test->area;
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
ret = mmc_test_area_map(test, sz, max_scatter);
if (ret)
return ret;
@@ -1274,7 +1393,7 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
*/
static int mmc_test_area_fill(struct mmc_test_card *test)
{
- return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1, 0, 0);
}
@@ -1328,16 +1447,29 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
t->max_sz = TEST_AREA_MAX_SIZE;
else
t->max_sz = (unsigned long)test->card->pref_erase << 9;
+
+ t->max_segs = test->card->host->max_segs;
+ t->max_seg_sz = test->card->host->max_seg_size;
+
+ t->max_tfr = t->max_sz;
+ if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+ t->max_tfr = test->card->host->max_blk_count << 9;
+ if (t->max_tfr > test->card->host->max_req_size)
+ t->max_tfr = test->card->host->max_req_size;
+ if (t->max_tfr / t->max_seg_sz > t->max_segs)
+ t->max_tfr = t->max_segs * t->max_seg_sz;
+
/*
- * Try to allocate enough memory for the whole area. Less is OK
+ * Try to allocate enough memory for a max. sized transfer. Less is OK
* because the same memory can be mapped into the scatterlist more than
- * once.
+ * once. Also, take into account the limits imposed on scatterlist
+ * segments by the host driver.
*/
- t->mem = mmc_test_alloc_mem(min_sz, t->max_sz);
+ t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
+ t->max_seg_sz);
if (!t->mem)
return -ENOMEM;
- t->max_segs = DIV_ROUND_UP(t->max_sz, PAGE_SIZE);
t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
if (!t->sg) {
ret = -ENOMEM;
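As a concrete example of the new limits (illustrative host parameters): with max_sz = 4 MiB from pref_erase, max_blk_count = 65535, max_req_size = 512 KiB, max_segs = 128 and max_seg_sz = 64 KiB, max_tfr starts at 4 MiB, survives the block-count check (65535 blocks is about 32 MiB), is clamped to 512 KiB by max_req_size, and passes the segment check (512 KiB / 64 KiB = 8 segments, well under 128), so the performance tests never issue a request larger than 512 KiB.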
@@ -1401,7 +1533,7 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
int max_scatter)
{
- return mmc_test_area_io(test, test->area.max_sz, test->area.dev_addr,
+ return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
write, max_scatter, 1);
}
@@ -1446,12 +1578,13 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
unsigned int dev_addr;
int ret;
- for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
dev_addr = test->area.dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
if (ret)
return ret;
}
+ sz = test->area.max_tfr;
dev_addr = test->area.dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
@@ -1468,7 +1601,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
- for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
dev_addr = test->area.dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
if (ret)
@@ -1477,6 +1610,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
+ sz = test->area.max_tfr;
dev_addr = test->area.dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
@@ -1516,29 +1650,63 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
return 0;
}
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+ unsigned int dev_addr, i, cnt;
+ struct timespec ts1, ts2;
+ int ret;
+
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
+ }
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
/*
* Consecutive read performance by transfer size.
*/
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_read_perf(test, sz);
+ if (ret)
+ return ret;
+ }
+ sz = test->area.max_tfr;
+ return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
unsigned int dev_addr, i, cnt;
struct timespec ts1, ts2;
int ret;
- for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
- getnstimeofday(&ts1);
- for (i = 0; i < cnt; i++) {
- ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
- if (ret)
- return ret;
- dev_addr += (sz >> 9);
- }
- getnstimeofday(&ts2);
- mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ ret = mmc_test_area_erase(test);
+ if (ret)
+ return ret;
+ cnt = test->area.max_sz / sz;
+ dev_addr = test->area.dev_addr;
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+ if (ret)
+ return ret;
+ dev_addr += (sz >> 9);
}
+ getnstimeofday(&ts2);
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1548,27 +1716,15 @@ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
unsigned long sz;
- unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
int ret;
- for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
- ret = mmc_test_area_erase(test);
+ for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ ret = mmc_test_seq_write_perf(test, sz);
if (ret)
return ret;
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
- getnstimeofday(&ts1);
- for (i = 0; i < cnt; i++) {
- ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
- if (ret)
- return ret;
- dev_addr += (sz >> 9);
- }
- getnstimeofday(&ts2);
- mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
- return 0;
+ sz = test->area.max_tfr;
+ return mmc_test_seq_write_perf(test, sz);
}
/*
@@ -1853,6 +2009,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
static DEFINE_MUTEX(mmc_test_lock);
+static LIST_HEAD(mmc_test_result);
+
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
int i, ret;
@@ -1863,6 +2021,8 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_claim_host(test->card->host);
for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
+ struct mmc_test_general_result *gr;
+
if (testcase && ((i + 1) != testcase))
continue;
@@ -1881,6 +2041,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
}
}
+ gr = kzalloc(sizeof(struct mmc_test_general_result),
+ GFP_KERNEL);
+ if (gr) {
+ INIT_LIST_HEAD(&gr->tr_lst);
+
+ /* Record the data we already know */
+ gr->card = test->card;
+ gr->testcase = i;
+
+ /* Append the container to the global result list */
+ list_add_tail(&gr->link, &mmc_test_result);
+
+ /*
+ * Save the pointer to the created container in our private
+ * structure.
+ */
+ test->gr = gr;
+ }
+
ret = mmc_test_cases[i].run(test);
switch (ret) {
case RESULT_OK:
@@ -1906,6 +2085,10 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host), ret);
}
+ /* Save the result */
+ if (gr)
+ gr->result = ret;
+
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
@@ -1923,30 +2106,95 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_hostname(test->card->host));
}
-static ssize_t mmc_test_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static void mmc_test_free_result(struct mmc_card *card)
{
+ struct mmc_test_general_result *gr, *grs;
+
mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr, *trs;
+
+ if (card && gr->card != card)
+ continue;
+
+ list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
+ list_del(&tr->link);
+ kfree(tr);
+ }
+
+ list_del(&gr->link);
+ kfree(gr);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static LIST_HEAD(mmc_test_file_test);
+
+static int mtf_test_show(struct seq_file *sf, void *data)
+{
+ struct mmc_card *card = (struct mmc_card *)sf->private;
+ struct mmc_test_general_result *gr;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry(gr, &mmc_test_result, link) {
+ struct mmc_test_transfer_result *tr;
+
+ if (gr->card != card)
+ continue;
+
+ seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
+
+ list_for_each_entry(tr, &gr->tr_lst, link) {
+ seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ tr->count, tr->sectors,
+ (unsigned long)tr->ts.tv_sec,
+ (unsigned long)tr->ts.tv_nsec,
+ tr->rate);
+ }
+ }
+
mutex_unlock(&mmc_test_lock);
return 0;
}
-static ssize_t mmc_test_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static int mtf_test_open(struct inode *inode, struct file *file)
{
- struct mmc_card *card;
+ return single_open(file, mtf_test_show, inode->i_private);
+}
+
+static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *sf = (struct seq_file *)file->private_data;
+ struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- int testcase;
+ char lbuf[12];
+ long testcase;
- card = container_of(dev, struct mmc_card, dev);
+ if (count >= sizeof(lbuf))
+ return -EINVAL;
- testcase = simple_strtol(buf, NULL, 10);
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+ lbuf[count] = '\0';
+
+ if (strict_strtol(lbuf, 10, &testcase))
+ return -EINVAL;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
return -ENOMEM;
+ /*
+ * Remove all results associated with the given card, so that only
+ * data from the last run is kept.
+ */
+ mmc_test_free_result(card);
+
test->card = card;
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
@@ -1973,16 +2221,78 @@ static ssize_t mmc_test_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
+static const struct file_operations mmc_test_fops_test = {
+ .open = mtf_test_open,
+ .read = seq_read,
+ .write = mtf_test_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mmc_test_free_file_test(struct mmc_card *card)
+{
+ struct mmc_test_dbgfs_file *df, *dfs;
+
+ mutex_lock(&mmc_test_lock);
+
+ list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
+ if (card && df->card != card)
+ continue;
+ debugfs_remove(df->file);
+ list_del(&df->link);
+ kfree(df);
+ }
+
+ mutex_unlock(&mmc_test_lock);
+}
+
+static int mmc_test_register_file_test(struct mmc_card *card)
+{
+ struct dentry *file = NULL;
+ struct mmc_test_dbgfs_file *df;
+ int ret = 0;
+
+ mutex_lock(&mmc_test_lock);
+
+ if (card->debugfs_root)
+ file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
+ card->debugfs_root, card, &mmc_test_fops_test);
+
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(&card->dev,
+ "Can't create file. Perhaps debugfs is disabled.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+ if (!df) {
+ debugfs_remove(file);
+ dev_err(&card->dev,
+ "Can't allocate memory for internal usage.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ df->card = card;
+ df->file = file;
+
+ list_add(&df->link, &mmc_test_file_test);
+
+err:
+ mutex_unlock(&mmc_test_lock);
+
+ return ret;
+}
static int mmc_test_probe(struct mmc_card *card)
{
int ret;
- if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
+ if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
- ret = device_create_file(&card->dev, &dev_attr_test);
+ ret = mmc_test_register_file_test(card);
if (ret)
return ret;
@@ -1993,7 +2303,8 @@ static int mmc_test_probe(struct mmc_card *card)
static void mmc_test_remove(struct mmc_card *card)
{
- device_remove_file(&card->dev, &dev_attr_test);
+ mmc_test_free_result(card);
+ mmc_test_free_file_test(card);
}
static struct mmc_driver mmc_driver = {
@@ -2011,6 +2322,10 @@ static int __init mmc_test_init(void)
static void __exit mmc_test_exit(void)
{
+ /* Clear stale data if a card is still plugged in */
+ mmc_test_free_result(NULL);
+ mmc_test_free_file_test(NULL);
+
mmc_unregister_driver(&mmc_driver);
}
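The sysfs "test" attribute is thus replaced by a per-card debugfs file, written to start a test and read to retrieve the stored results. A minimal userspace sketch of driving it (illustrative only; the path below is hypothetical and depends on the debugfs mount point and the card name):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical card path; adjust to the actual debugfs mount and card */
		const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/test";
		char line[128];
		FILE *f;

		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "6\n");	/* run test case 6; the write returns when the test finishes */
		fclose(f);

		f = fopen(path, "r");	/* prints "Test 6: <result>" plus any transfer-rate lines */
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}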
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9c0b42bfe089..4e42d030e097 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -146,7 +146,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
}
#ifdef CONFIG_MMC_BLOCK_BOUNCE
- if (host->max_hw_segs == 1) {
+ if (host->max_segs == 1) {
unsigned int bouncesz;
bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -196,21 +196,23 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_hw_segs);
+ blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_phys_segs, GFP_KERNEL);
+ host->max_segs, GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
- sg_init_table(mq->sg, host->max_phys_segs);
+ sg_init_table(mq->sg, host->max_segs);
}
- init_MUTEX(&mq->thread_sem);
+ sema_init(&mq->thread_sem, 1);
+
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
+ host->index);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
goto free_bounce_sg;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 889e5f898f6f..86b479119332 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -2,10 +2,6 @@
# Makefile for the kernel mmc core.
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC) += mmc_core.o
mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7cd9749dc21d..af8dc6a2a317 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -22,13 +23,12 @@
#include "sdio_cis.h"
#include "bus.h"
-#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
static ssize_t mmc_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
switch (card->type) {
case MMC_TYPE_MMC:
@@ -62,7 +62,7 @@ static int mmc_bus_match(struct device *dev, struct device_driver *drv)
static int
mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
int retval = 0;
@@ -105,7 +105,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int mmc_bus_probe(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
return drv->probe(card);
}
@@ -113,7 +113,7 @@ static int mmc_bus_probe(struct device *dev)
static int mmc_bus_remove(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
drv->remove(card);
@@ -123,7 +123,7 @@ static int mmc_bus_remove(struct device *dev)
static int mmc_bus_suspend(struct device *dev, pm_message_t state)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
if (dev->driver && drv->suspend)
@@ -134,7 +134,7 @@ static int mmc_bus_suspend(struct device *dev, pm_message_t state)
static int mmc_bus_resume(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
if (dev->driver && drv->resume)
@@ -142,6 +142,41 @@ static int mmc_bus_resume(struct device *dev)
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
+
+static int mmc_runtime_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return mmc_power_save_host(card->host);
+}
+
+static int mmc_runtime_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return mmc_power_restore_host(card->host);
+}
+
+static int mmc_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops mmc_bus_pm_ops = {
+ .runtime_suspend = mmc_runtime_suspend,
+ .runtime_resume = mmc_runtime_resume,
+ .runtime_idle = mmc_runtime_idle,
+};
+
+#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define MMC_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_attrs = mmc_dev_attrs,
@@ -151,6 +186,7 @@ static struct bus_type mmc_bus_type = {
.remove = mmc_bus_remove,
.suspend = mmc_bus_suspend,
.resume = mmc_bus_resume,
+ .pm = MMC_PM_OPS_PTR,
};
int mmc_register_bus(void)
@@ -189,7 +225,7 @@ EXPORT_SYMBOL(mmc_unregister_driver);
static void mmc_release_card(struct device *dev)
{
- struct mmc_card *card = dev_to_mmc_card(dev);
+ struct mmc_card *card = mmc_dev_to_card(dev);
sdio_free_common_cis(card);
@@ -254,14 +290,16 @@ int mmc_add_card(struct mmc_card *card)
}
if (mmc_host_is_spi(card->host)) {
- printk(KERN_INFO "%s: new %s%s card on SPI\n",
+ printk(KERN_INFO "%s: new %s%s%s card on SPI\n",
mmc_hostname(card->host),
mmc_card_highspeed(card) ? "high speed " : "",
+ mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- printk(KERN_INFO "%s: new %s%s card at address %04x\n",
+ printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_highspeed(card) ? "high speed " : "",
+ mmc_card_ddr_mode(card) ? "DDR " : "",
type, card->rca);
}
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 18178766ab46..00a19710b6b4 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -14,7 +14,7 @@
#define MMC_DEV_ATTR(name, fmt, args...) \
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
- struct mmc_card *card = container_of(dev, struct mmc_card, dev); \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
return sprintf(buf, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 09eee6df0653..8f86d702e46e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -58,6 +58,7 @@ int mmc_assume_removable;
#else
int mmc_assume_removable = 1;
#endif
+EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
removable,
@@ -650,14 +651,24 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
}
/*
- * Change data bus width of a host.
+ * Change data bus width and DDR mode of a host.
*/
-void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
+ unsigned int ddr)
{
host->ios.bus_width = width;
+ host->ios.ddr = ddr;
mmc_set_ios(host);
}
+/*
+ * Change data bus width of a host.
+ */
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+{
+ mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE);
+}
+
/**
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
* @vdd: voltage (mV)
@@ -771,8 +782,9 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
- * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ * @mmc: the host to regulate
* @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
*
* Returns zero on success, else negative errno.
*
@@ -780,15 +792,12 @@ EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
* a particular supply voltage. This would normally be called from the
* set_ios() method.
*/
-int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
{
int result = 0;
int min_uV, max_uV;
- int enabled;
-
- enabled = regulator_is_enabled(supply);
- if (enabled < 0)
- return enabled;
if (vdd_bit) {
int tmp;
@@ -819,17 +828,25 @@ int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
else
result = 0;
- if (result == 0 && !enabled)
+ if (result == 0 && !mmc->regulator_enabled) {
result = regulator_enable(supply);
- } else if (enabled) {
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
}
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);
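Callers now pass their mmc_host so the core can track regulator enable state per host instead of querying the regulator. A minimal sketch of the intended call site (illustrative; example_host and its vcc field are hypothetical, not from this patch):

	#include <linux/mmc/host.h>
	#include <linux/regulator/consumer.h>

	struct example_host {			/* hypothetical driver private data */
		struct regulator *vcc;
	};

	static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		struct example_host *host = mmc_priv(mmc);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
		/* ... clock, bus width and power mode handled as before ... */
	}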
-#endif
+#endif /* CONFIG_REGULATOR */
/*
* Mask off any voltages we don't support and select
@@ -907,12 +924,7 @@ static void mmc_power_up(struct mmc_host *host)
*/
mmc_delay(10);
- if (host->f_min > 400000) {
- pr_warning("%s: Minimum clock frequency too high for "
- "identification mode\n", mmc_hostname(host));
- host->ios.clock = host->f_min;
- } else
- host->ios.clock = 400000;
+ host->ios.clock = host->f_init;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
@@ -1397,6 +1409,21 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
+{
+ struct mmc_command cmd;
+
+ if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
+ return 0;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = blocklen;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blocklen);
+
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =
@@ -1404,6 +1431,8 @@ void mmc_rescan(struct work_struct *work)
u32 ocr;
int err;
unsigned long flags;
+ int i;
+ const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
spin_lock_irqsave(&host->lock, flags);
@@ -1443,55 +1472,71 @@ void mmc_rescan(struct work_struct *work)
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
- mmc_claim_host(host);
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ mmc_claim_host(host);
- mmc_power_up(host);
- sdio_reset(host);
- mmc_go_idle(host);
+ if (freqs[i] >= host->f_min)
+ host->f_init = freqs[i];
+ else if (!i || freqs[i-1] > host->f_min)
+ host->f_init = host->f_min;
+ else {
+ mmc_release_host(host);
+ goto out;
+ }
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+#endif
+ mmc_power_up(host);
+ sdio_reset(host);
+ mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
+ mmc_send_if_cond(host, host->ocr_avail);
- /*
- * First we search for SDIO...
- */
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sdio(host, ocr)) {
- mmc_claim_host(host);
- /* try SDMEM (but not MMC) even if SDIO is broken */
- if (mmc_send_app_op_cond(host, 0, &ocr))
- goto out_fail;
+ /*
+ * First we search for SDIO...
+ */
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_sdio(host, ocr)) {
+ mmc_claim_host(host);
+ /*
+ * Try SDMEM (but not MMC) even if SDIO
+ * is broken.
+ */
+ if (mmc_send_app_op_cond(host, 0, &ocr))
+ goto out_fail;
+
+ if (mmc_attach_sd(host, ocr))
+ mmc_power_off(host);
+ }
+ goto out;
+ }
+ /*
+ * ...then normal SD...
+ */
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (!err) {
if (mmc_attach_sd(host, ocr))
mmc_power_off(host);
+ goto out;
}
- goto out;
- }
- /*
- * ...then normal SD...
- */
- err = mmc_send_app_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_sd(host, ocr))
- mmc_power_off(host);
- goto out;
- }
-
- /*
- * ...and finally MMC.
- */
- err = mmc_send_op_cond(host, 0, &ocr);
- if (!err) {
- if (mmc_attach_mmc(host, ocr))
- mmc_power_off(host);
- goto out;
- }
+ /*
+ * ...and finally MMC.
+ */
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (!err) {
+ if (mmc_attach_mmc(host, ocr))
+ mmc_power_off(host);
+ goto out;
+ }
out_fail:
- mmc_release_host(host);
- mmc_power_off(host);
-
+ mmc_release_host(host);
+ mmc_power_off(host);
+ }
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
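The effect of the retry loop with a concrete (illustrative) host: if host->f_min is 250 kHz, initialization is attempted at 400 kHz, then 300 kHz, and finally at f_min itself, with the host powered off between failed attempts; the rescan only gives up once the next table entry is below f_min and f_min has already been tried.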
@@ -1538,37 +1583,45 @@ void mmc_stop_host(struct mmc_host *host)
mmc_power_off(host);
}
-void mmc_power_save_host(struct mmc_host *host)
+int mmc_power_save_host(struct mmc_host *host)
{
+ int ret = 0;
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
mmc_bus_put(host);
- return;
+ return -EINVAL;
}
if (host->bus_ops->power_save)
- host->bus_ops->power_save(host);
+ ret = host->bus_ops->power_save(host);
mmc_bus_put(host);
mmc_power_off(host);
+
+ return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
-void mmc_power_restore_host(struct mmc_host *host)
+int mmc_power_restore_host(struct mmc_host *host)
{
+ int ret;
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
mmc_bus_put(host);
- return;
+ return -EINVAL;
}
mmc_power_up(host);
- host->bus_ops->power_restore(host);
+ ret = host->bus_ops->power_restore(host);
mmc_bus_put(host);
+
+ return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 9d9eef50e5d1..77240cd11bcf 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -22,8 +22,8 @@ struct mmc_bus_ops {
void (*detect)(struct mmc_host *);
int (*suspend)(struct mmc_host *);
int (*resume)(struct mmc_host *);
- void (*power_save)(struct mmc_host *);
- void (*power_restore)(struct mmc_host *);
+ int (*power_save)(struct mmc_host *);
+ int (*power_restore)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -35,6 +35,8 @@ void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
+void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
+ unsigned int ddr);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
@@ -58,7 +60,6 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
/* Module parameters */
extern int use_spi_crc;
-extern int mmc_assume_removable;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 46bc6d7551a3..eed1405fd742 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -134,6 +134,33 @@ static const struct file_operations mmc_ios_fops = {
.release = single_release,
};
+static int mmc_clock_opt_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->ios.clock;
+
+ return 0;
+}
+
+static int mmc_clock_opt_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+ /* We need this check because the input value is a u64 */
+ if (val > host->f_max)
+ return -EINVAL;
+
+ mmc_claim_host(host);
+ mmc_set_clock(host, (unsigned int) val);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
+ "%llu\n");
+
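The new attribute gives each host a writable "clock" node in its debugfs directory; for example (illustrative), writing 400000 to <debugfs>/mmc0/clock claims the host and calls mmc_set_clock() for 400 kHz, while any value above host->f_max is rejected with -EINVAL.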
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -150,11 +177,15 @@ void mmc_add_host_debugfs(struct mmc_host *host)
host->debugfs_root = root;
if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
- goto err_ios;
+ goto err_node;
+
+ if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_clock_fops))
+ goto err_node;
return;
-err_ios:
+err_node:
debugfs_remove_recursive(root);
host->debugfs_root = NULL;
err_root:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d80cfdc8edd2..10b8af27e03a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,8 +94,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
*/
- host->max_hw_segs = 1;
- host->max_phys_segs = 1;
+ host->max_segs = 1;
host->max_seg_size = PAGE_CACHE_SIZE;
host->max_req_size = PAGE_CACHE_SIZE;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6909a54c39be..995261f7fd70 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -258,6 +258,21 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
+ break;
+ case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
+ break;
+ case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
+ EXT_CSD_CARD_TYPE_26:
+ card->ext_csd.hs_max_dtr = 52000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
+ break;
case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
card->ext_csd.hs_max_dtr = 52000000;
break;
@@ -360,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
- int err;
+ int err, ddr = MMC_SDR_MODE;
u32 cid[4];
unsigned int max_dtr;
@@ -503,17 +518,35 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_clock(host, max_dtr);
/*
- * Activate wide bus (if supported).
+ * Indicate DDR mode (if supported).
+ */
+ if (mmc_card_highspeed(card)) {
+ if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
+ && (host->caps & (MMC_CAP_1_8V_DDR)))
+ ddr = MMC_1_8V_DDR_MODE;
+ else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
+ && (host->caps & (MMC_CAP_1_2V_DDR)))
+ ddr = MMC_1_2V_DDR_MODE;
+ }
+
+ /*
+ * Activate wide bus and DDR (if supported).
*/
if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
unsigned ext_csd_bit, bus_width;
if (host->caps & MMC_CAP_8_BIT_DATA) {
- ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
+ if (ddr)
+ ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
+ else
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
bus_width = MMC_BUS_WIDTH_8;
} else {
- ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
+ if (ddr)
+ ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
+ else
+ ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
bus_width = MMC_BUS_WIDTH_4;
}
@@ -524,12 +557,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
if (err) {
- printk(KERN_WARNING "%s: switch to bus width %d "
+ printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
"failed\n", mmc_hostname(card->host),
- 1 << bus_width);
+ 1 << bus_width, ddr);
err = 0;
} else {
- mmc_set_bus_width(card->host, bus_width);
+ mmc_card_set_ddr_mode(card);
+ mmc_set_bus_width_ddr(card->host, bus_width, ddr);
}
}
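Putting the two new blocks together with a concrete (illustrative) pairing: a high-speed card whose EXT_CSD card type advertises 1.8V DDR at 52 MHz, on a host that sets both MMC_CAP_1_8V_DDR and MMC_CAP_8_BIT_DATA, ends up with ddr = MMC_1_8V_DDR_MODE, switches to EXT_CSD_DDR_BUS_WIDTH_8, and on success is marked via mmc_card_set_ddr_mode() and configured with mmc_set_bus_width_ddr(card->host, MMC_BUS_WIDTH_8, ddr).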
@@ -623,12 +657,16 @@ static int mmc_resume(struct mmc_host *host)
return err;
}
-static void mmc_power_restore(struct mmc_host *host)
+static int mmc_power_restore(struct mmc_host *host)
{
+ int ret;
+
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- mmc_init_card(host, host->ocr, host->card);
+ ret = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
+
+ return ret;
}
static int mmc_sleep(struct mmc_host *host)
@@ -685,7 +723,7 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
+ if (!mmc_card_is_removable(host))
bus_ops = &mmc_ops_unsafe;
else
bus_ops = &mmc_ops;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0f5241085557..49da4dffd28e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -722,12 +722,16 @@ static int mmc_sd_resume(struct mmc_host *host)
return err;
}
-static void mmc_sd_power_restore(struct mmc_host *host)
+static int mmc_sd_power_restore(struct mmc_host *host)
{
+ int ret;
+
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_claim_host(host);
- mmc_sd_init_card(host, host->ocr, host->card);
+ ret = mmc_sd_init_card(host, host->ocr, host->card);
mmc_release_host(host);
+
+ return ret;
}
static const struct mmc_bus_ops mmc_sd_ops = {
@@ -750,7 +754,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
+ if (!mmc_card_is_removable(host))
bus_ops = &mmc_sd_ops_unsafe;
else
bus_ops = &mmc_sd_ops;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f332c52968b7..c3ad1058cd31 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
*/
#include <linux/err.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
@@ -456,7 +457,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
return -ENOENT;
card = oldcard;
- return 0;
}
if (card->type == MMC_TYPE_SD_COMBO) {
@@ -546,6 +546,11 @@ static void mmc_sdio_detect(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ /* Make sure card is powered before detecting it */
+ err = pm_runtime_get_sync(&host->card->dev);
+ if (err < 0)
+ goto out;
+
mmc_claim_host(host);
/*
@@ -555,6 +560,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
mmc_release_host(host);
+out:
if (err) {
mmc_sdio_remove(host);
@@ -562,6 +568,9 @@ static void mmc_sdio_detect(struct mmc_host *host)
mmc_detach_bus(host);
mmc_release_host(host);
}
+
+ /* Tell PM core that we're done */
+ pm_runtime_put(&host->card->dev);
}
/*
@@ -614,14 +623,6 @@ static int mmc_sdio_resume(struct mmc_host *host)
mmc_claim_host(host);
err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
- if (!err) {
- /* We may have switched to 1-bit mode during suspend. */
- err = sdio_enable_4bit_bus(host->card);
- if (err > 0) {
- mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
- err = 0;
- }
- }
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -647,11 +648,29 @@ static int mmc_sdio_resume(struct mmc_host *host)
return err;
}
+static int mmc_sdio_power_restore(struct mmc_host *host)
+{
+ int ret;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+ ret = mmc_sdio_init_card(host, host->ocr, host->card,
+ (host->pm_flags & MMC_PM_KEEP_POWER));
+ if (!ret && host->sdio_irqs)
+ mmc_signal_sdio_irq(host);
+ mmc_release_host(host);
+
+ return ret;
+}
+
static const struct mmc_bus_ops mmc_sdio_ops = {
.remove = mmc_sdio_remove,
.detect = mmc_sdio_detect,
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
+ .power_restore = mmc_sdio_power_restore,
};
@@ -699,6 +718,18 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
card = host->card;
/*
+ * Let runtime PM core know our card is active
+ */
+ err = pm_runtime_set_active(&card->dev);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable runtime PM for this card
+ */
+ pm_runtime_enable(&card->dev);
+
+ /*
* The number of functions on the card is encoded inside
* the ocr.
*/
@@ -712,6 +743,11 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
+
+ /*
+ * Enable Runtime PM for this func
+ */
+ pm_runtime_enable(&card->sdio_func[i]->dev);
}
mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 4a890dcb95ab..2716c7ab6bbf 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
@@ -125,21 +126,46 @@ static int sdio_bus_probe(struct device *dev)
if (!id)
return -ENODEV;
+ /* Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out;
+
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
- return ret;
+ goto disable_runtimepm;
+
+ ret = drv->probe(func, id);
+ if (ret)
+ goto disable_runtimepm;
- return drv->probe(func, id);
+ return 0;
+
+disable_runtimepm:
+ pm_runtime_put_noidle(dev);
+out:
+ return ret;
}
static int sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret;
+
+ /* Make sure card is powered before invoking ->remove() */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto out;
drv->remove(func);
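A minimal sketch of how an SDIO function driver is expected to cooperate with the scheme described in the comment above (illustrative only; the example_* names are hypothetical):

	#include <linux/pm_runtime.h>
	#include <linux/mmc/sdio_func.h>

	static int example_probe(struct sdio_func *func,
				 const struct sdio_device_id *id)
	{
		/* ... claim host, enable the function, register the device ... */
		pm_runtime_put_noidle(&func->dev);	/* allow runtime suspend again */
		return 0;
	}

	static void example_remove(struct sdio_func *func)
	{
		pm_runtime_get_noresume(&func->dev);	/* balance the put in probe */
		/* ... unregister, disable the function ... */
	}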
@@ -151,9 +177,63 @@ static int sdio_bus_remove(struct device *dev)
sdio_release_host(func);
}
+ /* First, undo the increment made directly above */
+ pm_runtime_put_noidle(dev);
+
+ /* Then undo the runtime PM settings in sdio_bus_probe() */
+ pm_runtime_put_noidle(dev);
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdio_bus_pm_prepare(struct device *dev)
+{
+ /*
+ * Resume an SDIO device which was suspended at run time at this
+ * point, in order to allow standard SDIO suspend/resume paths
+ * to keep working as usual.
+ *
+ * Ultimately, the SDIO driver itself will decide (in its
+ * suspend handler, or lack thereof) whether the card should be
+ * removed or kept, and if kept, at what power state.
+ *
+ * At this point, the PM core has increased our use count, so it's
+ * safe to directly resume the device. After the system is resumed
+ * again, the PM core will drop its runtime PM use count back, and
+ * if needed the device will be suspended again.
+ *
+ * The end result is guaranteed to be a power state that is
+ * coherent with the device's runtime PM use count.
+ *
+ * The return value of pm_runtime_resume is deliberately unchecked
+ * since there is little point in failing system suspend if a
+ * device can't be resumed.
+ */
+ pm_runtime_resume(dev);
+
return 0;
}
+static const struct dev_pm_ops sdio_bus_pm_ops = {
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
+ .prepare = sdio_bus_pm_prepare,
+};
+
+#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define SDIO_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
static struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_attrs = sdio_dev_attrs,
@@ -161,6 +241,7 @@ static struct bus_type sdio_bus_type = {
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
.remove = sdio_bus_remove,
+ .pm = SDIO_PM_OPS_PTR,
};
int sdio_register_bus(void)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 68d12794cfd9..d618e8673996 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -130,6 +130,16 @@ config MMC_SDHCI_CNS3XXX
If unsure, say N.
+config MMC_SDHCI_ESDHC_IMX
+ bool "SDHCI platform support for the Freescale eSDHC i.MX controller"
+ depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5)
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Freescale eSDHC controller support on the platform
+ bus, found on platforms like mx35/51.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -145,6 +155,18 @@ config MMC_SDHCI_S3C
If unsure, say N.
+config MMC_SDHCI_PXA
+ tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support"
+ depends on ARCH_PXA || ARCH_MMP
+ select MMC_SDHCI
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller.
+ If you have a PXA168/PXA910/MMP2 platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_SPEAR
tristate "SDHCI support on ST SPEAr platform"
depends on MMC_SDHCI && PLAT_SPEAR
@@ -237,7 +259,7 @@ endchoice
config MMC_ATMELMCI_DMA
bool "Atmel MCI DMA support (EXPERIMENTAL)"
- depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL
+ depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL
help
Say Y here to have the Atmel MCI driver use a DMA engine to
do data transfers and thus increase the throughput and
@@ -395,6 +417,7 @@ config MMC_TMIO
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
+ select MISC_DEVICES
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
@@ -451,3 +474,17 @@ config MMC_JZ4740
SoCs.
If you have a board based on such a SoC and with a SD/MMC slot,
say Y or M here.
+
+config MMC_USHC
+ tristate "USB SD Host Controller (USHC) support"
+ depends on USB
+ help
+ This selects support for USB SD Host Controllers based on
+ the Cypress Astoria chip with firmware compliant with CSR's
+ USB SD Host Controller specification (CS-118793-SP).
+
+ CSR boards with this device include: USB<>SDIO (M1985v2),
+ and Ultrasira.
+
+ Note: These controllers only support SDIO cards and do not
+ support MMC or SD memory cards.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 840bcb52d82f..7b645ff43b30 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -2,16 +2,13 @@
# Makefile for MMC/SD host controller drivers
#
-ifeq ($(CONFIG_MMC_DEBUG),y)
- EXTRA_CFLAGS += -DDEBUG
-endif
-
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -36,10 +33,12 @@ obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
+obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
sdhci-platform-y := sdhci-pltfm.o
sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 87226cd202a5..591ab540b407 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -928,7 +928,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
+ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
@@ -947,8 +947,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
mmc->max_blk_size = MCI_MAXBLKSIZE;
mmc->max_blk_count = MCI_BLKATONCE;
mmc->max_req_size = MCI_BUFSIZE;
- mmc->max_phys_segs = MCI_BLKATONCE;
- mmc->max_hw_segs = MCI_BLKATONCE;
+ mmc->max_segs = MCI_BLKATONCE;
mmc->max_seg_size = MCI_BUFSIZE;
host = mmc_priv(mmc);
@@ -1017,7 +1016,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Map I/O region
*/
- host->baseaddr = ioremap(res->start, res->end - res->start + 1);
+ host->baseaddr = ioremap(res->start, resource_size(res));
if (!host->baseaddr) {
ret = -ENOMEM;
goto fail1;
@@ -1093,7 +1092,7 @@ fail4b:
fail5:
mmc_free_host(mmc);
fail6:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
dev_err(&pdev->dev, "probe failed, err %d\n", ret);
return ret;
}
@@ -1138,7 +1137,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
iounmap(host->baseaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
mmc_free_host(mmc);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 95ef864ad8f9..301351a5d838 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1618,8 +1618,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
if (slot_data->bus_width >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_req_size = 32768 * 512;
mmc->max_blk_size = 32768;
mmc->max_blk_count = 512;
@@ -1777,7 +1776,7 @@ static int __init atmci_probe(struct platform_device *pdev)
}
ret = -ENOMEM;
- host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ host->regs = ioremap(regs->start, resource_size(regs));
if (!host->regs)
goto err_ioremap;
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index c8da5d30a861..41e5a60493ad 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -964,7 +964,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
goto out1;
}
- host->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ host->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!host->ioarea) {
dev_err(&pdev->dev, "mmio already in use\n");
@@ -998,7 +998,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
mmc->f_max = 24000000;
mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
- mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 4b0e677d7295..bac7d62866b7 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -469,7 +469,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
}
mmc->ops = &sdh_ops;
- mmc->max_phys_segs = 32;
+ mmc->max_segs = 32;
mmc->max_seg_size = 1 << 16;
mmc->max_blk_size = 1 << 11;
mmc->max_blk_count = 1 << 11;
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index ca3bdc831900..66b4ce587f4b 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -25,7 +25,7 @@ static const u8 cb710_src_freq_mhz[16] = {
50, 55, 60, 65, 70, 75, 80, 85
};
-static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
+static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
@@ -33,8 +33,11 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
u32 divider_idx;
int src_hz;
- /* this is magic, unverifiable for me, unless I get
- * MMC card with cables connected to bus signals */
+ /* on CB710 in HP nx9500:
+ * src_freq_idx == 0
+ * indexes 1-7 work as written in the table
+ * indexes 0,8-15 give no clock output
+ */
pci_read_config_dword(pdev, 0x48, &src_freq_idx);
src_freq_idx = (src_freq_idx >> 16) & 0xF;
src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
@@ -46,13 +49,15 @@ static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
if (src_freq_idx)
divider_idx |= 0x8;
+ else if (divider_idx == 0)
+ divider_idx = 1;
cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
dev_dbg(cb710_slot_dev(slot),
- "clock set to %d Hz, wanted %d Hz; flag = %d\n",
+ "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n",
src_hz >> cb710_clock_divider_log2[divider_idx & 7],
- hz, (divider_idx & 8) != 0);
+ hz, src_freq_idx, divider_idx & 7, divider_idx & 8);
}
static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
@@ -95,16 +100,8 @@ static void cb710_mmc_reset_events(struct cb710_slot *slot)
cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
}
-static int cb710_mmc_is_card_inserted(struct cb710_slot *slot)
-{
- return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
- & CB710_MMC_S3_CARD_DETECTED;
-}
-
static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
{
- dev_dbg(cb710_slot_dev(slot), "configuring %d-data-line%s mode\n",
- enable ? 4 : 1, enable ? "s" : "");
if (enable)
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
CB710_MMC_C1_4BIT_DATA_BUS, 0);
@@ -494,13 +491,8 @@ static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
reader->mrq = mrq;
cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
- if (cb710_mmc_is_card_inserted(slot)) {
- if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
- cb710_mmc_command(mmc, mrq->stop);
- mdelay(1);
- } else {
- mrq->cmd->error = -ENOMEDIUM;
- }
+ if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
+ cb710_mmc_command(mmc, mrq->stop);
tasklet_schedule(&reader->finish_req_tasklet);
}
@@ -512,7 +504,7 @@ static int cb710_mmc_powerup(struct cb710_slot *slot)
#endif
int err;
- /* a lot of magic; see comment in cb710_mmc_set_clock() */
+ /* a lot of magic for now */
dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
@@ -572,13 +564,7 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct cb710_mmc_reader *reader = mmc_priv(mmc);
int err;
- cb710_mmc_set_clock(mmc, ios->clock);
-
- if (!cb710_mmc_is_card_inserted(slot)) {
- dev_dbg(cb710_slot_dev(slot),
- "no card inserted - ignoring bus powerup request\n");
- ios->power_mode = MMC_POWER_OFF;
- }
+ cb710_mmc_select_clock_divider(mmc, ios->clock);
if (ios->power_mode != reader->last_power_mode)
switch (ios->power_mode) {
@@ -619,6 +605,14 @@ static int cb710_mmc_get_ro(struct mmc_host *mmc)
& CB710_MMC_S3_WRITE_PROTECTED;
}
+static int cb710_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_CARD_DETECTED;
+}
+
static int cb710_mmc_irq_handler(struct cb710_slot *slot)
{
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -664,7 +658,8 @@ static void cb710_mmc_finish_request_tasklet(unsigned long data)
static const struct mmc_host_ops cb710_mmc_host = {
.request = cb710_mmc_request,
.set_ios = cb710_mmc_set_ios,
- .get_ro = cb710_mmc_get_ro
+ .get_ro = cb710_mmc_get_ro,
+ .get_cd = cb710_mmc_get_cd,
};
#ifdef CONFIG_PM
@@ -746,6 +741,7 @@ static int __devinit cb710_mmc_init(struct platform_device *pdev)
err_free_mmc:
dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
+ cb710_set_irq_handler(slot, NULL);
mmc_free_host(mmc);
return err;
}
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 33d9f1b00862..e15547cf701f 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -138,7 +138,7 @@
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
* and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
- * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
+ * for drivers with max_segs == 1, making the segments bigger (64KB)
* than the page or two that's otherwise typical. nr_sg (passed from
* platform data) == 16 gives at least the same throughput boost, using
* EDMA transfer linkage instead of spending CPU time copying pages.
@@ -1239,8 +1239,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
* Each hw_seg uses one EDMA parameter RAM slot, always one
* channel and then usually some linked slots.
*/
- mmc->max_hw_segs = 1 + host->n_link;
- mmc->max_phys_segs = mmc->max_hw_segs;
+ mmc->max_segs = 1 + host->n_link;
/* EDMA limit per hw segment (one or two MBytes) */
mmc->max_seg_size = MAX_CCNT * rw_threshold;
@@ -1250,8 +1249,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
mmc->max_blk_count = 65535; /* NBLK is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
- dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 5a950b16d9e6..881f7ba545ae 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -966,8 +966,7 @@ static int __init imxmci_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_4_BIT_DATA;
/* MMC core transfer sizes tunable parameters */
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_blk_size = 2048;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index ad4f9870e3ca..b3a0ab0e4c2b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -876,8 +876,7 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
mmc->max_blk_count = (1 << 15) - 1;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
mmc->max_seg_size = mmc->max_req_size;
host->mmc = mmc;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 62a35822003e..fd877f633dd2 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1055,6 +1055,8 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_spi_host *host = mmc_priv(mmc);
int status = -EINVAL;
+ int crc_retry = 5;
+ struct mmc_command stop;
#ifdef DEBUG
/* MMC core and layered drivers *MUST* issue SPI-aware commands */
@@ -1087,10 +1089,29 @@ static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
/* request exclusive bus access */
spi_bus_lock(host->spi->master);
+crc_recover:
/* issue command; then optionally data and stop */
status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
if (status == 0 && mrq->data) {
mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
+
+ /*
+ * The SPI bus is not always reliable for large data transfers.
+ * If the SD device reports an occasional CRC error on a data
+ * read/write over SPI, it can usually be recovered by repeating
+ * the last SD command. The retry count is set to 5 so that the
+ * driver passes stress tests.
+ */
+ if (mrq->data->error == -EILSEQ && crc_retry) {
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ status = mmc_spi_command_send(host, mrq, &stop, 0);
+ crc_retry--;
+ mrq->data->error = 0;
+ goto crc_recover;
+ }
+
if (mrq->stop)
status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
else
@@ -1345,8 +1366,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->ops = &mmc_spi_ops;
mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
- mmc->max_hw_segs = MMC_SPI_BLOCKSATONCE;
- mmc->max_phys_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_segs = MMC_SPI_BLOCKSATONCE;
mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f2e02d7d9f3d..87b4fc6c98c2 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -523,19 +523,27 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmci_host *host = mmc_priv(mmc);
u32 pwr = 0;
unsigned long flags;
+ int ret;
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if(host->vcc &&
- regulator_is_enabled(host->vcc))
- regulator_disable(host->vcc);
+ if (host->vcc)
+ ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
break;
case MMC_POWER_UP:
-#ifdef CONFIG_REGULATOR
- if (host->vcc)
- /* This implicitly enables the regulator */
- mmc_regulator_set_ocr(host->vcc, ios->vdd);
-#endif
+ if (host->vcc) {
+ ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set OCR\n");
+ /*
+ * The .set_ios() function in the mmc_host_ops
+ * struct returns void, and failing to set the
+ * power should be rare, so we print an error
+ * and return here.
+ */
+ return;
+ }
+ }
if (host->plat->vdd_handler)
pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
ios->power_mode);
@@ -734,8 +742,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
/*
* We can do SGIO
*/
- mmc->max_hw_segs = 16;
- mmc->max_phys_segs = NR_SG;
+ mmc->max_segs = NR_SG;
/*
* Since only a certain number of bits are valid in the data length
@@ -870,8 +877,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
clk_disable(host->clk);
clk_put(host->clk);
- if (regulator_is_enabled(host->vcc))
- regulator_disable(host->vcc);
+ if (host->vcc)
+ mmc_regulator_set_ocr(mmc, host->vcc, 0);
regulator_put(host->vcc);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index ff7752348b11..1290d14c5839 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1164,8 +1164,7 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_SDIO_IRQ;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
- mmc->max_phys_segs = NR_SG;
- mmc->max_hw_segs = NR_SG;
+ mmc->max_segs = NR_SG;
mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
mmc->max_blk_count = 65536;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 366eefa77c5a..a5bf60e01af4 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -742,8 +742,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
- mmc->max_hw_segs = 1;
- mmc->max_phys_segs = 1;
+ mmc->max_segs = 1;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 350f78e86245..bdd2cbb87cba 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -790,8 +790,7 @@ static int mxcmci_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
- mmc->max_hw_segs = 64;
- mmc->max_phys_segs = 64;
+ mmc->max_segs = 64;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index d98ddcfac5e5..0c7e37f496ef 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1335,8 +1335,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
* NOTE max_seg_size assumption that small blocks aren't
* normally used (except e.g. for reading SD registers).
*/
- mmc->max_phys_segs = 32;
- mmc->max_hw_segs = 32;
+ mmc->max_segs = 32;
mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4693e62145a6..e865032a52eb 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -250,9 +250,9 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
if (power_on)
- ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
else
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
if (mmc_slot(host).after_set_reg)
mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
@@ -291,18 +291,23 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
/* Enable interface voltage rail, if needed */
if (ret == 0 && host->vcc_aux) {
ret = regulator_enable(host->vcc_aux);
if (ret < 0)
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
}
} else {
+ /* Shut down the rail */
if (host->vcc_aux)
ret = regulator_disable(host->vcc_aux);
- if (ret == 0)
- ret = mmc_regulator_set_ocr(host->vcc, 0);
+ if (!ret) {
+ /* Then proceed to shut down the local regulator */
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
+ }
}
if (mmc_slot(host).after_set_reg)
@@ -343,9 +348,9 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
if (cardsleep) {
/* VCC can be turned off if card is asleep */
if (sleep)
- err = mmc_regulator_set_ocr(host->vcc, 0);
+ err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
else
- err = mmc_regulator_set_ocr(host->vcc, vdd);
+ err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
} else
err = regulator_set_mode(host->vcc, mode);
if (err)
@@ -2130,8 +2135,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* Since we do only SG emulation, we can have as many segs
* as we want. */
- mmc->max_phys_segs = 1024;
- mmc->max_hw_segs = 1024;
+ mmc->max_segs = 1024;
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0a4e43f37140..7257738fd7da 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -99,14 +99,25 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
}
}
-static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
+static inline int pxamci_set_power(struct pxamci_host *host,
+ unsigned char power_mode,
+ unsigned int vdd)
{
int on;
-#ifdef CONFIG_REGULATOR
- if (host->vcc)
- mmc_regulator_set_ocr(host->vcc, vdd);
-#endif
+ if (host->vcc) {
+ int ret;
+
+ if (power_mode == MMC_POWER_UP) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ if (ret)
+ return ret;
+ } else if (power_mode == MMC_POWER_OFF) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ if (ret)
+ return ret;
+ }
+ }
if (!host->vcc && host->pdata &&
gpio_is_valid(host->pdata->gpio_power)) {
on = ((1 << vdd) & host->pdata->ocr_mask);
@@ -115,6 +126,8 @@ static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
}
if (!host->vcc && host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
+
+ return 0;
}
static void pxamci_stop_clock(struct pxamci_host *host)
@@ -490,9 +503,21 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (host->power_mode != ios->power_mode) {
+ int ret;
+
host->power_mode = ios->power_mode;
- pxamci_set_power(host, ios->vdd);
+ ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set power\n");
+ /*
+ * The .set_ios() function in the mmc_host_ops
+ * struct returns void, and failing to set the
+ * power should be rare, so we print an error and
+ * return here.
+ */
+ return;
+ }
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMDAT_INIT;
@@ -503,8 +528,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
host->cmdat &= ~CMDAT_SD_4DAT;
- pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
- host->clkrt, host->cmdat);
+ dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
+ host->clkrt, host->cmdat);
}
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
@@ -576,7 +601,7 @@ static int pxamci_probe(struct platform_device *pdev)
* We can do SG-DMA, but we don't because we never know how much
* data we successfully wrote to the card.
*/
- mmc->max_phys_segs = NR_SG;
+ mmc->max_segs = NR_SG;
/*
* Our hardware DMA can handle a maximum of one page per SG entry.
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 976330de379e..1ccd4b256cee 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1736,8 +1736,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
mmc->max_req_size = 4095 * 512;
mmc->max_seg_size = mmc->max_req_size;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
dbg(host, dbg_debug,
"probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n",
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index b7050b380d5f..9ebd1d7759dc 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
-#include <linux/sdhci-pltfm.h>
+#include <linux/mmc/sdhci-pltfm.h>
#include <mach/cns3xxx.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
new file mode 100644
index 000000000000..2e9cca19c90b
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -0,0 +1,143 @@
+/*
+ * Freescale eSDHC i.MX controller driver for the platform bus.
+ *
+ * derived from the OF-version.
+ *
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdhci-pltfm.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include "sdhci-esdhc.h"
+
+static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
+{
+ void __iomem *base = host->ioaddr + (reg & ~0x3);
+ u32 shift = (reg & 0x3) * 8;
+
+ writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
+}
+
+static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+{
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ reg ^= 2;
+
+ return readw(host->ioaddr + reg);
+}
+
+static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write; we must do it together with the
+ * command write that follows below.
+ */
+ pltfm_host->scratchpad = val;
+ return;
+ case SDHCI_COMMAND:
+ writel(val << 16 | pltfm_host->scratchpad,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ case SDHCI_BLOCK_SIZE:
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ break;
+ }
+ esdhc_clrset_le(host, 0xffff, val, reg);
+}
+
+static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 new_val;
+
+ switch (reg) {
+ case SDHCI_POWER_CONTROL:
+ /*
+ * FSL put some DMA bits here, so skip this write.
+ * If your board has a regulator, its handling would go here.
+ */
+ return;
+ case SDHCI_HOST_CONTROL:
+ /* FSL messed up here, so we can just keep those two */
+ new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS);
+ /* ensure the endianness */
+ new_val |= ESDHC_HOST_CONTROL_LE;
+ /* DMA mode bits are shifted */
+ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+
+ esdhc_clrset_le(host, 0xffff, new_val, reg);
+ return;
+ }
+ esdhc_clrset_le(host, 0xff, val, reg);
+}
+
+static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / 256 / 16;
+}
+
+static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct clk *clk;
+
+ clk = clk_get(mmc_dev(host->mmc), NULL);
+ if (IS_ERR(clk)) {
+ dev_err(mmc_dev(host->mmc), "clk err\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+ pltfm_host->clk = clk;
+
+ return 0;
+}
+
+static void esdhc_pltfm_exit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_w = esdhc_readw_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+};
+
+struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK
+ | SDHCI_QUIRK_BROKEN_ADMA,
+ /* ADMA has issues. Might be fixable */
+ /* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */
+ .ops = &sdhci_esdhc_ops,
+ .init = esdhc_pltfm_init,
+ .exit = esdhc_pltfm_exit,
+};
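The postponed SDHCI_TRANSFER_MODE write above relies on packing two 16-bit register
values into a single 32-bit store; a stand-alone sketch with hypothetical register values
(not part of this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical 16-bit TRANSFER_MODE and COMMAND values */
	uint16_t xfer_mode = 0x0023, command = 0x163a;

	/* the command goes in the upper half-word, the transfer mode in the
	 * lower one, mirroring the writel() in esdhc_writew_le() */
	uint32_t combined = (uint32_t)command << 16 | xfer_mode;

	printf("single 32-bit write at the TRANSFER_MODE offset: 0x%08x\n", combined);
	return 0;
}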
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
new file mode 100644
index 000000000000..afaf1bc4913a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -0,0 +1,83 @@
+/*
+ * Freescale eSDHC controller driver generics for OF and pltfm.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
+#define _DRIVERS_MMC_SDHCI_ESDHC_H
+
+/*
+ * Ops and quirks for the Freescale eSDHC controller.
+ */
+
+#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+ SDHCI_QUIRK_NONSTANDARD_CLOCK | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \
+ SDHCI_QUIRK_NO_CARD_NO_RESET)
+
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+/* pltfm-specific */
+#define ESDHC_HOST_CONTROL_LE 0x20
+
+/* OF-specific */
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
+
+#define ESDHC_HOST_CONTROL_RES 0x05
+
+static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ if (clock == 0)
+ goto out;
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(100);
+out:
+ host->clock = clock;
+}
+
+#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
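As an illustration of the divider search in esdhc_set_clock() above, a stand-alone sketch
of the same arithmetic, assuming a hypothetical 96 MHz base clock and a 25 MHz target:

#include <stdio.h>

int main(void)
{
	unsigned int max_clk = 96000000, clock = 25000000;
	int pre_div = 2, div = 1;

	/* grow pre_div in powers of two until div alone can reach the target... */
	while (max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;
	/* ...then pick the smallest div that brings the clock at or under it */
	while (max_clk / pre_div / div > clock && div < 16)
		div++;

	printf("actual clock: %u Hz\n", max_clk / pre_div / div);	/* 24000000 */
	printf("register fields: PREDIV=%d, DIV=%d\n", pre_div >> 1, div - 1);
	return 0;
}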
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index c8623de13af3..fcd0e1fcba44 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -18,23 +18,7 @@
#include <linux/mmc/host.h>
#include "sdhci-of.h"
#include "sdhci.h"
-
-/*
- * Ops and quirks for the Freescale eSDHC controller.
- */
-
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
-#define ESDHC_HOST_CONTROL_RES 0x05
+#include "sdhci-esdhc.h"
static u16 esdhc_readw(struct sdhci_host *host, int reg)
{
@@ -68,51 +52,20 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
sdhci_be32bs_writeb(host, val, reg);
}
-static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- int pre_div = 2;
- int div = 1;
-
- clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-
- if (clock == 0)
- goto out;
-
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host->max_clk / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
- div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
- mdelay(100);
-out:
- host->clock = clock;
-}
-
-static int esdhc_enable_dma(struct sdhci_host *host)
+static int esdhc_of_enable_dma(struct sdhci_host *host)
{
setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
return 0;
}
-static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
+static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
return of_host->clock;
}
-static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
+static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
@@ -120,14 +73,7 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
}
struct sdhci_of_data sdhci_esdhc = {
- .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
- SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_NO_BUSY_IRQ |
- SDHCI_QUIRK_NONSTANDARD_CLOCK |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_PIO_NEEDS_DELAY |
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
- SDHCI_QUIRK_NO_CARD_NO_RESET,
+ .quirks = ESDHC_DEFAULT_QUIRKS,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
@@ -136,8 +82,8 @@ struct sdhci_of_data sdhci_esdhc = {
.write_w = esdhc_writew,
.write_b = esdhc_writeb,
.set_clock = esdhc_set_clock,
- .enable_dma = esdhc_enable_dma,
- .get_max_clock = esdhc_get_max_clock,
- .get_min_clock = esdhc_get_min_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
},
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e8aa99deae9a..55746bac2f44 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -145,6 +145,37 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
+/*
+ * ADMA operation is disabled for the Moorestown platform due to
+ * hardware bugs.
+ */
+static int mrst_hc1_probe(struct sdhci_pci_chip *chip)
+{
+ /*
+ * The number of slots is fixed here for MRST, as SDIO3 is never used and has
+ * hardware bugs.
+ */
+ chip->num_slots = 1;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe = mrst_hc1_probe,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -494,6 +525,62 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_via,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
@@ -818,6 +905,8 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
goto free;
}
+ slots = chip->num_slots; /* Quirk may have changed this */
+
for (i = 0;i < slots;i++) {
slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
if (IS_ERR(slot)) {
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index e045e3c61dde..0502f89f662b 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -30,7 +30,7 @@
#include <linux/mmc/host.h>
#include <linux/io.h>
-#include <linux/sdhci-pltfm.h>
+#include <linux/mmc/sdhci-pltfm.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -52,14 +52,17 @@ static struct sdhci_ops sdhci_pltfm_ops = {
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
- struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct sdhci_pltfm_data *pdata;
struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
struct resource *iomem;
int ret;
- if (!pdata && platid && platid->driver_data)
+ if (platid && platid->driver_data)
pdata = (void *)platid->driver_data;
+ else
+ pdata = pdev->dev.platform_data;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
@@ -71,16 +74,19 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
- if (pdev->dev.parent)
- host = sdhci_alloc_host(pdev->dev.parent, 0);
+ /* Some PCI-based MFDs need the parent here */
+ if (pdev->dev.parent != &platform_bus)
+ host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
else
- host = sdhci_alloc_host(&pdev->dev, 0);
+ host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto err;
}
+ pltfm_host = sdhci_priv(host);
+
host->hw_name = "platform";
if (pdata && pdata->ops)
host->ops = pdata->ops;
@@ -105,7 +111,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
if (pdata && pdata->init) {
- ret = pdata->init(host);
+ ret = pdata->init(host, pdata);
if (ret)
goto err_plat_init;
}
@@ -161,10 +167,32 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
#ifdef CONFIG_MMC_SDHCI_CNS3XXX
{ "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
#endif
+#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
+ { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
+#endif
{ },
};
MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
+#ifdef CONFIG_PM
+static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_suspend_host(host, state);
+}
+
+static int sdhci_pltfm_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+#else
+#define sdhci_pltfm_suspend NULL
+#define sdhci_pltfm_resume NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver sdhci_pltfm_driver = {
.driver = {
.name = "sdhci",
@@ -173,6 +201,8 @@ static struct platform_driver sdhci_pltfm_driver = {
.probe = sdhci_pltfm_probe,
.remove = __devexit_p(sdhci_pltfm_remove),
.id_table = sdhci_pltfm_ids,
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
};
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 900f32902f73..c1bfe48af56a 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -11,8 +11,16 @@
#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H
#define _DRIVERS_MMC_SDHCI_PLTFM_H
-#include <linux/sdhci-pltfm.h>
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/mmc/sdhci-pltfm.h>
+
+struct sdhci_pltfm_host {
+ struct clk *clk;
+ u32 scratchpad; /* to handle quirks across io-accessor calls */
+};
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
+extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
new file mode 100644
index 000000000000..fc406ac5d193
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -0,0 +1,253 @@
+/* linux/drivers/mmc/host/sdhci-pxa.c
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Supports:
+ * SDHCI host controllers on MMP2/PXA910/PXA168
+ *
+ * Refer to sdhci-s3c.c.
+ */
+
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <plat/sdhci.h>
+#include "sdhci.h"
+
+#define DRIVER_NAME "sdhci-pxa"
+
+#define SD_FIFO_PARAM 0x104
+#define DIS_PAD_SD_CLK_GATE 0x400
+
+struct sdhci_pxa {
+ struct sdhci_host *host;
+ struct sdhci_pxa_platdata *pdata;
+ struct clk *clk;
+ struct resource *res;
+
+ u8 clk_enable;
+};
+
+/*****************************************************************************\
+ * *
+ * SDHCI core callbacks *
+ * *
+\*****************************************************************************/
+static void set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pxa *pxa = sdhci_priv(host);
+ u32 tmp = 0;
+
+ if (clock == 0) {
+ if (pxa->clk_enable) {
+ clk_disable(pxa->clk);
+ pxa->clk_enable = 0;
+ }
+ } else {
+ if (0 == pxa->clk_enable) {
+ if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
+ tmp = readl(host->ioaddr + SD_FIFO_PARAM);
+ tmp |= DIS_PAD_SD_CLK_GATE;
+ writel(tmp, host->ioaddr + SD_FIFO_PARAM);
+ }
+ clk_enable(pxa->clk);
+ pxa->clk_enable = 1;
+ }
+ }
+}
+
+static struct sdhci_ops sdhci_pxa_ops = {
+ .set_clock = set_clock,
+};
+
+/*****************************************************************************\
+ * *
+ * Device probing/removal *
+ * *
+\*****************************************************************************/
+
+static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
+{
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct resource *iomem = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq specified\n");
+ return irq;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(dev, "no memory specified\n");
+ return -ENOENT;
+ }
+
+ host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
+ if (IS_ERR(host)) {
+ dev_err(dev, "failed to alloc host\n");
+ return PTR_ERR(host);
+ }
+
+ pxa = sdhci_priv(host);
+ pxa->host = host;
+ pxa->pdata = pdata;
+ pxa->clk_enable = 0;
+
+ pxa->clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(pxa->clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(pxa->clk);
+ goto out;
+ }
+
+ pxa->res = request_mem_region(iomem->start, resource_size(iomem),
+ mmc_hostname(host->mmc));
+ if (!pxa->res) {
+ dev_err(&pdev->dev, "cannot request region\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->hw_name = "MMC";
+ host->ops = &sdhci_pxa_ops;
+ host->irq = irq;
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto out;
+ }
+
+ if (pxa->pdata->max_speed)
+ host->mmc->f_max = pxa->pdata->max_speed;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+out:
+ if (host) {
+ clk_put(pxa->clk);
+ if (host->ioaddr)
+ iounmap(host->ioaddr);
+ if (pxa->res)
+ release_mem_region(pxa->res->start,
+ resource_size(pxa->res));
+ sdhci_free_host(host);
+ }
+
+ return ret;
+}
+
+static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pxa *pxa = sdhci_priv(host);
+ int dead = 0;
+ u32 scratch;
+
+ if (host) {
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+
+ if (host->ioaddr)
+ iounmap(host->ioaddr);
+ if (pxa->res)
+ release_mem_region(pxa->res->start,
+ resource_size(pxa->res));
+ if (pxa->clk_enable) {
+ clk_disable(pxa->clk);
+ pxa->clk_enable = 0;
+ }
+ clk_put(pxa->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_suspend_host(host, state);
+}
+
+static int sdhci_pxa_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+#else
+#define sdhci_pxa_suspend NULL
+#define sdhci_pxa_resume NULL
+#endif
+
+static struct platform_driver sdhci_pxa_driver = {
+ .probe = sdhci_pxa_probe,
+ .remove = __devexit_p(sdhci_pxa_remove),
+ .suspend = sdhci_pxa_suspend,
+ .resume = sdhci_pxa_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+static int __init sdhci_pxa_init(void)
+{
+ return platform_driver_register(&sdhci_pxa_driver);
+}
+
+static void __exit sdhci_pxa_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxa_driver);
+}
+
+module_init(sdhci_pxa_init);
+module_exit(sdhci_pxa_exit);
+
+MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
+MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 401527d273b5..782c0ee3c925 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -47,7 +47,8 @@ static void sdhci_finish_command(struct sdhci_host *);
static void sdhci_dumpregs(struct sdhci_host *host)
{
- printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
+ printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
@@ -1001,13 +1002,28 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
goto out;
- for (div = 1;div < 256;div *= 2) {
- if ((host->max_clk / div) <= clock)
- break;
+ if (host->version >= SDHCI_SPEC_300) {
+ /* Version 3.00 divisors must be a multiple of 2. */
+ if (host->max_clk <= clock)
+ div = 1;
+ else {
+ for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
+ }
+ } else {
+ /* Version 2.00 divisors must be a power of 2. */
+ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
}
div >>= 1;
- clk = div << SDHCI_DIVIDER_SHIFT;
+ clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+ clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
+ << SDHCI_DIVIDER_HI_SHIFT;
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1034,11 +1050,9 @@ out:
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
- u8 pwr;
+ u8 pwr = 0;
- if (power == (unsigned short)-1)
- pwr = 0;
- else {
+ if (power != (unsigned short)-1) {
switch (1 << power) {
case MMC_VDD_165_195:
pwr = SDHCI_POWER_180;
@@ -1168,6 +1182,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_set_power(host, ios->vdd);
+ if (host->ops->platform_send_init_74_clocks)
+ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (ios->bus_width == MMC_BUS_WIDTH_8)
@@ -1180,8 +1197,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
- if (ios->timing == MMC_TIMING_SD_HS &&
- !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+ if ((ios->timing == MMC_TIMING_SD_HS ||
+ ios->timing == MMC_TIMING_MMC_HS)
+ && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
ctrl |= SDHCI_CTRL_HISPD;
else
ctrl &= ~SDHCI_CTRL_HISPD;
@@ -1205,22 +1223,25 @@ static int sdhci_get_ro(struct mmc_host *mmc)
{
struct sdhci_host *host;
unsigned long flags;
- int present;
+ int is_readonly;
host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
- present = 0;
+ is_readonly = 0;
+ else if (host->ops->get_ro)
+ is_readonly = host->ops->get_ro(host);
else
- present = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ & SDHCI_WRITE_PROTECT);
spin_unlock_irqrestore(&host->lock, flags);
- if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
- return !!(present & SDHCI_WRITE_PROTECT);
- return !(present & SDHCI_WRITE_PROTECT);
+ /* This quirk needs to be replaced by a callback function later */
+ return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+ !is_readonly : is_readonly;
}
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1427,7 +1448,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
sdhci_finish_command(host);
}
-#ifdef DEBUG
+#ifdef CONFIG_MMC_DEBUG
static void sdhci_show_adma_error(struct sdhci_host *host)
{
const char *name = mmc_hostname(host->mmc);
@@ -1708,7 +1729,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
- if (host->version > SDHCI_SPEC_200) {
+ if (host->version > SDHCI_SPEC_300) {
printk(KERN_ERR "%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
@@ -1779,8 +1800,13 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
}
- host->max_clk =
- (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+ if (host->version >= SDHCI_SPEC_300)
+ host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+ else
+ host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+
host->max_clk *= 1000000;
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
@@ -1815,18 +1841,21 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->ops = &sdhci_ops;
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
+ else if (host->version >= SDHCI_SPEC_300)
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
else
- mmc->f_min = host->max_clk / 256;
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
mmc->f_max = host->max_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
- mmc->caps |= MMC_CAP_4_BIT_DATA;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
if (caps & SDHCI_CAN_DO_HISPD)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
+ mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc->ocr_avail = 0;
@@ -1850,12 +1879,11 @@ int sdhci_add_host(struct sdhci_host *host)
* can do scatter/gather or not.
*/
if (host->flags & SDHCI_USE_ADMA)
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
else if (host->flags & SDHCI_USE_SDMA)
- mmc->max_hw_segs = 1;
+ mmc->max_segs = 1;
else /* PIO */
- mmc->max_hw_segs = 128;
- mmc->max_phys_segs = 128;
+ mmc->max_segs = 128;
/*
* Maximum number of sectors in one transfer. Limited by DMA boundary
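A stand-alone illustration of the 10-bit SDHCI 3.00 divider encoding introduced above,
assuming a hypothetical stored divisor of 1023 (i.e. divide-by-2046 after the right shift):

#include <stdio.h>

#define SDHCI_DIVIDER_SHIFT	8
#define SDHCI_DIVIDER_HI_SHIFT	6
#define SDHCI_DIV_MASK		0xFF
#define SDHCI_DIV_MASK_LEN	8
#define SDHCI_DIV_HI_MASK	0x300

int main(void)
{
	unsigned int div = 1023, clk;

	/* low 8 bits of the divisor land at [15:8], the upper 2 bits at [7:6] */
	clk  = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	printf("CLOCK_CONTROL divider bits: 0x%04x\n", clk);	/* 0xffc0 */
	return 0;
}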
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d316bc79b636..b7b8a3b28b01 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
*
+ * Header file for Host Controller registers and I/O accessors.
+ *
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -8,14 +10,16 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
-#ifndef __SDHCI_H
-#define __SDHCI_H
+#ifndef __SDHCI_HW_H
+#define __SDHCI_HW_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/mmc/sdhci.h>
+
/*
* Controller registers
*/
@@ -86,6 +90,10 @@
#define SDHCI_CLOCK_CONTROL 0x2C
#define SDHCI_DIVIDER_SHIFT 8
+#define SDHCI_DIVIDER_HI_SHIFT 6
+#define SDHCI_DIV_MASK 0xFF
+#define SDHCI_DIV_MASK_LEN 8
+#define SDHCI_DIV_HI_MASK 0x300
#define SDHCI_CLOCK_CARD_EN 0x0004
#define SDHCI_CLOCK_INT_STABLE 0x0002
#define SDHCI_CLOCK_INT_EN 0x0001
@@ -140,6 +148,7 @@
#define SDHCI_TIMEOUT_CLK_SHIFT 0
#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define SDHCI_CLOCK_BASE_MASK 0x00003F00
+#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
#define SDHCI_CLOCK_BASE_SHIFT 8
#define SDHCI_MAX_BLOCK_MASK 0x00030000
#define SDHCI_MAX_BLOCK_SHIFT 16
@@ -178,134 +187,14 @@
#define SDHCI_SPEC_VER_SHIFT 0
#define SDHCI_SPEC_100 0
#define SDHCI_SPEC_200 1
+#define SDHCI_SPEC_300 2
-struct sdhci_ops;
-
-struct sdhci_host {
- /* Data set by hardware interface driver */
- const char *hw_name; /* Hardware bus name */
-
- unsigned int quirks; /* Deviations from spec. */
-
-/* Controller doesn't honor resets unless we touch the clock register */
-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
-/* Controller has bad caps bits, but really supports DMA */
-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
-/* Controller doesn't like to be reset when there is no card inserted. */
-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
-/* Controller doesn't like clearing the power reg before a change */
-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
-/* Controller has an unusable DMA engine */
-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
-/* Controller has an unusable ADMA engine */
-#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
-/* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
-/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
-/* Controller can only ADMA chunks that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
-/* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
-/* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
-/* Controller provides an incorrect timeout value for transfers */
-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
-/* Controller has an issue with buffer bits for small transfers */
-#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
-/* Controller does not provide transfer-complete interrupt when not busy */
-#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
-/* Controller has unreliable card detection */
-#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
-/* Controller reports inverted write-protect state */
-#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
-/* Controller has nonstandard clock management */
-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
-/* Controller does not like fast PIO transfers */
-#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
-/* Controller losing signal/interrupt enable states after reset */
-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
-/* Controller has to be forced to use block size of 2048 bytes */
-#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
-/* Controller cannot do multi-block transfers */
-#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
-/* Controller can only handle 1-bit data transfers */
-#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
-/* Controller needs 10ms delay between applying power and clock */
-#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
-/* Controller uses SDCLK instead of TMCLK for data timeouts */
-#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
-/* Controller reports wrong base clock capability */
-#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
-/* Controller cannot support End Attribute in NOP ADMA descriptor */
-#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
-/* Controller is missing device caps. Use caps provided by host */
-#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
-/* Controller uses Auto CMD12 command to stop the transfer */
-#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
-/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
-#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
-
- int irq; /* Device IRQ */
- void __iomem * ioaddr; /* Mapped address */
-
- const struct sdhci_ops *ops; /* Low level hw interface */
-
- struct regulator *vmmc; /* Power regulator */
-
- /* Internal data */
- struct mmc_host *mmc; /* MMC structure */
- u64 dma_mask; /* custom DMA mask */
-
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
- struct led_classdev led; /* LED control */
- char led_name[32];
-#endif
-
- spinlock_t lock; /* Mutex */
-
- int flags; /* Host attributes */
-#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
-#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
-#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
-#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
-
- unsigned int version; /* SDHCI spec. version */
-
- unsigned int max_clk; /* Max possible freq (MHz) */
- unsigned int timeout_clk; /* Timeout freq (KHz) */
-
- unsigned int clock; /* Current clock (MHz) */
- u8 pwr; /* Current voltage */
-
- struct mmc_request *mrq; /* Current request */
- struct mmc_command *cmd; /* Current command */
- struct mmc_data *data; /* Current data request */
- unsigned int data_early:1; /* Data finished before cmd */
-
- struct sg_mapping_iter sg_miter; /* SG state for PIO */
- unsigned int blocks; /* remaining PIO blocks */
-
- int sg_count; /* Mapped sg entries */
-
- u8 *adma_desc; /* ADMA descriptor table */
- u8 *align_buffer; /* Bounce buffer */
-
- dma_addr_t adma_addr; /* Mapped ADMA descr. table */
- dma_addr_t align_addr; /* Mapped bounce buffer */
-
- struct tasklet_struct card_tasklet; /* Tasklet structures */
- struct tasklet_struct finish_tasklet;
-
- struct timer_list timer; /* Timer for timeouts */
-
- unsigned int caps; /* Alternative capabilities */
-
- unsigned long private[0] ____cacheline_aligned;
-};
+/*
+ * End of controller registers.
+ */
+#define SDHCI_MAX_DIV_SPEC_200 256
+#define SDHCI_MAX_DIV_SPEC_300 2046
struct sdhci_ops {
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -323,6 +212,9 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+ void (*platform_send_init_74_clocks)(struct sdhci_host *host,
+ u8 power_mode);
+ unsigned int (*get_ro)(struct sdhci_host *host);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -427,4 +319,4 @@ extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
extern int sdhci_resume_host(struct sdhci_host *host);
#endif
-#endif /* __SDHCI_H */
+#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5d3f824bb5a3..0f06b8002814 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -846,8 +846,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
mmc->caps = MMC_CAP_MMC_HIGHSPEED;
if (pd->caps)
mmc->caps |= pd->caps;
- mmc->max_phys_segs = 128;
- mmc->max_hw_segs = 128;
+ mmc->max_segs = 128;
mmc->max_blk_size = 512;
mmc->max_blk_count = 65535;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index cec99958b652..457c26ea09de 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -978,11 +978,10 @@ static int tifm_sd_probe(struct tifm_dev *sock)
mmc->f_max = 24000000;
mmc->max_blk_count = 2048;
- mmc->max_hw_segs = mmc->max_blk_count;
+ mmc->max_segs = mmc->max_blk_count;
mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
mmc->max_req_size = mmc->max_seg_size;
- mmc->max_phys_segs = mmc->max_hw_segs;
sock->card_event = tifm_sd_card_event;
sock->data_event = tifm_sd_data_event;
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
new file mode 100644
index 000000000000..b4ead4a13c98
--- /dev/null
+++ b/drivers/mmc/host/ushc.c
@@ -0,0 +1,566 @@
+/*
+ * USB SD Host Controller (USHC) controller driver.
+ *
+ * Copyright (C) 2010 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * Notes:
+ * - Only version 2 devices are supported.
+ * - Version 2 devices only support SDIO cards/devices (R2 response is
+ * unsupported).
+ *
+ * References:
+ * [USHC] USB SD Host Controller specification (CS-118793-SP)
+ */
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+
+enum ushc_request {
+ USHC_GET_CAPS = 0x00,
+ USHC_HOST_CTRL = 0x01,
+ USHC_PWR_CTRL = 0x02,
+ USHC_CLK_FREQ = 0x03,
+ USHC_EXEC_CMD = 0x04,
+ USHC_READ_RESP = 0x05,
+ USHC_RESET = 0x06,
+};
+
+enum ushc_request_type {
+ USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+};
+
+#define USHC_GET_CAPS_VERSION_MASK 0xff
+#define USHC_GET_CAPS_3V3 (1 << 8)
+#define USHC_GET_CAPS_3V0 (1 << 9)
+#define USHC_GET_CAPS_1V8 (1 << 10)
+#define USHC_GET_CAPS_HIGH_SPD (1 << 16)
+
+#define USHC_HOST_CTRL_4BIT (1 << 1)
+#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)
+
+#define USHC_PWR_CTRL_OFF 0x00
+#define USHC_PWR_CTRL_3V3 0x01
+#define USHC_PWR_CTRL_3V0 0x02
+#define USHC_PWR_CTRL_1V8 0x03
+
+#define USHC_READ_RESP_BUSY (1 << 4)
+#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
+#define USHC_READ_RESP_ERR_CRC (1 << 2)
+#define USHC_READ_RESP_ERR_DAT (1 << 1)
+#define USHC_READ_RESP_ERR_CMD (1 << 0)
+#define USHC_READ_RESP_ERR_MASK 0x0f
+
+struct ushc_cbw {
+ __u8 signature;
+ __u8 cmd_idx;
+ __le16 block_size;
+ __le32 arg;
+} __attribute__((packed));
+
+#define USHC_CBW_SIGNATURE 'C'
+
+struct ushc_csw {
+ __u8 signature;
+ __u8 status;
+ __le32 response;
+} __attribute__((packed));
+
+#define USHC_CSW_SIGNATURE 'S'
+
+struct ushc_int_data {
+ u8 status;
+ u8 reserved[3];
+};
+
+#define USHC_INT_STATUS_SDIO_INT (1 << 1)
+#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)
+
+
+struct ushc_data {
+ struct usb_device *usb_dev;
+ struct mmc_host *mmc;
+
+ struct urb *int_urb;
+ struct ushc_int_data *int_data;
+
+ struct urb *cbw_urb;
+ struct ushc_cbw *cbw;
+
+ struct urb *data_urb;
+
+ struct urb *csw_urb;
+ struct ushc_csw *csw;
+
+ spinlock_t lock;
+ struct mmc_request *current_req;
+ u32 caps;
+ u16 host_ctrl;
+ unsigned long flags;
+ u8 last_status;
+ int clock_freq;
+};
+
+#define DISCONNECTED 0
+#define INT_EN 1
+#define IGNORE_NEXT_INT 2
+
+static void data_callback(struct urb *urb);
+
+static int ushc_hw_reset(struct ushc_data *ushc)
+{
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_RESET, USHC_RESET_TYPE,
+ 0, 0, NULL, 0, 100);
+}
+
+static int ushc_hw_get_caps(struct ushc_data *ushc)
+{
+ int ret;
+ int version;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
+ USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
+ 0, 0, &ushc->caps, sizeof(ushc->caps), 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->caps = le32_to_cpu(ushc->caps);
+
+ version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
+ if (version != 0x02) {
+ dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
+{
+ u16 host_ctrl;
+ int ret;
+
+ host_ctrl = (ushc->host_ctrl & ~mask) | val;
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
+ host_ctrl, 0, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+ ushc->host_ctrl = host_ctrl;
+ return 0;
+}
+
+static void int_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ u8 status, last_status;
+
+ if (urb->status < 0)
+ return;
+
+ status = ushc->int_data->status;
+ last_status = ushc->last_status;
+ ushc->last_status = status;
+
+ /*
+ * Ignore the card interrupt status on interrupt transfers that
+ * were submitted while card interrupts were disabled.
+ *
+ * This avoids occasional spurious interrupts when enabling
+ * interrupts immediately after clearing the source on the card.
+ */
+
+ if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
+ && test_bit(INT_EN, &ushc->flags)
+ && status & USHC_INT_STATUS_SDIO_INT) {
+ mmc_signal_sdio_irq(ushc->mmc);
+ }
+
+ if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
+ mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));
+
+ if (!test_bit(INT_EN, &ushc->flags))
+ set_bit(IGNORE_NEXT_INT, &ushc->flags);
+ usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
+}
+
+static void cbw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0) {
+ usb_unlink_urb(ushc->data_urb);
+ usb_unlink_urb(ushc->csw_urb);
+ }
+}
+
+static void data_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0)
+ usb_unlink_urb(ushc->csw_urb);
+}
+
+static void csw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ struct mmc_request *req = ushc->current_req;
+ int status;
+
+ status = ushc->csw->status;
+
+ if (urb->status != 0) {
+ req->cmd->error = urb->status;
+ } else if (status & USHC_READ_RESP_ERR_CMD) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->cmd->error = -EIO;
+ else
+ req->cmd->error = -ETIMEDOUT;
+ }
+ if (req->data) {
+ if (status & USHC_READ_RESP_ERR_DAT) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->data->error = -EIO;
+ else
+ req->data->error = -ETIMEDOUT;
+ req->data->bytes_xfered = 0;
+ } else {
+ req->data->bytes_xfered = req->data->blksz * req->data->blocks;
+ }
+ }
+
+ req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
+
+ mmc_request_done(ushc->mmc, req);
+}
+
+static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ushc->lock, flags);
+
+ if (test_bit(DISCONNECTED, &ushc->flags)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Version 2 firmware doesn't support the R2 response format. */
+ if (req->cmd->flags & MMC_RSP_136) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* The Astoria's data FIFOs don't work with clock speeds < 5MHz so
+ limit commands with data to 6MHz or more. */
+ if (req->data && ushc->clock_freq < 6000000) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ushc->current_req = req;
+
+ /* Start cmd with CBW. */
+ ushc->cbw->cmd_idx = req->cmd->opcode;
+ if (req->data)
+ ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
+ else
+ ushc->cbw->block_size = 0;
+ ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
+
+ ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+ /* Submit data (if any). */
+ if (req->data) {
+ struct mmc_data *data = req->data;
+ int pipe;
+
+ if (data->flags & MMC_DATA_READ)
+ pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
+ else
+ pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
+
+ usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
+ sg_virt(data->sg), data->sg->length,
+ data_callback, ushc);
+ ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Submit CSW. */
+ ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+out:
+ spin_unlock_irqrestore(&ushc->lock, flags);
+ if (ret < 0) {
+ usb_unlink_urb(ushc->cbw_urb);
+ usb_unlink_urb(ushc->data_urb);
+ req->cmd->error = ret;
+ mmc_request_done(mmc, req);
+ }
+}
+
+static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
+{
+ u16 voltage;
+
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ voltage = USHC_PWR_CTRL_OFF;
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ voltage = USHC_PWR_CTRL_3V3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
+ voltage, 0, NULL, 0, 100);
+}
+
+static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
+{
+ return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
+ bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
+}
+
+static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
+{
+ int ret;
+
+ /* Hardware can't detect interrupts while the clock is off. */
+ if (clk == 0)
+ clk = 400000;
+
+ ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
+ enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
+ clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->clock_freq = clk;
+ return 0;
+}
+
+static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ ushc_set_power(ushc, ios->power_mode);
+ ushc_set_bus_width(ushc, 1 << ios->bus_width);
+ ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
+}
+
+static int ushc_get_cd(struct mmc_host *mmc)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
+}
+
+static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ if (enable)
+ set_bit(INT_EN, &ushc->flags);
+ else
+ clear_bit(INT_EN, &ushc->flags);
+}
+
+static void ushc_clean_up(struct ushc_data *ushc)
+{
+ usb_free_urb(ushc->int_urb);
+ usb_free_urb(ushc->csw_urb);
+ usb_free_urb(ushc->data_urb);
+ usb_free_urb(ushc->cbw_urb);
+
+ kfree(ushc->int_data);
+ kfree(ushc->cbw);
+ kfree(ushc->csw);
+
+ mmc_free_host(ushc->mmc);
+}
+
+static const struct mmc_host_ops ushc_ops = {
+ .request = ushc_request,
+ .set_ios = ushc_set_ios,
+ .get_cd = ushc_get_cd,
+ .enable_sdio_irq = ushc_enable_sdio_irq,
+};
+
+static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct mmc_host *mmc;
+ struct ushc_data *ushc;
+ int ret = -ENOMEM;
+
+ mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
+ if (mmc == NULL)
+ return -ENOMEM;
+ ushc = mmc_priv(mmc);
+ usb_set_intfdata(intf, ushc);
+
+ ushc->usb_dev = usb_dev;
+ ushc->mmc = mmc;
+
+ spin_lock_init(&ushc->lock);
+
+ ret = ushc_hw_reset(ushc);
+ if (ret < 0)
+ goto err;
+
+ /* Read capabilities. */
+ ret = ushc_hw_get_caps(ushc);
+ if (ret < 0)
+ goto err;
+
+ mmc->ops = &ushc_ops;
+
+ mmc->f_min = 400000;
+ mmc->f_max = 50000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
+
+ mmc->max_seg_size = 512*511;
+ mmc->max_segs = 1;
+ mmc->max_req_size = 512*511;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 511;
+
+ ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->int_urb == NULL)
+ goto err;
+ ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
+ if (ushc->int_data == NULL)
+ goto err;
+ usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
+ usb_rcvintpipe(usb_dev,
+ intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
+ ushc->int_data, sizeof(struct ushc_int_data),
+ int_callback, ushc,
+ intf->cur_altsetting->endpoint[0].desc.bInterval);
+
+ ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->cbw_urb == NULL)
+ goto err;
+ ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
+ if (ushc->cbw == NULL)
+ goto err;
+ ushc->cbw->signature = USHC_CBW_SIGNATURE;
+
+ usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
+ ushc->cbw, sizeof(struct ushc_cbw),
+ cbw_callback, ushc);
+
+ ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->data_urb == NULL)
+ goto err;
+
+ ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->csw_urb == NULL)
+ goto err;
+ ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
+ if (ushc->csw == NULL)
+ goto err;
+ usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
+ ushc->csw, sizeof(struct ushc_csw),
+ csw_callback, ushc);
+
+ ret = mmc_add_host(ushc->mmc);
+ if (ret)
+ goto err;
+
+ ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
+ if (ret < 0) {
+ mmc_remove_host(ushc->mmc);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ushc_clean_up(ushc);
+ return ret;
+}
+
+static void ushc_disconnect(struct usb_interface *intf)
+{
+ struct ushc_data *ushc = usb_get_intfdata(intf);
+
+ spin_lock_irq(&ushc->lock);
+ set_bit(DISCONNECTED, &ushc->flags);
+ spin_unlock_irq(&ushc->lock);
+
+ usb_kill_urb(ushc->int_urb);
+ usb_kill_urb(ushc->cbw_urb);
+ usb_kill_urb(ushc->data_urb);
+ usb_kill_urb(ushc->csw_urb);
+
+ mmc_remove_host(ushc->mmc);
+
+ ushc_clean_up(ushc);
+}
+
+static struct usb_device_id ushc_id_table[] = {
+ /* CSR USB SD Host Controller */
+ { USB_DEVICE(0x0a12, 0x5d10) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, ushc_id_table);
+
+static struct usb_driver ushc_driver = {
+ .name = "ushc",
+ .id_table = ushc_id_table,
+ .probe = ushc_probe,
+ .disconnect = ushc_disconnect,
+};
+
+static int __init ushc_init(void)
+{
+ return usb_register(&ushc_driver);
+}
+module_init(ushc_init);
+
+static void __exit ushc_exit(void)
+{
+ usb_deregister(&ushc_driver);
+}
+module_exit(ushc_exit);
+
+MODULE_DESCRIPTION("USB SD Host Controller driver");
+MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
+MODULE_LICENSE("GPL");
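
[Editor's note, not part of the patch: the USHC transfer model above mirrors USB mass storage: a CBW is sent, optional data follows on the bulk pipes, and a CSW returns a status byte that csw_callback() maps onto -EIO/-ETIMEDOUT. A small self-contained sketch of decoding that status byte with the USHC_READ_RESP_* bits follows; the helper and main() values are illustrative only.]

/* Editorial sketch: decoding a USHC CSW status byte. */
#include <stdio.h>
#include <stdint.h>

#define USHC_READ_RESP_BUSY		(1 << 4)
#define USHC_READ_RESP_ERR_TIMEOUT	(1 << 3)
#define USHC_READ_RESP_ERR_CRC		(1 << 2)
#define USHC_READ_RESP_ERR_DAT		(1 << 1)
#define USHC_READ_RESP_ERR_CMD		(1 << 0)

static void decode_csw_status(uint8_t status)
{
	/* Same mapping as csw_callback(): CRC set -> CRC error, else timeout. */
	if (status & USHC_READ_RESP_ERR_CMD)
		printf("command error (%s)\n",
		       status & USHC_READ_RESP_ERR_CRC ? "CRC" : "timeout");
	if (status & USHC_READ_RESP_ERR_DAT)
		printf("data error (%s)\n",
		       status & USHC_READ_RESP_ERR_CRC ? "CRC" : "timeout");
	if (status & USHC_READ_RESP_BUSY)
		printf("card busy\n");
	if (!(status & (USHC_READ_RESP_ERR_CMD | USHC_READ_RESP_ERR_DAT)))
		printf("transfer completed\n");
}

int main(void)
{
	decode_csw_status(USHC_READ_RESP_ERR_CMD | USHC_READ_RESP_ERR_CRC);
	decode_csw_status(0);
	return 0;
}
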
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 19f2d72dbca5..9ed84ddb4780 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1050,8 +1050,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
mmc->ops = &via_sdc_ops;
/*Hardware cannot do scatter lists*/
- mmc->max_hw_segs = 1;
- mmc->max_phys_segs = 1;
+ mmc->max_segs = 1;
mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 0012f5d13d28..7fca0a386ba0 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1235,8 +1235,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
* Maximum number of segments. Worst case is one sector per segment
* so this will be 64kB/512.
*/
- mmc->max_hw_segs = 128;
- mmc->max_phys_segs = 128;
+ mmc->max_segs = 128;
/*
* Maximum request size. Also limited by 64KiB buffer.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 86fe67fd49ba..9334539ebf75 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1041,7 +1041,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on ARM || SUPERH
+ depends on ARM || SUPERH || MN10300
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1055,7 +1055,7 @@ config SMC911X
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
- depends on ARM || SUPERH || BLACKFIN || MIPS
+ depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
select CRC32
select MII
select PHYLIB
@@ -1067,6 +1067,14 @@ config SMSC911X
<file:Documentation/networking/net-modules.txt>. The module
will be called smsc911x.
+config SMSC911X_ARCH_HOOKS
+ def_bool n
+ depends on SMSC911X
+ help
+ If the arch enables this, it allows the arch to implement various
+ hooks for more comprehensive interrupt control and also to override
+ the source of the MAC address.
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on ISA
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b897bf..9ab58097fa2e 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf667d1f..09b099bfab2b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
return err;
}
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
return err;
}
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8e66bd..43579b3b24ac 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
+static const struct ethtool_ops atl1_ethtool_ops;
+
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
* 1. calcu 32bit CRC for multicast address
* 2. reverse crc with MSB to LSB
*/
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
return 0;
}
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
@@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
return 0;
}
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb273ac8..68de8cbfb3ec 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@ struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
-extern const struct ethtool_ops atl1_ethtool_ops;
-
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2d6d3c..afb7f7dd1bb1 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
#include "atlx.h"
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305ed00b..36eca1ce75d4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@ err:
return status;
}
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_port_type *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
- OPCODE_COMMON_READ_TRANSRECV_DATA);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
- req->port = cpu_to_le32(port);
- req->page_num = cpu_to_le32(TR_PAGE_A0);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
- *connector = resp->data.connector;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdfe1c73..8469ff061f30 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f6635282..c36cd2ffbadc 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_mcast_pkts++;
}
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+ u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- if (ip_version) {
- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
- }
- ipv6_chk = (ip_version && (tcpf || udpf));
+ ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+ /* Ignore ipcksm for ipv6 pkts */
+ return l4_cksm && (ipcksm || ipv6);
}
static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
- if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb_checksum_none_assert(skb);
- else
+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
return (tcp_frame && !err) ? true : false;
}
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+ goto reschedule;
+ }
+
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
+reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
- cancel_delayed_work_sync(&adapter->work);
-
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS",
- "H DIRECTORY *** "};
-
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
- netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
+ cancel_delayed_work_sync(&adapter->work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
+ netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf48f35..9eea225decaf 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc5837514074..459614d2d7bc 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include "bnx2x_init.h"
+static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
}
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab1d2d4..6b28739c5302 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
void bnx2x_int_enable(struct bnx2x *bp);
/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
* Disable interrupts. This function ensures that no
* ISRs or SP DPCs (sp_task) are running after it returns.
*
@@ -192,17 +185,6 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
- struct bnx2x_client_ramrod_params *p);
-
-/**
* Set number of queues according to mode
*
* @param bp
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
- u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
/**
* Set MAC filtering configurations.
*
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
@@ -396,14 +349,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
* NAPI callback
*
* @param napi
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de784182c..a306b0e46b61 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
return rc;
}
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
}
}
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
}
}
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
{
int i;
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
* called during init common stage, ilt clients should be initialized
prior to calling this function
*/
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
int port = BP_PORT(bp);
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
}
/* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
****************************************************************************/
/* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
- dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9c42b9..2326774df843 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
-{
- u32 reg_val;
-
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
-
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
- } else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
- }
- break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
- break;
- default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
@@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
- if (phy_idx < params->num_phys)
- return params->phy[phy_idx].supported;
- return 0;
-}
-
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c7199276..171abf8097ee 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
-
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2fc0426..e9ad16f00b56 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
/* used only at init
* locking is done by mcp
*/
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
- u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
}
/* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
bp->mac_filters.unmatched_unicast & ~mask;
}
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
/*
* Init service functions
*/
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
+
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
WAIT_RAMROD_COMMON);
}
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
*
* @return 0 on success, -ENODEV if ramrod doesn't return.
*/
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
ETH_CONNECTION_TYPE);
}
-int bnx2x_setup_fw_client(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data,
- dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data,
+ dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_ramrod_params *p)
{
int rc;
@@ -7440,7 +7447,7 @@ reset_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7cecd52..bdb68a600382 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
struct slave *slave;
int i;
- write_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
bond->vlgrp = grp;
- write_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a9d95f..09ed3f42d673 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+ tristate "CAIF shared memory protocol driver"
+ depends on CAIF && U5500_MBOX
+ default n
+ ---help---
+ The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d619452b..b38d987da67d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 000000000000..1cd90da86f13
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES 1
+
+enum {
+ MBX_ACC0,
+ MBX_ACC1,
+ MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint , 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint , 0440);
+MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+ /* Always block until msg is written successfully */
+ mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+ return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+ void *pshm_drv)
+{
+ /*
+ * For UX5500, we have only 1 SHM instance which uses MBX0
+ * for communication with the peer modem
+ */
+ pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+ if (!pshm_dev->hmbx)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+ int i, result;
+
+ /* Loop is currently overkill, there is only one instance */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+ shmdev_lyr[i].shm_base_addr = shm_start;
+ shmdev_lyr[i].shm_total_sz = shm_size;
+
+ if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+ || (shmdev_lyr[i].shm_total_sz <= 0)) {
+ pr_warn("ERROR,"
+ "Shared memory Address and/or Size incorrect"
+ ", Bailing out ...\n");
+ result = -EINVAL;
+ goto clean;
+ }
+
+ pr_info("SHM AREA (instance %d) STARTS"
+ " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+ shmdev_lyr[i].shm_id = i;
+ shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+ shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+ /*
+ * Finally, CAIF core module is called with details in place:
+ * 1. SHM base address
+ * 2. SHM size
+ * 3. MBX handle
+ */
+ result = caif_shmcore_probe(&shmdev_lyr[i]);
+ if (result) {
+ pr_warn("ERROR[%d],"
+ "Could not probe SHM core (instance %d)"
+ " Bailing out ...\n", result, i);
+ goto clean;
+ }
+ }
+
+ return 0;
+
+clean:
+ /*
+ * For now, we assume that even if one instance of SHM fails, we bail
+ * out of the driver support completely. For this, we need to release
+ * any memory allocated and unregister any instance of SHM net device.
+ */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ if (shmdev_lyr[i].pshm_netdev)
+ unregister_netdev(shmdev_lyr[i].pshm_netdev);
+ }
+ return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+ kfree((void *)shmdev_lyr[i].shm_base_addr);
+ }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 000000000000..19f9c0656667
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ * Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF 6
+#define NR_RX_BUF 6
+#define TX_BUF_SZ 0x2000
+#define RX_BUF_SZ 0x2000
+
+#define CAIF_NEEDED_HEADROOM 32
+
+#define CAIF_FLOW_ON 1
+#define CAIF_FLOW_OFF 0
+
+#define LOW_WATERMARK 3
+#define HIGH_WATERMARK 4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF 10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
+ sizeof(struct shm_pck_desc))
+
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN 1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN 4
+
+#define CAIF_MAX_MTU 4096
+
+#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK (0x0F << 0)
+#define SHM_EMPTY_MASK (0x0F << 4)
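/*
 * Editorial sketch (not part of the patch): the mailbox word exchanged
 * with the modem packs a "buffer N is now full" index in the low nibble
 * and a "buffer N is now empty" index in the high nibble, each stored
 * off-by-one so that a zero nibble can mean "no notification".  The
 * standalone program below re-uses the SHM_* macros above; main() and
 * its buffer indices are illustrative only.
 */
#include <stdio.h>

#define SHM_SET_FULL(x)		(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

int main(void)
{
	/* Tell the peer that rx buffer 2 is full and tx buffer 5 is empty. */
	unsigned int msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);

	if (msg & SHM_FULL_MASK)
		printf("full buffer index:  %d\n", SHM_GET_FULL(msg));
	if (msg & SHM_EMPTY_MASK)
		printf("empty buffer index: %d\n", SHM_GET_EMPTY(msg));
	return 0;
}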
+
+struct shm_pck_desc {
+ /*
+ * Offset from start of shared memory area to start of
+ * shared memory CAIF frame.
+ */
+ u32 frm_ofs;
+ u32 frm_len;
+};
+
+struct buf_list {
+ unsigned char *desc_vptr;
+ u32 phy_addr;
+ u32 index;
+ u32 len;
+ u32 frames;
+ u32 frm_ofs;
+ struct list_head list;
+};
+
+struct shm_caif_frm {
+ /* Number of bytes of padding before the CAIF frame. */
+ u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+ /* caif_dev_common must always be first in the structure */
+ struct caif_dev_common cfdev;
+
+ u32 shm_tx_addr;
+ u32 shm_rx_addr;
+ u32 shm_base_addr;
+ u32 tx_empty_available;
+ spinlock_t lock;
+
+ struct list_head tx_empty_list;
+ struct list_head tx_pend_list;
+ struct list_head tx_full_list;
+ struct list_head rx_empty_list;
+ struct list_head rx_pend_list;
+ struct list_head rx_full_list;
+
+ struct workqueue_struct *pshm_tx_workqueue;
+ struct workqueue_struct *pshm_rx_workqueue;
+
+ struct work_struct shm_tx_work;
+ struct work_struct shm_rx_work;
+
+ struct sk_buff_head sk_qhead;
+ struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+ netif_wake_queue(shm_netdev);
+ return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+ netif_stop_queue(shm_netdev);
+ return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv;
+ struct list_head *pos;
+ u32 avail_emptybuff = 0;
+ unsigned long flags = 0;
+
+ pshm_drv = (struct shmdrv_layer *)priv;
+
+ /* Check for received buffers. */
+ if (mbx_msg & SHM_FULL_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->rx_empty_list)) {
+
+ /* Release spin lock. */
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("No empty Rx buffers to fill: "
+ "mbx_msg:%x\n", mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_FULL(mbx_msg)) {
+
+ /* We print even in IRQ context... */
+ pr_warn(
+ "phyif_shm_mbx_msg_cb: RX full out of sync:"
+ " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+ idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule RX work queue. */
+ if (!work_pending(&pshm_drv->shm_rx_work))
+ queue_work(pshm_drv->pshm_rx_workqueue,
+ &pshm_drv->shm_rx_work);
+ }
+
+ /* Check for emptied buffers. */
+ if (mbx_msg & SHM_EMPTY_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->tx_full_list)) {
+
+ /* We print even in IRQ context... */
+ pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("TX empty "
+ "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+ list_del_init(&pbuf->list);
+
+ /* Reset buffer parameters. */
+ pbuf->frames = 0;
+ pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ /* Check whether we have to wake up the transmitter. */
+ if ((avail_emptybuff > HIGH_WATERMARK) &&
+ (!pshm_drv->tx_empty_available)) {
+ pshm_drv->tx_empty_available = 1;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule the work queue, if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue,
+ &pshm_drv->shm_tx_work);
+ } else
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ }
+
+ return 0;
+
+err_sync:
+ return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+ struct shmdrv_layer *pshm_drv;
+ struct buf_list *pbuf;
+ unsigned long flags = 0;
+ struct sk_buff *skb;
+ char *p;
+ int ret;
+
+ pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+ while (1) {
+
+ struct shm_pck_desc *pck_desc;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for received buffers. */
+ if (list_empty(&pshm_drv->rx_full_list)) {
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ break;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+ list);
+ list_del_init(&pbuf->list);
+
+ /* Retrieve pointer to start of the packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+ /*
+ * Check whether descriptor contains a CAIF shared memory
+ * frame.
+ */
+ while (pck_desc->frm_ofs) {
+ unsigned int frm_buf_ofs;
+ unsigned int frm_pck_ofs;
+ unsigned int frm_pck_len;
+ /*
+ * Check whether offset is within buffer limits
+ * (lower).
+ */
+ if (pck_desc->frm_ofs <
+ (pbuf->phy_addr - pshm_drv->shm_base_addr))
+ break;
+ /*
+ * Check whether offset is within buffer limits
+ * (higher).
+ */
+ if (pck_desc->frm_ofs >
+ ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+ pbuf->len))
+ break;
+
+ /* Calculate offset from start of buffer. */
+ frm_buf_ofs =
+ pck_desc->frm_ofs - (pbuf->phy_addr -
+ pshm_drv->shm_base_addr);
+
+ /*
+ * Calculate offset and length of CAIF packet while
+ * taking care of the shared memory header.
+ */
+ frm_pck_ofs =
+ frm_buf_ofs + SHM_HDR_LEN +
+ (*(pbuf->desc_vptr + frm_buf_ofs));
+ frm_pck_len =
+ (pck_desc->frm_len - SHM_HDR_LEN -
+ (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+ /* Check whether CAIF packet is within buffer limits */
+ if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+ break;
+
+ /* Get a suitable CAIF packet and copy in data. */
+ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+ frm_pck_len + 1);
+ BUG_ON(skb == NULL);
+
+ p = skb_put(skb, frm_pck_len);
+ memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+
+ if (!ret) {
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_bytes += pck_desc->frm_len;
+ } else
+ ++pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_dropped;
+ /* Move to next packet descriptor. */
+ pck_desc++;
+ }
+
+ list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ }
+
+ /* Schedule the work queue, if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+ u32 mbox_msg;
+ unsigned int frmlen, avail_emptybuff, append = 0;
+ unsigned long flags = 0;
+ struct buf_list *pbuf = NULL;
+ struct shmdrv_layer *pshm_drv;
+ struct shm_caif_frm *frm;
+ struct sk_buff *skb;
+ struct shm_pck_desc *pck_desc;
+ struct list_head *pos;
+
+ pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+ do {
+ /* Initialize mailbox message. */
+ mbox_msg = 0x00;
+ avail_emptybuff = 0;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for pending receive buffers. */
+ if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+ pbuf = list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+ /*
+ * Value index is never changed,
+ * so read access should be safe.
+ */
+ mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+ }
+
+ skb = skb_peek(&pshm_drv->sk_qhead);
+
+ if (skb == NULL)
+ goto send_msg;
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ if ((avail_emptybuff < LOW_WATERMARK) &&
+ pshm_drv->tx_empty_available) {
+ /* Update blocking condition. */
+ pshm_drv->tx_empty_available = 0;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+ }
+ /*
+ * We simply return back to the caller if we do not have space
+ * either in Tx pending list or Tx empty list. In this case,
+ * we hold the received skb in the skb list, waiting to
+ * be transmitted once Tx buffers become available
+ */
+ if (list_empty(&pshm_drv->tx_empty_list))
+ goto send_msg;
+
+ /* Get the first free Tx buffer. */
+ pbuf = list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ do {
+ if (append) {
+ skb = skb_peek(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
+ }
+
+ frm = (struct shm_caif_frm *)
+ (pbuf->desc_vptr + pbuf->frm_ofs);
+
+ frm->hdr_ofs = 0;
+ frmlen = 0;
+ frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+ /* Add tail padding if needed. */
+ if (frmlen % SHM_FRM_PAD_LEN)
+ frmlen += SHM_FRM_PAD_LEN -
+ (frmlen % SHM_FRM_PAD_LEN);
+
+ /*
+ * Verify that packet, header and additional padding
+ * can fit within the buffer frame area.
+ */
+ if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+ break;
+
+ if (!append) {
+ list_del_init(&pbuf->list);
+ append = 1;
+ }
+
+ skb = skb_dequeue(&pshm_drv->sk_qhead);
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, pbuf->desc_vptr +
+ pbuf->frm_ofs + SHM_HDR_LEN +
+ frm->hdr_ofs, skb->len);
+
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+ frmlen;
+ dev_kfree_skb(skb);
+
+ /* Fill in the shared memory packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+ /* Forward to current frame. */
+ pck_desc += pbuf->frames;
+ pck_desc->frm_ofs = (pbuf->phy_addr -
+ pshm_drv->shm_base_addr) +
+ pbuf->frm_ofs;
+ pck_desc->frm_len = frmlen;
+ /* Terminate packet descriptor area. */
+ pck_desc++;
+ pck_desc->frm_ofs = 0;
+ /* Update buffer parameters. */
+ pbuf->frames++;
+ pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+ } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+ /* Assign buffer as full. */
+ list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+ append = 0;
+ mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ if (mbox_msg)
+ pshm_drv->pshm_dev->pshmdev_mbxsend
+ (pshm_drv->pshm_dev->shm_id, mbox_msg);
+ } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ unsigned long flags = 0;
+
+ pshm_drv = netdev_priv(shm_netdev);
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule the Tx work queue for deferred processing of skbs. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = shm_netdev_open,
+ .ndo_stop = shm_netdev_close,
+ .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ pshm_netdev->netdev_ops = &netdev_ops;
+
+ pshm_netdev->mtu = CAIF_MAX_MTU;
+ pshm_netdev->type = ARPHRD_CAIF;
+ pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ pshm_netdev->tx_queue_len = 0;
+ pshm_netdev->destructor = free_netdev;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ /* Initialize structures in a clean state. */
+ memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+ pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+ int result, j;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+ "cfshm%d", shm_netdev_setup);
+ if (!pshm_dev->pshm_netdev)
+ return -ENOMEM;
+
+ pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+ pshm_drv->pshm_dev = pshm_dev;
+
+ /*
+ * Initialization starts with the verification of the
+ * availability of MBX driver by calling its setup function.
+ * MBX driver must be available by this time for proper
+ * functioning of SHM driver.
+ */
+ if ((pshm_dev->pshmdev_mbxsetup
+ (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+ pr_warn("Could not config. SHM Mailbox,"
+ " Bailing out.....\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENODEV;
+ }
+
+ skb_queue_head_init(&pshm_drv->sk_qhead);
+
+ pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+ " INSTANCE AT pshm_drv =0x%p\n",
+ pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+ if (pshm_dev->shm_total_sz <
+ (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+ pr_warn("ERROR, Amount of available"
+ " Phys. SHM cannot accomodate current SHM "
+ "driver configuration, Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+
+ pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+ pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+ if (pshm_dev->shm_loopback)
+ pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+ else
+ pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+ (NR_TX_BUF * TX_BUF_SZ);
+
+ INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+ INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+ INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+ INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+ pshm_drv->pshm_tx_workqueue =
+ create_singlethread_workqueue("shm_tx_work");
+ pshm_drv->pshm_rx_workqueue =
+ create_singlethread_workqueue("shm_rx_work");
+
+ for (j = 0; j < NR_TX_BUF; j++) {
+ struct buf_list *tx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (tx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem. for tx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ tx_buf->index = j;
+ tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+ tx_buf->len = TX_BUF_SZ;
+ tx_buf->frames = 0;
+ tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ if (pshm_dev->shm_loopback)
+ tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ else
+ tx_buf->desc_vptr =
+ ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+ list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+ }
+
+ for (j = 0; j < NR_RX_BUF; j++) {
+ struct buf_list *rx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (rx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem.for rx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ rx_buf->index = j;
+ rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+ rx_buf->len = RX_BUF_SZ;
+
+ if (pshm_dev->shm_loopback)
+ rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ else
+ rx_buf->desc_vptr =
+ ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+ list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+ }
+
+ pshm_drv->tx_empty_available = 1;
+ result = register_netdev(pshm_dev->pshm_netdev);
+ if (result)
+ pr_warn("ERROR[%d], SHM could not, "
+ "register with NW FRMWK Bailing out ...\n", result);
+
+ return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ while (!(list_empty(&pshm_drv->tx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_pend_list.next,
+ struct buf_list, list);
+
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ /* Destroy work queues. */
+ destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+ destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+ unregister_netdev(pshm_netdev);
+}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e45394433..080574b0fff0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
---help---
Say Y here if you want to support for Freescale FlexCAN.
+config PCH_CAN
+ tristate "PCH CAN"
+ depends on CAN_DEV && PCI
+ ---help---
+ This driver is for the PCH CAN controller of Topcliff, an IOH
+ for x86 embedded processors. It provides access to the CAN bus.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 00575373bbd0..90af15a4f106 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86bc5e2..cee98fa668bd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
#include <mach/board.h>
-#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
@@ -172,6 +171,7 @@ struct at91_priv {
};
static struct can_bittiming_const at91_bittiming_const = {
+ .name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
- return readl(priv->reg_base + reg);
+ return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
- writel(value, priv->reg_base + reg);
+ __raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+ /* reset acceptance mask and id register */
+ for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+ at91_write(priv, AT91_MAM(i), 0x0);
+ at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+ }
+
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
- reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
- ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
- dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+ netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
+static int at91_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+ bec->rxerr = reg_ecr & 0xff;
+ bec->txerr = reg_ecr >> 16;
+
+ return 0;
+}
+
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+ at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
- dev_err(dev->dev.parent,
- "BUG! TX buffer full when queue awake!\n");
+ netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
struct sk_buff *skb;
struct can_frame *cf;
- dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ /* allow RX of extended frames */
+ at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
- dev_info(dev->dev.parent,
- "order of incoming frames cannot be guaranteed\n");
+ netdev_info(dev,
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
- dev_dbg(dev->dev.parent, "CERR irq\n");
+ netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
- dev_dbg(dev->dev.parent, "SERR irq\n");
+ netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
- dev_dbg(dev->dev.parent, "AERR irq\n");
+ netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
- dev_dbg(dev->dev.parent, "FERR irq\n");
+ netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
- dev_dbg(dev->dev.parent, "BERR irq\n");
+ netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
- u32 reg_idr, reg_ier, reg_ecr;
- u8 tec, rec;
+ u32 reg_idr = 0, reg_ier = 0;
+ struct can_berr_counter bec;
- reg_ecr = at91_read(priv, AT91_ECR);
- rec = reg_ecr & 0xff;
- tec = reg_ecr >> 16;
+ at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
- dev_dbg(dev->dev.parent, "restarted\n");
+ netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
- dev_dbg(dev->dev.parent, "Error Active\n");
+ netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
cf->can_id |= CAN_ERR_BUSOFF;
- dev_dbg(dev->dev.parent, "bus-off\n");
+ netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
- dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
- priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
return 0;
exit_free:
- free_netdev(dev);
+ free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_netdev(dev);
-
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
- printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
- printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a090ba7..d4990568baee 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
- free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e32bd5..7ab534aee452 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
+# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+ if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+ frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 000000000000..55ec324caaf4
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ 32
+#define MSG_OBJ_RX 0 /* The receive message object flag. */
+#define MSG_OBJ_TX 1 /* The transmit message object flag. */
+
+#define ENABLE 1 /* The enable flag */
+#define DISABLE 0 /* The disable flag */
+#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE 0x000e
+#define CAN_CTRL_CCE 0x0040
+#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET 0x00f3
+#define CAN_CMASK_RX_TX_GET 0x0073
+#define CAN_CMASK_ALL 0xff
+#define CAN_CMASK_RDWR 0x80
+#define CAN_CMASK_ARB 0x20
+#define CAN_CMASK_CTRL 0x10
+#define CAN_CMASK_MASK 0x40
+#define CAN_CMASK_NEWDAT 0x04
+#define CAN_CMASK_CLRINTPND 0x08
+
+#define CAN_IF_MCONT_NEWDAT 0x8000
+#define CAN_IF_MCONT_INTPND 0x2000
+#define CAN_IF_MCONT_UMASK 0x1000
+#define CAN_IF_MCONT_TXIE 0x0800
+#define CAN_IF_MCONT_RXIE 0x0400
+#define CAN_IF_MCONT_RMTEN 0x0200
+#define CAN_IF_MCONT_TXRQXT 0x0100
+#define CAN_IF_MCONT_EOB 0x0080
+#define CAN_IF_MCONT_DLC 0x000f
+#define CAN_IF_MCONT_MSGLOST 0x4000
+#define CAN_MASK2_MDIR_MXTD 0xc000
+#define CAN_ID2_DIR 0x2000
+#define CAN_ID_MSGVAL 0x8000
+
+#define CAN_STATUS_INT 0x8000
+#define CAN_IF_CREQ_BUSY 0x8000
+#define CAN_ID2_XTD 0x4000
+
+#define CAN_REC 0x00007f00
+#define CAN_TEC 0x000000ff
+
+#define PCH_RX_OK 0x00000010
+#define PCH_TX_OK 0x00000008
+#define PCH_BUS_OFF 0x00000080
+#define PCH_EWARN 0x00000040
+#define PCH_EPASSIV 0x00000020
+#define PCH_LEC0 0x00000001
+#define PCH_LEC1 0x00000002
+#define PCH_LEC2 0x00000004
+#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR PCH_LEC0
+#define PCH_FORM_ERR PCH_LEC1
+#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR PCH_LEC2
+#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
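+/*
+ * Worked example (illustrative): the last-error-code is a 3-bit field,
+ * so e.g. PCH_ACK_ERR is the combination PCH_LEC0 | PCH_LEC1 == 0x3,
+ * while PCH_LEC_ALL (0x7) covers all three bits and is used as the mask
+ * when checking the status register.
+ */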
+
+/* bit position of certain controller bits. */
+#define BIT_BITT_BRP 0
+#define BIT_BITT_SJW 6
+#define BIT_BITT_TSEG1 8
+#define BIT_BITT_TSEG2 12
+#define BIT_IF1_MCONT_RXIE 10
+#define BIT_IF2_MCONT_TXIE 11
+#define BIT_BRPE_BRPE 6
+#define BIT_ES_TXERRCNT 0
+#define BIT_ES_RXERRCNT 8
+#define MSK_BITT_BRP 0x3f
+#define MSK_BITT_SJW 0xc0
+#define MSK_BITT_TSEG1 0xf00
+#define MSK_BITT_TSEG2 0x7000
+#define MSK_BRPE_BRPE 0x3c0
+#define MSK_BRPE_GET 0x0f
+#define MSK_CTRL_IE_SIE_EIE 0x07
+#define MSK_MCONT_TXIE 0x08
+#define MSK_MCONT_RXIE 0x10
+#define PCH_CAN_NO_TX_BUFF 1
+#define COUNTER_LIMIT 10
+
+#define PCH_CAN_CLK 50000000 /* 50MHz */
+
+/* Define the number of message objects.
+ * PCH CAN communication is done via the Message RAM,
+ * which consists of 32 message objects. */
+#define PCH_RX_OBJ_NUM 26 /* Message objects 1..PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM 6 /* Objects PCH_RX_OBJ_NUM+1..PCH_OBJ_NUM are Tx */
+#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+
+#define PCH_FIFO_THRESH 16
+
+enum pch_can_mode {
+ PCH_CAN_ENABLE,
+ PCH_CAN_DISABLE,
+ PCH_CAN_ALL,
+ PCH_CAN_NONE,
+ PCH_CAN_STOP,
+ PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+ u32 cont;
+ u32 stat;
+ u32 errc;
+ u32 bitt;
+ u32 intr;
+ u32 opt;
+ u32 brpe;
+ u32 reserve1;
+ u32 if1_creq;
+ u32 if1_cmask;
+ u32 if1_mask1;
+ u32 if1_mask2;
+ u32 if1_id1;
+ u32 if1_id2;
+ u32 if1_mcont;
+ u32 if1_dataa1;
+ u32 if1_dataa2;
+ u32 if1_datab1;
+ u32 if1_datab2;
+ u32 reserve2;
+ u32 reserve3[12];
+ u32 if2_creq;
+ u32 if2_cmask;
+ u32 if2_mask1;
+ u32 if2_mask2;
+ u32 if2_id1;
+ u32 if2_id2;
+ u32 if2_mcont;
+ u32 if2_dataa1;
+ u32 if2_dataa2;
+ u32 if2_datab1;
+ u32 if2_datab2;
+ u32 reserve4;
+ u32 reserve5[20];
+ u32 treq1;
+ u32 treq2;
+ u32 reserve6[2];
+ u32 reserve7[56];
+ u32 reserve8[3];
+ u32 srst;
+};
+
+struct pch_can_priv {
+ struct can_priv can;
+ unsigned int can_num;
+ struct pci_dev *dev;
+ unsigned int tx_enable[MAX_MSG_OBJ];
+ unsigned int rx_enable[MAX_MSG_OBJ];
+ unsigned int rx_link[MAX_MSG_OBJ];
+ unsigned int int_enables;
+ unsigned int int_stat;
+ struct net_device *ndev;
+ spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
+ unsigned int msg_obj[MAX_MSG_OBJ];
+ struct pch_can_regs __iomem *regs;
+ struct napi_struct napi;
+ unsigned int tx_obj; /* Point next Tx Obj index */
+ unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6bit + extended 4bit */
+ .brp_inc = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+ {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(u32 *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(u32 *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) & ~mask, addr);
+}
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+ enum pch_can_mode mode)
+{
+ switch (mode) {
+ case PCH_CAN_RUN:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ case PCH_CAN_STOP:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ break;
+ }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+ u32 reg_val = ioread32(&priv->regs->opt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_val |= CAN_OPT_SILENT;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg_val |= CAN_OPT_LBACK;
+
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+ /* Clearing the IE, SIE and EIE bits of Can control register. */
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
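+/*
+ * Worked example (illustrative): with priv->int_enables ==
+ * MSK_CTRL_IE_SIE_EIE (0x07), the shift above yields 0x0e, i.e. exactly
+ * CAN_CTRL_IE_SIE_EIE, so IE, SIE and EIE all end up set in CANCONT.
+ */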
+
+/* This function retrieves interrupt enabled for the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+ enum pch_can_mode interrupt_no)
+{
+ switch (interrupt_no) {
+ case PCH_CAN_ENABLE:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_DISABLE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_ALL:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ case PCH_CAN_NONE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ break;
+ }
+}
+
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+ u32 counter = COUNTER_LIMIT;
+ u32 ifx_creq;
+
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the receive buffer data from RAM to Interface1 registers */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and RxIE bits */
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and RxIE bits */
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traversing to obtain the object configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traversing to obtain the object configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ /* Setting the IF2CMASK register for accessing the
+ MsgVal and TxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and TxIE bits */
+ pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and TxIE bits. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traversing to obtain the object configured as transmit object. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Traversing to obtain the object configured as transmit object. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if1_mcont)) &
+ CAN_IF_MCONT_RXIE))
+ *enable = ENABLE;
+ else
+ *enable = DISABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if2_mcont)) &
+ CAN_IF_MCONT_TXIE)) {
+ *enable = ENABLE;
+ } else {
+ *enable = DISABLE;
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+ return ioread32(&priv->regs->intr) & 0xffff;
+}
+
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+ if (set == ENABLE)
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+ if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+ *link = DISABLE;
+ else
+ *link = ENABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+ iowrite32(0xffff, &priv->regs->if1_mask1);
+ iowrite32(0xffff, &priv->regs->if1_mask2);
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+ iowrite32(0x0, &priv->regs->if1_mcont);
+ iowrite32(0x0, &priv->regs->if1_dataa1);
+ iowrite32(0x0, &priv->regs->if1_dataa2);
+ iowrite32(0x0, &priv->regs->if1_datab1);
+ iowrite32(0x0, &priv->regs->if1_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ }
+
+ for (i = i; i < PCH_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+ iowrite32(0xffff, &priv->regs->if2_mask1);
+ iowrite32(0xffff, &priv->regs->if2_mask2);
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ iowrite32(0x0, &priv->regs->if2_mcont);
+ iowrite32(0x0, &priv->regs->if2_dataa1);
+ iowrite32(0x0, &priv->regs->if2_dataa2);
+ iowrite32(0x0, &priv->regs->if2_datab1);
+ iowrite32(0x0, &priv->regs->if2_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ /* Set FIFO mode set to 0 except last Rx Obj*/
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+ /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
+ if (i == (PCH_RX_OBJ_NUM - 1))
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+
+ iowrite32(0, &priv->regs->if1_mask1);
+ pch_can_bit_clear(&priv->regs->if1_mask2,
+ 0x1fff | CAN_MASK2_MDIR_MXTD);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+ /* Resetting DIR bit for reception */
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ /* Setting EOB bit for transmitter */
+ iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ iowrite32(0, &priv->regs->if2_mask1);
+ pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Clearing all the message object buffers. */
+ pch_can_clear_buffers(priv);
+
+ /* Configuring the respective message object as either rx/tx object. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Enabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Disabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ /* Disabling all the receive objects. */
+ pch_can_rx_disable_all(priv);
+
+ /* Disabling all the transmit objects. */
+ pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+ if (mask == CAN_STATUS_INT) {
+ ioread32(&priv->regs->stat);
+ return;
+ }
+
+ /* Clear interrupt for transmit object */
+ if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+ /* Setting CMASK for clearing interrupts for
+ frame transmission. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if2_cmask);
+
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->if2_id2,
+ CAN_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->if2_id1);
+
+ /* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+ pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+ } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ /* Setting CMASK for clearing the reception interrupts. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+ }
+}
+
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+ /* write to sw reset register */
+ iowrite32(1, &priv->regs->srst);
+ iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+ struct sk_buff *skb;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ u32 errc;
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ enum can_state state = priv->can.state;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ if (status & PCH_BUS_OFF) {
+ pch_can_tx_disable_all(priv);
+ pch_can_rx_disable_all(priv);
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(ndev);
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+ dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
+ }
+
+ /* Warning interrupt. */
+ if (status & PCH_EWARN) {
+ state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ if ((errc & CAN_TEC) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ dev_warn(&ndev->dev,
+ "%s -> Error Counter is more than 96.\n", __func__);
+ }
+ /* Error passive interrupt. */
+ if (status & PCH_EPASSIV) {
+ priv->can.can_stats.error_passive++;
+ state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if ((errc & CAN_TEC) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ dev_err(&ndev->dev,
+ "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
+ }
+
+ if (status & PCH_LEC_ALL) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ switch (status & PCH_LEC_ALL) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case PCH_ACK_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ break;
+ default:
+ iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+ break;
+ }
+
+ }
+
+ priv->can.state = state;
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+ u32 reg;
+ canid_t id;
+ u32 ide;
+ u32 rtr;
+ int i, j, k;
+ int rcv_pkts = 0;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->if1_mcont);
+ reg &= 0xffff;
+
+ for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+ /* If MsgLost bit set. */
+ if (reg & CAN_IF_MCONT_MSGLOST) {
+ dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_MSGLOST);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ goto RX_NEXT;
+ }
+ if (!(reg & CAN_IF_MCONT_NEWDAT))
+ goto RX_NEXT;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Get Received data */
+ ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+ if (ide) {
+ id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+ id |= (((ioread32(&priv->regs->if1_id2)) &
+ 0x1fff) << 16);
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ } else {
+ id = (((ioread32(&priv->regs->if1_id2)) &
+ (CAN_SFF_MASK << 2)) >> 2);
+ cf->can_id = (id & CAN_SFF_MASK);
+ }
+
+ rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
+ if (rtr) {
+ cf->can_dlc = 0;
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+ 0x0f);
+ }
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+ cf->data[i++] = cpu_to_le32(reg & 0xff);
+ if (i == cf->can_dlc)
+ break;
+ cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ }
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ if (k < PCH_FIFO_THRESH) {
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+ CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_INTPND);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+ } else if (k > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, k);
+ } else if (k == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt+1);
+ }
+RX_NEXT:
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+ reg = ioread32(&priv->regs->if1_mcont);
+ }
+
+ return rcv_pkts;
+}
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ u32 dlc;
+ u32 int_stat;
+ int rcv_pkts = 0;
+ u32 reg_stat;
+ unsigned long flags;
+
+ int_stat = pch_can_int_pending(priv);
+ if (!int_stat)
+ return 0;
+
+INT_STAT:
+ if (int_stat == CAN_STATUS_INT) {
+ reg_stat = ioread32(&priv->regs->stat);
+ if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+ if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+ pch_can_error(ndev, reg_stat);
+ }
+
+ if (reg_stat & PCH_TX_OK) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq,
+ ioread32(&priv->regs->intr));
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ }
+
+ if (reg_stat & PCH_RX_OK)
+ pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ }
+
+MSG_OBJ:
+ if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (rcv_pkts < 0)
+ return 0;
+ } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+ if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+ /* Handle transmission interrupt */
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+ &priv->regs->if2_cmask);
+ dlc = ioread32(&priv->regs->if2_mcont) &
+ CAN_IF_MCONT_DLC;
+ pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (dlc > 8)
+ dlc = 8;
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ else if (int_stat >= 1 && int_stat <= 32)
+ goto MSG_OBJ;
+
+ napi_complete(napi);
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+ return rcv_pkts;
+}
+
+static int pch_set_bittiming(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 canbit;
+ u32 bepe;
+ u32 brp;
+
+ /* Setting the CCE bit for accessing the CAN timing register. */
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
+ brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
+ canbit = brp & MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+ canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+ bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ iowrite32(canbit, &priv->regs->bitt);
+ iowrite32(bepe, &priv->regs->brpe);
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+ return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->can.state != CAN_STATE_STOPPED)
+ pch_can_reset(priv);
+
+ pch_set_bittiming(ndev);
+ pch_can_set_optmode(priv);
+
+ pch_can_tx_enable_all(priv);
+ pch_can_rx_enable_all(priv);
+
+ /* Setting the CAN to run mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ pch_can_start(ndev);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ int retval;
+
+ retval = pci_enable_msi(priv->dev);
+ if (retval) {
+ dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+ priv->use_msi = 1;
+ }
+
+ /* Registering the interrupt. */
+ retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
+ if (retval) {
+ dev_err(&ndev->dev, "request_irq failed.\n");
+ goto req_irq_err;
+ }
+
+ /* Open common can device */
+ retval = open_candev(ndev);
+ if (retval) {
+ dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ goto err_open_candev;
+ }
+
+ pch_can_init(priv);
+ pch_can_start(ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_open_candev:
+ free_irq(priv->dev->irq, ndev);
+req_irq_err:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+
+ pch_can_release(priv);
+
+ return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ pch_can_release(priv);
+ free_irq(priv->dev->irq, ndev);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+ close_candev(ndev);
+ priv->can.state = CAN_STATE_STOPPED;
+ return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+ u32 buffer_status = 0;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ /* Getting the message object status. */
+ buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+ return buffer_status & obj_id;
+}
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int i, j;
+ unsigned long flags;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ int tx_buffer_avail = 0;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+ while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+ PCH_RX_OBJ_NUM)))
+ udelay(500);
+
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+ tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+ } else {
+ tx_buffer_avail = priv->tx_obj;
+ }
+ priv->tx_obj++;
+
+ /* Acquiring the lock. */
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ /* Setting the CMASK register. */
+ pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+ /* Set the message identifier (extended or standard). */
+ pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+ pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ } else {
+ pch_can_bit_set(&priv->regs->if2_id1, 0);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ (cf->can_id & CAN_SFF_MASK) << 2);
+ }
+
+ /* If a remote frame has to be transmitted. */
+ if (cf->can_id & CAN_RTR_FLAG)
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ iowrite32(le32_to_cpu(cf->data[i++]),
+ (&priv->regs->if2_dataa1) + j*4);
+ if (i == cf->can_dlc)
+ break;
+ iowrite32(le32_to_cpu(cf->data[i++] << 8),
+ (&priv->regs->if2_dataa1) + j*4);
+ }
+
+ can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+ /* Updating the size of the data. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+ pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+ /* Clearing IntPnd, NewDat & TxRqst */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+
+ /* Setting NewDat, TxRqst bits */
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+ .ndo_open = pch_can_open,
+ .ndo_stop = pch_close,
+ .ndo_start_xmit = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(priv->ndev);
+ free_candev(priv->ndev);
+ pci_iounmap(pdev, priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pch_can_reset(priv);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return value. */
+ u32 buf_stat; /* Variable for reading the transmit buffer status. */
+ u32 counter = 0xFFFFFF;
+
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ /* Stop the CAN controller */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Indicate that we are about to/in suspend */
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ /* Waiting for all transmissions to complete. */
+ while (counter) {
+ buf_stat = pch_can_get_buffer_status(priv);
+ if (!buf_stat)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+
+ /* Save the interrupt configuration and then disable interrupts */
+ pch_can_get_int_enables(priv, &(priv->int_enables));
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Save Tx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_get_tx_enable(priv, i + 1,
+ &(priv->tx_enable[i]));
+ }
+
+ /* Disable all Transmit buffers */
+ pch_can_tx_disable_all(priv);
+
+ /* Save Rx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ pch_can_get_rx_enable(priv, i + 1,
+ &(priv->rx_enable[i]));
+ pch_can_get_rx_buffer_link(priv, i + 1,
+ &(priv->rx_link[i]));
+ }
+ }
+
+ /* Disable all Receive buffers */
+ pch_can_rx_disable_all(priv);
+ retval = pci_save_state(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_save_state failed.\n");
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+
+ return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return variable. */
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed.\n");
+ return retval;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Disabling all interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Setting the CAN device in Stop Mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Configuring the transmit and receive buffers. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Restore the CAN state */
+ pch_set_bittiming(dev);
+
+ /* Listen/Active */
+ pch_can_set_optmode(priv);
+
+ /* Enabling the transmit buffers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ pch_can_set_tx_enable(priv, i + 1,
+ priv->tx_enable[i]);
+ }
+ }
+
+ /* Configuring the receive buffers and enabling them. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i + 1,
+ priv->rx_link[i]);
+
+ /* Restore buffer enables */
+ pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+ }
+ }
+
+ /* Enable CAN Interrupts */
+ pch_can_set_int_custom(priv);
+
+ /* Restore Run Mode */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+ bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+ return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct pch_can_priv *priv;
+ int rc;
+ int index;
+ void __iomem *addr;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+ goto probe_exit_endev;
+ }
+
+ rc = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+ goto probe_exit_pcireq;
+ }
+
+ addr = pci_iomap(pdev, 1, 0);
+ if (!addr) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Failed pci_iomap\n");
+ goto probe_exit_ipmap;
+ }
+
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ if (!ndev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed alloc_candev\n");
+ goto probe_exit_alloc_candev;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->dev = pdev;
+ priv->can.bittiming_const = &pch_can_bittiming_const;
+ priv->can.do_set_mode = pch_can_do_set_mode;
+ priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_LOOPBACK;
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+ ndev->irq = pdev->irq;
+ ndev->flags |= IFF_ECHO;
+
+ pci_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &pch_can_netdev_ops;
+
+ priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
+ for (index = 0; index < PCH_RX_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_RX;
+
+ for (index = index; index < PCH_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_TX;
+
+ netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+ rc = register_candev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+ goto probe_exit_reg_candev;
+ }
+
+ return 0;
+
+probe_exit_reg_candev:
+ free_candev(ndev);
+probe_exit_alloc_candev:
+ pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+ pci_release_regions(pdev);
+probe_exit_pcireq:
+ pci_disable_device(pdev);
+probe_exit_endev:
+ return rc;
+}
+
+static struct pci_driver pch_can_pcidev = {
+ .name = "pch_can",
+ .id_table = pch_pci_tbl,
+ .probe = pch_can_probe,
+ .remove = __devexit_p(pch_can_remove),
+ .suspend = pch_can_suspend,
+ .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+ return pci_register_driver(&pch_can_pcidev);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+ pci_unregister_driver(&pch_can_pcidev);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505afd682..6fdc031daaae 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+ tristate "TS-CAN1 PC104 boards"
+ depends on ISA
+ help
+ This driver is for Technologic Systems' TS-CAN1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce924553995d..2c591eb321c7 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 000000000000..9756099a883a
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one for each JP1:JP2 IO address setting) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1 0
+#define TSCAN1_ID2 1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED 3
+#define TSCAN1_PAGE 4
+#define TSCAN1_MODE 5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier register magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* Start of the PLD IO base addresses */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+ 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+ int irq, i;
+
+ pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+ if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+ return -EBUSY;
+
+ if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+ inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENODEV;
+ }
+
+ switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+ case TSCAN1_JP4:
+ irq = 6;
+ break;
+ case TSCAN1_JP5:
+ irq = 7;
+ break;
+ case TSCAN1_JP4 | TSCAN1_JP5:
+ irq = 5;
+ break;
+ default:
+ dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -EINVAL;
+ }
+
+ netdev = alloc_sja1000dev(0);
+ if (!netdev) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ netdev->base_addr = pld_base;
+ netdev->irq = irq;
+
+ priv = netdev_priv(netdev);
+ priv->read_reg = tscan1_read;
+ priv->write_reg = tscan1_write;
+ priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+ priv->cdr = CDR_CBP | CDR_CLK_OFF;
+ priv->ocr = OCR_TX0_PUSHPULL;
+
+ /* Select the first SJA1000 IO address that is free and that works */
+ for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+ sja1000_base = tscan1_sja1000_addresses[i];
+ if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+ dev_name(dev)))
+ continue;
+
+ /* Set SJA1000 IO base address and enable it */
+ outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+ priv->reg_base = (void __iomem *)sja1000_base;
+ if (!register_sja1000dev(netdev)) {
+ /* SJA1000 probe succeeded; turn LED off and return */
+ outb(0, pld_base + TSCAN1_LED);
+ netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+ pld_base, sja1000_base, irq);
+ return 0;
+ }
+
+ /* SJA1000 probe failed; release and try next address */
+ outb(0, pld_base + TSCAN1_MODE);
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ }
+
+ dev_err(dev, "failed to assign SJA1000 IO address\n");
+ dev_set_drvdata(dev, NULL);
+ free_sja1000dev(netdev);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+
+ netdev = dev_get_drvdata(dev);
+ unregister_sja1000dev(netdev);
+ dev_set_drvdata(dev, NULL);
+
+ priv = netdev_priv(netdev);
+ pld_base = netdev->base_addr;
+ sja1000_base = (unsigned long)priv->reg_base;
+
+ outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
+
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+
+ free_sja1000dev(netdev);
+
+ return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+ .probe = tscan1_probe,
+ .remove = __devexit_p(tscan1_remove),
+ .driver = {
+ .name = "tscan1",
+ },
+};
+
+static int __init tscan1_init(void)
+{
+ return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+ isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a5f637..4e3c12371aae 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
}
if (!(adap->flags & QUEUES_BOUND)) {
- err = bind_qsets(adap);
- if (err) {
- CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
+ err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4119f1..3d4253d311eb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
- struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0a5746..f17703f410b3 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- pi->vlan_grp != NULL, true);
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+ int err;
+ unsigned long old_feat = dev->features;
+
+ err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+ if (err)
+ return err;
+
+ if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+ const struct port_info *pi = netdev_priv(dev);
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+ true);
+ if (err)
+ dev->features = old_feat;
+ }
+ return err;
}
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct port_info *pi = netdev_priv(dev);
-
- pi->vlan_grp = grp;
- t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
- grp != NULL, true);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
- .ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
@@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
__set_bit(i, &adapter->registered_device_map);
adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+ netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3debce7..17022258ed68 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
- struct port_info *pi = netdev_priv(rxq->rspq.netdev);
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp)) {
- ret = vlan_gro_frags(&rxq->rspq.napi, grp,
- ntohs(pkt->vlan));
- goto stats;
- }
}
ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+ if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp))
- vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
- else
- dev_kfree_skb_any(skb);
- } else
- netif_receive_skb(skb);
-
+ }
+ netif_receive_skb(skb);
return 0;
}
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a0252e..4686c3983fc3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6401cf..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
+ u64 tx_bytes;
u64 rx_packets;
+ u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306fb446..182b2a7be8dc 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets;
+ u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = cb2->rxo;
- stats->tx_bytes = cb2->txo;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_rq3++;
}
+ processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
+ pr->tx_bytes = rx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
- if (vlan_tx_tag_present(skb)) {
- swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
- }
-
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
ehea_post_swqe(pr->qp, swqe);
- pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80ec0a1..49e4ce1246a7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else
dev_kfree_skb_any(skb);
@@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- skb = __skb_dequeue(&priv->rx_recycle);
+ skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
@@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975ee2add..d85edf3119c2 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@ err_out:
return rc;
}
-#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
+ jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
phylink = jme_linkstat_from_phy(jme);
}
}
-#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+ if (jme->reg_pmcs) {
+ jme_set_100m_half(jme);
+
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ } else {
+ jme_phy_off(jme);
+ }
+}
+
static int
jme_close(struct net_device *netdev)
{
@@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
}
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_powersave_phy(jme);
+ pci_pme_active(pdev, true);
+}
+
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
- if (jme->reg_pmcs) {
- jme_set_100m_half(jme);
-
- if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
- jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
- pci_enable_wake(pdev, PCI_D3cold, true);
- } else {
- jme_phy_off(jme);
- }
- pci_set_power_state(pdev, PCI_D3cold);
+ jme_powersave_phy(jme);
+ pci_enable_wake(jme->pdev, PCI_D3hot, true);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
+ .shutdown = jme_shutdown,
};
static int __init
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e8c4bc..f69e73e2191e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
- if (work_done < budget)
+ if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
/* TODO: Handle errors */
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 143906417048..f6e0d40cd876 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
return 0;
}
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+ struct mlx4_en_dev *endev = ctx;
+
+ return endev->pndev[port];
+}
+
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
@@ -282,9 +289,11 @@ err_free_res:
}
static struct mlx4_interface mlx4_en_interface = {
- .add = mlx4_en_add,
- .remove = mlx4_en_remove,
- .event = mlx4_en_event,
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+ .get_dev = mlx4_en_get_netdev,
+ .protocol = MLX4_PROTOCOL_EN,
};
static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 79478bd4211a..6d6806b361e3 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
+ if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+ en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+
}
static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
+ if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+ mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+ else
+ en_err(priv, "could not find vid %d in cache\n", vid);
+
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index aa3ef2aee5bf..7f5a3221e0c1 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
- context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
+ context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index f6511aa2b7df..092e814b1981 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,7 +36,8 @@
#define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_PROMISC_EN_SHIFT 31
+#define SET_PORT_PROMISC_MODE_SHIFT 30
enum {
MLX4_CMD_SET_VLAN_FLTR = 0x47,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b716e1a1b298..b68eee2414c2 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[20] = "Address vector port checking support",
[21] = "UD multicast support",
[24] = "Demand paging support",
- [25] = "Router support"
+ [25] = "Router support",
+ [30] = "IBoE support"
};
int i;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4dee80aa..02393fdf44c1 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
- struct mlx4_cmd_mailbox *mailbox;
- __be64 *inbox;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- inbox = mailbox->buf;
-
- inbox[0] = cpu_to_be64(virt);
- inbox[1] = cpu_to_be64(dma_addr);
-
- err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
-
- if (!err)
- mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
- (unsigned long long) dma_addr, (unsigned long long) virt);
-
- return err;
-}
-
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f89b65..b10c07a1dc1a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 555067802751..73c94fcdfddf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
mutex_unlock(&intf_mutex);
}
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 569fa3df381f..782f11d8fa71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
return -1;
}
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
return -1;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 1fc16ab7ad2f..dfed6a07c2d7 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
+ int vids[128];
};
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58afdea..451339559bdc 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
}
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -182,6 +188,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+ if (table->refs[i] &&
+ (vid == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -205,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
}
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb79433..7670aac0e93f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
*
* Returns 0 on success on < 0 on error.
*/
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success on < 0 on error.
*/
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
-EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
return err;
}
-EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
@@ -574,7 +572,6 @@ phy_err:
return err;
}
-EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77313cb..993c52c82aeb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
+
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+ int addr, int phy_id)
{
struct phy_device *dev;
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
return dev;
}
-EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
* If you want to monitor your own link state, don't call
* this function.
*/
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
-EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
return changed;
}
-EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3a5868..8ecc170c9b74 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
-#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a53fca0..ec21d24015c4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
- } else {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
- u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
-
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
- max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
- MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
- MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c48314..7a298cdf9ab3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
-
if (adapter->ahw.port_type == QLCNIC_XGBE) {
- adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
@@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies >
+ (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
tmp_fil->ftime = jiffies;
return;
}
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786840a6..22821398fc63 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
-#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d8515e..c30e0fe55a31 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
}
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+ if (qdev->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(qdev->vlgrp, vid))
+ continue;
+ qlge_vlan_rx_add_vid(qdev->ndev, vid);
+ }
+ }
+}
+
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
return &ndev->stats;
}
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e8570c7cb..0e7c7c7ee164 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send an ACK mailbox command to the firmware to
* let it continue with the change.
*/
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505e1baf..66c2f1a01963 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[0] = 0x48;
- lp->rx_session_id[0] = 0x44;
- lp->rx_session_id[0] = 0x42;
+ lp->rx_session_id[1] = 0x48;
+ lp->rx_session_id[2] = 0x44;
+ lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315baa0b..3a0cc63428ee 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
- err = -EAGAIN;
+ return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fad9d45..ab9e3b785b5b 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
return 0;
}
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
#else /* CONFIG_INET */
-
int
slhc_toss(struct slcompress *comp)
{
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
-#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");
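
With the EXPORT_SYMBOL lines moved below the #endif, the VJ header-compression entry points are exported exactly once, whether CONFIG_INET is set (real implementation) or not (warning stubs). A small usage sketch of the exported API as declared in this file; the 16/16 slot counts are illustrative values only, not taken from the patch:

	/* Sketch: how a serial-line driver might set up VJ compression state. */
	static int example_attach_vj(struct slcompress **compp)
	{
		*compp = slhc_init(16, 16);	/* rslots, tslots -- illustrative */
		if (*compp == NULL)
			return -ENOMEM;
		/* per-frame: slhc_compress()/slhc_uncompress(); on teardown: slhc_free() */
		return 0;
	}
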
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index a8e5856ce882..64bfdae5956f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
} else {
/* Try reading the MAC address from the device. If an EEPROM is present
* it will already have been set */
- smsc911x_read_mac_address(dev);
+ smsc_get_mac(dev);
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
/* Entry point for loading the module */
static int __init smsc911x_init_module(void)
{
+ SMSC_INITIALIZE();
return platform_driver_register(&smsc911x_driver);
}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c65ce2..52f38e12a879 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -394,4 +394,15 @@
#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
LPA_PAUSE_ASYM)
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE() do {} while (0)
+#define smsc_get_mac(dev) smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
#endif /* __SMSC911X_H__ */
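
The hunk above adds two override points: SMSC_INITIALIZE(), run once at module load (see the call added in smsc911x_init_module()), and smsc_get_mac(), used in place of the direct smsc911x_read_mac_address() call. When CONFIG_SMSC911X_ARCH_HOOKS is set, <asm/smsc911x.h> is included after these defaults, so an architecture header would redefine them. A minimal sketch of such a header; the board_smsc911x_* names are invented for illustration and are not part of the patch:

	/* Hypothetical <asm/smsc911x.h> -- sketch only. */
	#ifndef __ASM_SMSC911X_H
	#define __ASM_SMSC911X_H

	void board_smsc911x_init(void);			/* assumed arch helper */
	void board_smsc911x_get_mac(struct net_device *dev);	/* assumed arch helper */

	#undef SMSC_INITIALIZE
	#define SMSC_INITIALIZE()	board_smsc911x_init()

	#undef smsc_get_mac
	#define smsc_get_mac(dev)	board_smsc911x_get_mac(dev)

	#endif /* __ASM_SMSC911X_H */
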
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917778f8..30ccbb6d097a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC) {
+ if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, true);
- } else {
+ else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, false);
- }
spin_unlock_bh(&tp->lock);
+
return 0;
}
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b8860a531..793020347e54 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
- udelay(time);
+ mdelay(time / 1000);
#endif
}
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc67138adbf..5b83c3f35f47 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
- *) The current firmware always strips the VLAN tag off, even if
- we tell it not to. You should filter VLANs at the switch
- as a workaround (good practice in any event) until we can
- get this fixed.
*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
- spinlock_t state_lock;
- struct vlan_group * vlgrp;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
return err;
}
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct typhoon *tp = netdev_priv(dev);
- struct cmd_desc xp_cmd;
- int err;
-
- spin_lock_bh(&tp->state_lock);
- if(!tp->vlgrp != !grp) {
- /* We've either been turned on for the first time, or we've
- * been turned off. Update the 3XP.
- */
- if(grp)
- tp->offload |= TYPHOON_OFFLOAD_VLAN;
- else
- tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
- /* If the interface is up, the runtime is running -- and we
- * must be up for the vlan core to call us.
- *
- * Do the command outside of the spin lock, as it is slow.
- */
- INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
- TYPHOON_CMD_SET_OFFLOAD_TASKS);
- xp_cmd.parm2 = tp->offload;
- xp_cmd.parm3 = tp->offload;
- spin_unlock_bh(&tp->state_lock);
- err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
- netdev_err(tp->dev, "vlan offload error %d\n", -err);
- spin_lock_bh(&tp->state_lock);
- }
-
- /* now make the change visible */
- tp->vlgrp = grp;
- spin_unlock_bh(&tp->state_lock);
-}
-
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
- INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
return 1;
}
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+ /* There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ return -EINVAL;
+
+ return ethtool_op_set_flags(dev, data,
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
+ .set_flags = typhoon_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
- spin_lock_init(&tp->state_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
- tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
} else
skb_checksum_none_assert(new_skb);
- spin_lock(&tp->state_lock);
- if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
- vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
- ntohl(rx->vlanTag) & 0xffff);
- else
- netif_receive_skb(new_skb);
- spin_unlock(&tp->state_lock);
+ if (rx->rxStatus & TYPHOON_RX_VLAN)
+ __vlan_hwaccel_put_tag(new_skb,
+ ntohl(rx->vlanTag) & 0xffff);
+ netif_receive_skb(new_skb);
received++;
budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
- spin_lock_bh(&tp->state_lock);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- spin_unlock_bh(&tp->state_lock);
if(err < 0)
goto error_out;
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
if(!netif_running(dev))
return 0;
- spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
- spin_unlock_bh(&tp->state_lock);
- netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
- return -EBUSY;
- }
- spin_unlock_bh(&tp->state_lock);
+ /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = typhoon_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
- .ndo_vlan_rx_register = typhoon_vlan_rx_register,
};
static int __devinit
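
Taken together, the typhoon changes above hard-wire TYPHOON_OFFLOAD_VLAN on (the 3XP firmware strips tags regardless of the setting), drop the vlgrp/state_lock plumbing, and expose the remaining policy through the new ethtool set_flags hook. The behaviour of typhoon_set_flags(), as shown in the earlier hunk, is roughly:

	/* Sketch of the hook's behaviour; dev is a net_device bound to this driver. */
	int err;

	err = typhoon_set_flags(dev, ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
	/* err == 0: RX stripping stays on, TX insertion may be toggled */

	err = typhoon_set_flags(dev, ETH_FLAG_TXVLAN);
	/* err == -EINVAL: clearing ETH_FLAG_RXVLAN is refused, because the
	 * firmware cannot stop stripping VLAN tags on receive */
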
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb226d3..969c751ee404 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
/* features */
enum {
- UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
- UPT1_F_RSS = 0x0002,
- UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
- UPT1_F_LRO = 0x0008,
+ UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
+ UPT1_F_RSS = cpu_to_le64(0x0002),
+ UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
+ UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
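
Because the UPT1_F_* values above are now stored pre-swapped with cpu_to_le64(), the feature word in the shared area can be updated with plain bit operations; that is what lets the later vmxnet3 hunks drop the set_flag_le64()/reset_flag_le64() helpers. A small sketch of the equivalence, where features stands in for devRead->misc.uptFeatures:

	__le64 features = 0;

	/* old style: convert to CPU order, OR in a host-order flag, convert back */
	features = cpu_to_le64(le64_to_cpu(features) | 0x0008ULL);	/* LRO */

	/* new style: the constant is already __le64, so plain OR/AND works,
	 * since byte-swapping commutes with bitwise operations */
	features |= UPT1_F_LRO;
	features &= ~UPT1_F_RXVLAN;
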
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b940ad..4d84912c99ba 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
-#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
- * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
+ * filters */
struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e3097b..e3658e10db39 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
adapter->vlan_grp = grp;
/* update FEATURES to device */
- set_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
- if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+ if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
- reset_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* set up feature flags */
if (adapter->rxcsum)
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
adapter->vlan_grp) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+ adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a89165a..b79070bcc92e 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
- set_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
else
- reset_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
- cpu_to_le64(UPT1_F_LRO);
+ UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
- cpu_to_le64(~UPT1_F_LRO);
+ ~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5cbba0d..8a2f4712284c 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- u8 *hw_addr0; /* for BAR 0 */
- u8 *hw_addr1; /* for BAR 1 */
+ u8 __iomem *hw_addr0; /* for BAR 0 */
+ u8 __iomem *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
- *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db5935609..906a3ca3676b 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(
+ struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(
+ struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+ u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+ void *memblock,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type, u32 length,
+ u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+ void __iomem *reg,
+ u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+ u64 mask, u32 max_millis)
+{
+ __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+ wmb();
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+ u32 item_size, u32 private_size, u32 items_initial,
+ u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
* Will poll a certain register for a specified amount of time.
* Will poll until the masked bits are cleared.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
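
For reference, a register poll of the kind described above typically looks like the sketch below: read the register, succeed once the masked bits have cleared, and give up after max_millis milliseconds. This is an illustrative reconstruction, not the body of __vxge_hw_device_register_poll() (which this hunk does not show), and VXGE_HW_FAIL is assumed as the timeout status:

	/* Illustrative only -- not the driver's exact implementation. */
	static enum vxge_hw_status
	register_poll_sketch(void __iomem *reg, u64 mask, u32 max_millis)
	{
		u32 i = 0;

		do {
			if (!(readq(reg) & mask))
				return VXGE_HW_OK;	/* bits cleared: done */
			mdelay(1);
		} while (++i < max_millis);

		return VXGE_HW_FAIL;			/* timed out */
	}
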
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
* in progress
* This routine checks that the vpath reset-in-progress register has returned to zero
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
@@ -779,7 +952,7 @@ exit:
* vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
* Get the Statistics on aggregate port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
@@ -814,7 +987,7 @@ exit:
* vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
* Get the Statistics on port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
return 0;
#endif
}
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return 0;
- return hldev->debug_module_mask;
-#else
- return 0;
-#endif
-}
/*
* vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
* first block
* Returns the dma address of the first RxD block
*/
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
@@ -1252,7 +1411,7 @@ exit:
* This function creates Ring and initializes it.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
@@ -1363,7 +1522,7 @@ exit:
* __vxge_hw_ring_abort - Returns the RxD
* This function terminates the RxDs of ring
*/
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
* __vxge_hw_ring_delete - Removes the ring
* This function frees up the memory pool and removes the ring
*/
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
* __vxge_hw_mempool_grow
* Will resize mempool up to %num_allocate value.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
@@ -1527,7 +1686,7 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
/*
* vxge_hw_mempool_destroy
*/
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
* __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
* Set the swapper bits appropriately for the legacy section.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@ __vxge_hw_kdfc_swapper_set(
}
/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_config *dev_config, int size)
-{
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
- return VXGE_HW_ERR_INVALID_DEVICE;
-
- if (size != sizeof(struct vxge_hw_device_config))
- return VXGE_HW_ERR_VERSION_CONFLICT;
-
- memcpy(dev_config, &hldev->config,
- sizeof(struct vxge_hw_device_config));
-
- return VXGE_HW_OK;
-}
-
-/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
enum vxge_hw_status
@@ -2438,7 +2575,7 @@ exit:
* __vxge_hw_fifo_abort - Returns the TxD
* This function terminates the TxDs of fifo
*/
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
* in pci config space.
* Read from the vpath pci config space.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
@@ -2542,7 +2679,7 @@ exit:
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
-u32
+static u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
-u64
+static u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
* This routine checks the vpath_rst_in_prog register to see if
* adapter completed the reset process for the vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
* This routine configures the prc registers of virtual path using the config
* passed
*/
-void
+static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3553,7 +3690,7 @@ exit:
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the tim registers of virtual path using the config
* passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3966,7 +4103,7 @@ exit:
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
@@ -4022,7 +4159,7 @@ exit:
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
* This routine closes all channels it opened and frees up memory
*/
-void
+static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
* Enable the DMA vpath statistics. The function is to be called to re-enable
* the adapter to update stats into the host memory
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
return status;
}
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+ unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+
+ vaddr = kmalloc((size), flags);
+
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
/*
* __vxge_hw_blockpool_create - Create block pool
*/
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
-void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343023cb..5c00861b6c2c 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
char version[VXGE_HW_FW_STRLEN];
};
-u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
u8 hash_type_ipv6ex_en;
};
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
- struct vxge_hw_device_config *dev_config, int size);
-
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@@ -1954,38 +1893,6 @@ out:
return vaddr;
}
-extern void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
(*memblock_item_idx) * mempool->items_priv_size;
}
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
- struct vxge_hw_mempool *mempool,
- u32 num_allocate,
- u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 private_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
-
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
- struct __vxge_hw_virtualpath *vpath,
- u32 operation,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
writel(val, addr);
}
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- status = __vxge_hw_device_register_poll(addr, mask, max_millis);
- return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
- struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0,
- u32 offset,
- u32 *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
- u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
-enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
/**
* vxge_debug
* @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e306fdd..b67746eef923 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542ecb68d..813829f3d024 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
-void
+static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
-void
+static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
* If the interrupt is because of a received frame or if the receive ring
* contains fresh as yet un-processed frames, this function is called.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* freed and frees all skbs whose data have already DMA'ed into the NICs
* internal memory.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
*
* Enables the interrupts for the vpath
*/
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
*
* Disables the interrupts for the vpath
*/
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@@ -1553,7 +1563,7 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
}
/* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
return status;
}
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
return FALSE;
}
/* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
}
/* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
}
/* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
}
/* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
}
/* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
}
/* Loop through the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
}
}
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config,
+ int high_dma, int no_of_vpath,
+ struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- initialize_ethtool_ops(ndev);
+ vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
*
* This function will unregister and free network device
*/
-void
+static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07eaacd7..de64536cb7d0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
- struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f99cb3..4bdb611a6842 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+ u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms);
+
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for a programmable amount of time.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -538,7 +545,7 @@ exit:
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -564,7 +571,7 @@ exit:
*
* Handle error.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
* it swaps the reserve and free arrays.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+ void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -1658,37 +1666,6 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
@@ -1898,9 +1875,9 @@ exit:
* Process vpath alarms.
*
*/
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@@ -2265,36 +2242,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- if (hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clr_msix_one_shot_vec[msix_id%4]);
- } else {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clear_msix_mask_vect[msix_id%4]);
- }
-}
-
-/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
@@ -2316,22 +2263,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
- &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
-/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d13798e..9890d4d596d0 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access(
u64 *stat);
enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats);
-
-enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
-enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
u32 is_last);
};
-void
-__vxge_hw_mempool_destroy(
- struct vxge_hw_mempool *mempool);
-
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@@ -2195,61 +2144,10 @@ __vxge_hw_vpath_rts_table_set(
u64 data2);
enum vxge_hw_status
-__vxge_hw_vpath_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
-void
-__vxge_hw_vpath_prc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id,
- struct vxge_hw_vp_config *config);
-
-void
-__vxge_hw_vp_terminate(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@@ -2321,11 +2219,6 @@ vxge_hw_vpath_vid_get(
u64 *vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
@@ -2387,16 +2280,9 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type);
-
#endif
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f1ae75d35d5d..8251946842e6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
common->ah = sc->ah;
common->hw = hw;
common->cachelsz = csz << 2; /* convert to bytes */
+ spin_lock_init(&common->cc_lock);
/* Initialize device */
ret = ath5k_hw_attach(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index ec98ab50748a..a14a5e43cf56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
{0x00008014, 0x044c044c, 0x08980898},
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x03721821, 0x03721821},
+ {0x00009e00, 0x0372131c, 0x0372131c},
{0x0000a230, 0x0000000b, 0x00000016},
{0x0000a254, 0x00000898, 0x00001130},
};
@@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
+ {0x00009834, 0x6400a290},
{0x00009838, 0x0108ecff},
{0x0000983c, 0x14750600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
@@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a1e},
+ {0x0000a22c, 0x01036a27},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
@@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
+ {0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
{0x0000a448, 0x06000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a600, 0x00000000},
- {0x0000a604, 0x00000000},
- {0x0000a608, 0x00000000},
- {0x0000a60c, 0x00000000},
- {0x0000a610, 0x00000000},
- {0x0000a614, 0x00000000},
- {0x0000a618, 0x00000000},
- {0x0000a61c, 0x00000000},
- {0x0000a620, 0x00000000},
- {0x0000a624, 0x00000000},
- {0x0000a628, 0x00000000},
- {0x0000a62c, 0x00000000},
- {0x0000a630, 0x00000000},
- {0x0000a634, 0x00000000},
- {0x0000a638, 0x00000000},
- {0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00000637},
+ {0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
@@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
- {0x0000b2dc, 0x00000000},
- {0x0000b2e0, 0x00000000},
- {0x0000b2e4, 0x00000000},
- {0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
@@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000b8f4, 0x00000000},
{0x0000c2d0, 0x00000080},
{0x0000c2d4, 0x00000000},
- {0x0000c2dc, 0x00000000},
- {0x0000c2e0, 0x00000000},
- {0x0000c2e4, 0x00000000},
- {0x0000c2e8, 0x00000000},
{0x0000c2ec, 0x00000000},
{0x0000c2f0, 0x00000000},
{0x0000c2f4, 0x00000000},
@@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
+ {0x0000b080, 0x2a2d2f32},
+ {0x0000b084, 0x21232328},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
@@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x00008144, 0xffffffff},
{0x00008168, 0x00000000},
{0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
{0x000081c0, 0x00000000},
{0x000081c4, 0x33332210},
{0x000081c8, 0x00000000},
{0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
{0x000081ec, 0x00000000},
{0x000081f0, 0x00000000},
{0x000081f4, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 7c38229ba670..716db414c258 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
(((Y[6] - Y[3]) * 1 << scale_factor) +
(x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
Y_intercept =
(G_fxp * (x_est[0] - x_est[3]) +
(1 << scale_factor)) / (1 << scale_factor) + Y[3];
@@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
for (i = 0; i <= 3; i++) {
y_est[i] = i * 32;
-
- /* prevent division by zero */
- if (G_fxp == 0)
- return false;
-
x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
}
+ if (y_est[max_index] == 0)
+ return false;
+
x_est_fxp1_nonlin =
x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
G_fxp) / G_fxp;
@@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
scale_B = scale_B / (1 << Q_scale_B);
+ if (scale_B == 0)
+ return false;
Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
beta_raw = beta_raw / (1 << Q_beta);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 4ed010d4ef96..19891e7d49ae 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
ath_print(common, ATH_DBG_BSTUCK,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
- ath_reset(sc, false);
+ ath_reset(sc, true);
}
return;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index bc6c4df9712c..95b41db0d86b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -577,6 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->hw = sc->hw;
common->priv = sc;
common->debug_mask = ath9k_debug;
+ spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->wiphy_lock);
spin_lock_init(&sc->sc_resetlock);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3ff0e476c2b3..c6ec800d7a6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -182,6 +182,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
+ if (!ah->curchan)
+ return;
+
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@@ -577,7 +580,7 @@ void ath_hw_check(struct work_struct *work)
msleep(1);
}
- ath_reset(sc, false);
+ ath_reset(sc, true);
out:
ath9k_ps_restore(sc);
@@ -595,7 +598,7 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
if (status & ATH9K_INT_FATAL) {
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d077186da870..30ef2dfc1ed2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ struct ieee80211_tx_info *tx_info;
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
@@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+ break;
+
/* do not exceed subframe limit */
if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
status = ATH_AGGR_LIMITED;
@@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
ath9k_ps_wakeup(sc);
- ath_reset(sc, false);
+ ath_reset(sc, true);
ath9k_ps_restore(sc);
}
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index f78728c38294..568174c71b94 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -116,8 +116,9 @@ __regwrite_out : \
} while (0);
-#define carl9170_async_get_buf() \
+#define carl9170_async_regwrite_get_buf() \
do { \
+ __nreg = 0; \
__cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
CARL9170_MAX_CMD_PAYLOAD_LEN); \
if (__cmd == NULL) { \
@@ -128,38 +129,42 @@ do { \
#define carl9170_async_regwrite_begin(carl) \
do { \
- int __nreg = 0, __err = 0; \
struct ar9170 *__carl = carl; \
struct carl9170_cmd *__cmd; \
- carl9170_async_get_buf(); \
+ unsigned int __nreg; \
+ int __err = 0; \
+ carl9170_async_regwrite_get_buf(); \
+
+#define carl9170_async_regwrite_flush() \
+do { \
+ if (__cmd == NULL || __nreg == 0) \
+ break; \
+ \
+ if (IS_ACCEPTING_CMD(__carl) && __nreg) { \
+ __cmd->hdr.len = 8 * __nreg; \
+ __err = __carl9170_exec_cmd(__carl, __cmd, true); \
+ __cmd = NULL; \
+ break; \
+ } \
+ goto __async_regwrite_out; \
+} while (0)
#define carl9170_async_regwrite(r, v) do { \
+ if (__cmd == NULL) \
+ carl9170_async_regwrite_get_buf(); \
__cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
__cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
__nreg++; \
- if ((__nreg >= PAYLOAD_MAX/2)) { \
- if (IS_ACCEPTING_CMD(__carl)) { \
- __cmd->hdr.len = 8 * __nreg; \
- __err = __carl9170_exec_cmd(__carl, __cmd, true);\
- __cmd = NULL; \
- carl9170_async_get_buf(); \
- } else { \
- goto __async_regwrite_out; \
- } \
- __nreg = 0; \
- if (__err) \
- goto __async_regwrite_out; \
- } \
+ if ((__nreg >= PAYLOAD_MAX / 2)) \
+ carl9170_async_regwrite_flush(); \
} while (0)
-#define carl9170_async_regwrite_finish() \
+#define carl9170_async_regwrite_finish() do { \
__async_regwrite_out : \
- if (__err == 0 && __nreg) { \
- __cmd->hdr.len = 8 * __nreg; \
- if (IS_ACCEPTING_CMD(__carl)) \
- __err = __carl9170_exec_cmd(__carl, __cmd, true);\
- __nreg = 0; \
- }
+ if (__cmd != NULL && __err == 0) \
+ carl9170_async_regwrite_flush(); \
+ kfree(__cmd); \
+} while (0) \
#define carl9170_async_regwrite_result() \
__err; \
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 3cc99f3f7ab5..980ae70ea424 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -639,8 +639,8 @@ init:
if (err)
goto unlock;
} else {
- err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
rcu_read_unlock();
+ err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
if (err)
goto unlock;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c7f6193934ea..d8607f4c144d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -591,16 +591,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
const bool free_buf)
{
struct urb *urb;
+ int err = 0;
- if (!IS_INITIALIZED(ar))
- return -EPERM;
+ if (!IS_INITIALIZED(ar)) {
+ err = -EPERM;
+ goto err_free;
+ }
- if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
- return -EINVAL;
+ if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
+ err = -EINVAL;
+ goto err_free;
+ }
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
+ if (!urb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@@ -613,6 +620,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
usb_free_urb(urb);
return carl9170_usb_submit_cmd_urb(ar);
+
+err_free:
+ if (free_buf)
+ kfree(cmd);
+
+ return err;
}
int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index dfec5496055e..e0f2d122e124 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
(2 - i));
}
- for (j = 0; i < 4; j++) {
+ for (j = 0; j < 4; j++) {
if (j < 3) {
cur_lna = lna[j];
cur_hpf1 = hpf1[j];
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index db57aea629d9..2b078a995729 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
if (unlikely(!agg->wait_for_ba)) {
- IWL_ERR(priv, "Received BA when not expected\n");
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
return -EINVAL;
}
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/wl1251/Makefile
index 4fe246824db3..58b4f935a3f6 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/wl1251/Makefile
@@ -1,6 +1,8 @@
wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
acx.o boot.o init.o debugfs.o io.o
+wl1251_spi-objs += spi.o
+wl1251_sdio-objs += sdio.o
-obj-$(CONFIG_WL1251) += wl1251.o
-obj-$(CONFIG_WL1251_SPI) += spi.o
-obj-$(CONFIG_WL1251_SDIO) += sdio.o
+obj-$(CONFIG_WL1251) += wl1251.o
+obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
+obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 95f711b251ad..449de59bf35b 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -28,6 +28,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index c542c7bb7454..d9f51485beee 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -296,10 +296,9 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
-static void dino_disable_irq(unsigned int irq)
+static void dino_mask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct dino_device *dino_dev = desc->chip_data;
+ struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
@@ -309,10 +308,9 @@ static void dino_disable_irq(unsigned int irq)
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
-static void dino_enable_irq(unsigned int irq)
+static void dino_unmask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct dino_device *dino_dev = desc->chip_data;
+ struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
@@ -347,20 +345,11 @@ static void dino_enable_irq(unsigned int irq)
}
}
-static unsigned int dino_startup_irq(unsigned int irq)
-{
- dino_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip dino_interrupt_type = {
- .name = "GSC-PCI",
- .startup = dino_startup_irq,
- .shutdown = dino_disable_irq,
- .enable = dino_enable_irq,
- .disable = dino_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "GSC-PCI",
+ .unmask = dino_unmask_irq,
+ .mask = dino_mask_irq,
+ .ack = no_ack_irq,
};
@@ -391,7 +380,7 @@ ilr_again:
int irq = dino_dev->global_irq[local_irq];
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
__func__, irq, intr_dev, mask);
- __do_IRQ(irq);
+ generic_handle_irq(irq);
mask &= ~(1 << local_irq);
} while (mask);
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 46f503fb7fc5..1211974f55aa 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -144,7 +144,7 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered *
/* called by free irq */
-static void eisa_disable_irq(unsigned int irq)
+static void eisa_mask_irq(unsigned int irq)
{
unsigned long flags;
@@ -164,7 +164,7 @@ static void eisa_disable_irq(unsigned int irq)
}
/* called by request irq */
-static void eisa_enable_irq(unsigned int irq)
+static void eisa_unmask_irq(unsigned int irq)
{
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -182,20 +182,11 @@ static void eisa_enable_irq(unsigned int irq)
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
}
-static unsigned int eisa_startup_irq(unsigned int irq)
-{
- eisa_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip eisa_interrupt_type = {
- .name = "EISA",
- .startup = eisa_startup_irq,
- .shutdown = eisa_disable_irq,
- .enable = eisa_enable_irq,
- .disable = eisa_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "EISA",
+ .unmask = eisa_unmask_irq,
+ .mask = eisa_mask_irq,
+ .ack = no_ack_irq,
};
static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
@@ -233,7 +224,7 @@ static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
}
spin_unlock_irqrestore(&eisa_irq_lock, flags);
- __do_IRQ(irq);
+ generic_handle_irq(irq);
spin_lock_irqsave(&eisa_irq_lock, flags);
/* unmask */
@@ -346,10 +337,10 @@ static int __init eisa_probe(struct parisc_device *dev)
}
/* Reserve IRQ2 */
- irq_to_desc(2)->action = &irq2_action;
-
+ setup_irq(2, &irq2_action);
for (i = 0; i < 16; i++) {
- irq_to_desc(i)->chip = &eisa_interrupt_type;
+ set_irq_chip_and_handler(i, &eisa_interrupt_type,
+ handle_level_irq);
}
EISA_bus = 1;
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 20a1bce1a031..e605298e3aee 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -86,7 +86,7 @@ irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev)
do {
int local_irq = __ffs(irr);
unsigned int irq = gsc_asic->global_irq[local_irq];
- __do_IRQ(irq);
+ generic_handle_irq(irq);
irr &= ~(1 << local_irq);
} while (irr);
@@ -105,10 +105,9 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
-static void gsc_asic_disable_irq(unsigned int irq)
+static void gsc_asic_mask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct gsc_asic *irq_dev = desc->chip_data;
+ struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -121,10 +120,9 @@ static void gsc_asic_disable_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
-static void gsc_asic_enable_irq(unsigned int irq)
+static void gsc_asic_unmask_irq(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
- struct gsc_asic *irq_dev = desc->chip_data;
+ struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -141,33 +139,23 @@ static void gsc_asic_enable_irq(unsigned int irq)
*/
}
-static unsigned int gsc_asic_startup_irq(unsigned int irq)
-{
- gsc_asic_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip gsc_asic_interrupt_type = {
- .name = "GSC-ASIC",
- .startup = gsc_asic_startup_irq,
- .shutdown = gsc_asic_disable_irq,
- .enable = gsc_asic_enable_irq,
- .disable = gsc_asic_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .name = "GSC-ASIC",
+ .unmask = gsc_asic_unmask_irq,
+ .mask = gsc_asic_mask_irq,
+ .ack = no_ack_irq,
};
int gsc_assign_irq(struct irq_chip *type, void *data)
{
static int irq = GSC_IRQ_BASE;
- struct irq_desc *desc;
if (irq > GSC_IRQ_MAX)
return NO_IRQ;
- desc = irq_to_desc(irq);
- desc->chip = type;
- desc->chip_data = data;
+ set_irq_chip_and_handler(irq, type, handle_level_irq);
+ set_irq_chip_data(irq, data);
+
return irq++;
}
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index c76836727cae..a3120a09c43d 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -615,17 +615,10 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
}
-static struct vector_info *iosapic_get_vector(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc->chip_data;
-}
-
-static void iosapic_disable_irq(unsigned int irq)
+static void iosapic_mask_irq(unsigned int irq)
{
unsigned long flags;
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -635,9 +628,9 @@ static void iosapic_disable_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
-static void iosapic_enable_irq(unsigned int irq)
+static void iosapic_unmask_irq(unsigned int irq)
{
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1;
/* data is initialized by fixup_irq */
@@ -676,36 +669,14 @@ printk("\n");
DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
vi->eoi_addr, vi->eoi_data);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
-}
-
-/*
- * PARISC only supports PCI devices below I/O SAPIC.
- * PCI only supports level triggered in order to share IRQ lines.
- * ergo I/O SAPIC must always issue EOI on parisc.
- *
- * i386/ia64 support ISA devices and have to deal with
- * edge-triggered interrupts too.
- */
-static void iosapic_end_irq(unsigned int irq)
-{
- struct vector_info *vi = iosapic_get_vector(irq);
- DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
- vi->eoi_addr, vi->eoi_data);
- iosapic_eoi(vi->eoi_addr, vi->eoi_data);
- cpu_end_irq(irq);
-}
-
-static unsigned int iosapic_startup_irq(unsigned int irq)
-{
- iosapic_enable_irq(irq);
- return 0;
+ cpu_eoi_irq(irq);
}
#ifdef CONFIG_SMP
static int iosapic_set_affinity_irq(unsigned int irq,
const struct cpumask *dest)
{
- struct vector_info *vi = iosapic_get_vector(irq);
+ struct vector_info *vi = get_irq_chip_data(irq);
u32 d0, d1, dummy_d0;
unsigned long flags;
int dest_cpu;
@@ -730,13 +701,10 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
- .name = "IO-SAPIC-level",
- .startup = iosapic_startup_irq,
- .shutdown = iosapic_disable_irq,
- .enable = iosapic_enable_irq,
- .disable = iosapic_disable_irq,
- .ack = cpu_ack_irq,
- .end = iosapic_end_irq,
+ .name = "IO-SAPIC-level",
+ .unmask = iosapic_unmask_irq,
+ .mask = iosapic_mask_irq,
+ .ack = cpu_ack_irq,
#ifdef CONFIG_SMP
.set_affinity = iosapic_set_affinity_irq,
#endif
@@ -891,8 +859,8 @@ void *iosapic_register(unsigned long hpa)
isi->isi_version = iosapic_rd_version(isi);
isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
- vip = isi->isi_vector = (struct vector_info *)
- kzalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL);
+ vip = isi->isi_vector = kcalloc(isi->isi_num_vectors,
+ sizeof(struct vector_info), GFP_KERNEL);
if (vip == NULL) {
kfree(isi);
return NULL;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index c5c14dd3734f..2350e8a86eef 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -346,8 +346,8 @@ static __inline__ int led_get_net_activity(void)
#ifndef CONFIG_NET
return 0;
#else
- static unsigned long rx_total_last, tx_total_last;
- unsigned long rx_total, tx_total;
+ static u64 rx_total_last, tx_total_last;
+ u64 rx_total, tx_total;
struct net_device *dev;
int retval;
@@ -356,7 +356,7 @@ static __inline__ int led_get_net_activity(void)
/* we are running as a workqueue task, so we can use an RCU lookup */
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
struct rtnl_link_stats64 temp;
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (!in_dev || !in_dev->ifa_list)
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index f7806d81f1e0..0846dafdfff1 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -139,7 +139,7 @@ superio_interrupt(int parent_irq, void *devp)
}
/* Call the appropriate device's interrupt */
- __do_IRQ(local_irq);
+ generic_handle_irq(local_irq);
/* set EOI - forces a new interrupt if a lower priority device
* still needs service.
@@ -286,7 +286,7 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
-static void superio_disable_irq(unsigned int irq)
+static void superio_mask_irq(unsigned int irq)
{
u8 r8;
@@ -303,7 +303,7 @@ static void superio_disable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static void superio_enable_irq(unsigned int irq)
+static void superio_unmask_irq(unsigned int irq)
{
u8 r8;
@@ -319,20 +319,11 @@ static void superio_enable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static unsigned int superio_startup_irq(unsigned int irq)
-{
- superio_enable_irq(irq);
- return 0;
-}
-
static struct irq_chip superio_interrupt_type = {
- .name = SUPERIO,
- .startup = superio_startup_irq,
- .shutdown = superio_disable_irq,
- .enable = superio_enable_irq,
- .disable = superio_disable_irq,
+ .name = SUPERIO,
+ .unmask = superio_unmask_irq,
+ .mask = superio_mask_irq,
.ack = no_ack_irq,
- .end = no_end_irq,
};
#ifdef DEBUG_SUPERIO_INIT
@@ -363,9 +354,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
#endif
for (i = 0; i < 16; i++) {
- struct irq_desc *desc = irq_to_desc(i);
-
- desc->chip = &superio_interrupt_type;
+ set_irq_chip_and_handler(i, &superio_interrupt_type, handle_level_irq);
}
/*
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index dc1aa0922868..dcd7ace9221e 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -65,6 +65,4 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
-ifeq ($(CONFIG_PCI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 7f0af0e9b826..172bf26e0680 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -64,6 +64,49 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
+/*
+ * Find the highest-address bus resource below the cursor "res". If the
+ * cursor is NULL, return the highest resource.
+ */
+static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
+ unsigned int type,
+ struct resource *res)
+{
+ struct resource *r, *prev = NULL;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ if (!r)
+ continue;
+
+ if ((r->flags & IORESOURCE_TYPE_BITS) != type)
+ continue;
+
+ /* If this resource is at or past the cursor, skip it */
+ if (res) {
+ if (r == res)
+ continue;
+ if (r->end > res->end)
+ continue;
+ if (r->end == res->end && r->start > res->start)
+ continue;
+ }
+
+ if (!prev)
+ prev = r;
+
+ /*
+ * A small resource is higher than a large one that ends at
+ * the same address.
+ */
+ if (r->end > prev->end ||
+ (r->end == prev->end && r->start > prev->start))
+ prev = r;
+ }
+
+ return prev;
+}
+
/**
* pci_bus_alloc_resource - allocate a resource from a parent bus
* @bus: PCI bus
@@ -89,9 +132,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t),
void *alignf_data)
{
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM;
struct resource *r;
resource_size_t max = -1;
+ unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
@@ -99,10 +143,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
if (!(res->flags & IORESOURCE_MEM_64))
max = PCIBIOS_MAX_MEM_32;
- pci_bus_for_each_resource(bus, r, i) {
- if (!r)
- continue;
-
+ /* Look for space at highest addresses first */
+ r = pci_bus_find_resource_prev(bus, type, NULL);
+ for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 1aaf3f32d3cd..f59ed30512b5 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -133,8 +133,8 @@ void __init ibmphp_hpc_initvars (void)
debug ("%s - Entry\n", __func__);
mutex_init(&sem_hpcaccess);
- init_MUTEX (&semOperations);
- init_MUTEX_LOCKED (&sem_exit);
+ sema_init(&semOperations, 1);
+ sema_init(&sem_exit, 0);
to_debug = 0;
debug ("%s - Exit\n", __func__);
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index de27c1cb5a2b..feff3bee6fe5 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -22,8 +22,8 @@
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-#define msix_table_offset_reg(base) (base + 0x04)
-#define msix_pba_offset_reg(base) (base + 0x08)
+#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
+#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
#define multi_msix_capable(control) msix_table_size((control))
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7fa3cbd742c5..e98c8104297b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems);
unsigned int pci_pm_d3_delay;
+static void pci_pme_list_scan(struct work_struct *work);
+
+static LIST_HEAD(pci_pme_list);
+static DEFINE_MUTEX(pci_pme_list_mutex);
+static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+
+struct pci_pme_device {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+#define PME_TIMEOUT 1000 /* How long between PME checks */
+
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
unsigned int delay = dev->d3_delay;
@@ -1331,6 +1344,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
return !!(dev->pme_support & (1 << state));
}
+static void pci_pme_list_scan(struct work_struct *work)
+{
+ struct pci_pme_device *pme_dev;
+
+ mutex_lock(&pci_pme_list_mutex);
+ if (!list_empty(&pci_pme_list)) {
+ list_for_each_entry(pme_dev, &pci_pme_list, list)
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+ }
+ mutex_unlock(&pci_pme_list_mutex);
+}
+
+/**
+ * pci_external_pme - is a device an external PCI PME source?
+ * @dev: PCI device to check
+ *
+ */
+
+static bool pci_external_pme(struct pci_dev *dev)
+{
+ if (pci_is_pcie(dev) || dev->bus->number == 0)
+ return false;
+ return true;
+}
+
/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
@@ -1354,6 +1393,44 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+ /* PCI (as opposed to PCIe) PME requires that the device have
+ its PME# line hooked up correctly. Not all hardware vendors
+ do this, so the PME never gets delivered and the device
+ remains asleep. The easiest way around this is to
+ periodically walk the list of suspended devices and check
+ whether any have their PME flag set. The assumption is that
+ we'll wake up often enough anyway that this won't be a huge
+ hit, and the power savings from the devices will still be a
+ win. */
+
+ if (pci_external_pme(dev)) {
+ struct pci_pme_device *pme_dev;
+ if (enable) {
+ pme_dev = kmalloc(sizeof(struct pci_pme_device),
+ GFP_KERNEL);
+ if (!pme_dev)
+ goto out;
+ pme_dev->dev = dev;
+ mutex_lock(&pci_pme_list_mutex);
+ list_add(&pme_dev->list, &pci_pme_list);
+ if (list_is_singular(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
+ mutex_unlock(&pci_pme_list_mutex);
+ } else {
+ mutex_lock(&pci_pme_list_mutex);
+ list_for_each_entry(pme_dev, &pci_pme_list, list) {
+ if (pme_dev->dev == dev) {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ break;
+ }
+ }
+ mutex_unlock(&pci_pme_list_mutex);
+ }
+ }
+
+out:
dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
@@ -2689,7 +2766,7 @@ int pcie_get_readrq(struct pci_dev *dev)
ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
if (!ret)
- ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
return ret;
}
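
The return statement touched in the pcie_get_readrq() hunk above decodes the 3-bit Max_Read_Request_Size field (bits 14:12 of the PCIe Device Control register) as 128 << field, i.e. 128 to 4096 bytes. A minimal standalone sketch of that decode in plain C, with a made-up register value:

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size, bits 14:12 */

int main(void)
{
	unsigned short ctl = 0x2810; /* hypothetical Device Control value */
	int readrq = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	/* field value 2 -> 128 << 2 = 512 bytes */
	printf("max read request size: %d bytes\n", readrq);
	return 0;
}
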
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6beb11b617a9..f5c7c382765f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -63,11 +63,8 @@ struct pci_platform_pm_ops {
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern bool pci_check_pme_status(struct pci_dev *dev);
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern void pci_wakeup_event(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_pme_wakeup_bus(struct pci_bus *bus);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index f409948e1a9b..2b2b6508efde 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -416,7 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
*/
static int __init aer_service_init(void)
{
- if (!pci_aer_available())
+ if (!pci_aer_available() || aer_acpi_firmware_first())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 80c11d131499..9656e3060412 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,6 +132,7 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
#ifdef CONFIG_ACPI_APEI
extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+extern bool aer_acpi_firmware_first(void);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
@@ -139,6 +140,8 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return pci_dev->__aer_firmware_first;
return 0;
}
+
+static inline bool aer_acpi_firmware_first(void) { return false; }
#endif
static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 2bb9b8972211..275bf158ffa7 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -93,4 +93,38 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
+
+static bool aer_firmware_first;
+
+static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_aer_common *p;
+
+ if (aer_firmware_first)
+ return 0;
+
+ switch (hest_hdr->type) {
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * aer_acpi_firmware_first - Check if APEI should control AER.
+ */
+bool aer_acpi_firmware_first(void)
+{
+ static bool parsed = false;
+
+ if (!parsed) {
+ apei_hest_parse(aer_hest_parse_aff, NULL);
+ parsed = true;
+ }
+ return aer_firmware_first;
+}
#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 29e268fadf14..43421fbe080a 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -754,7 +754,7 @@ void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
- struct aer_err_source e_src;
+ struct aer_err_source uninitialized_var(e_src);
mutex_lock(&rpc->rpc_mutex);
while (get_e_source(rpc, &e_src))
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index b7c4cb1ccb23..5982b6a63b89 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -49,7 +49,7 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
| OSC_PCI_EXPRESS_PME_CONTROL;
if (pci_aer_available()) {
- if (pcie_aer_get_firmware_first(port))
+ if (aer_acpi_firmware_first())
dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
else
flags |= OSC_PCI_EXPRESS_AER_CONTROL;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 12625d90f8b5..c84900da3c59 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -961,8 +961,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->class = class;
class >>= 8;
- dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
- dev->vendor, dev->device, class, dev->hdr_type);
+ dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, class);
/* need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 01f0306525a5..297b72c880a1 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -212,8 +212,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#endif /* HAVE_PCI_MMAP */
int ret = 0;
- lock_kernel();
-
switch (cmd) {
case PCIIOC_CONTROLLER:
ret = pci_domain_nr(dev->bus);
@@ -242,7 +240,6 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
};
- unlock_kernel();
return ret;
}
@@ -306,6 +303,7 @@ static const struct file_operations proc_bus_pci_operations = {
.read = proc_bus_pci_read,
.write = proc_bus_pci_write,
.unlocked_ioctl = proc_bus_pci_ioctl,
+ .compat_ioctl = proc_bus_pci_ioctl,
#ifdef HAVE_PCI_MMAP
.open = proc_bus_pci_open,
.release = proc_bus_pci_release,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index cc96c7142dac..f5c63fe9db5c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2297,6 +2297,37 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
+/*
+ * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
+ * config register. This register controls the routing of legacy interrupts
+ * from devices that route through the MCP55. If this register is misprogrammed,
+ * interrupts are only sent to the BSP, unlike conventional systems where the
+ * irq is broadcast to all online cpus. Having this register set incorrectly
+ * prevents kdump from booting up properly, so let's make sure that we have
+ * it set correctly.
+ * Note this is an undocumented register.
+ */
+static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
+{
+ u32 cfg;
+
+ pci_read_config_dword(dev, 0x74, &cfg);
+
+ if (cfg & ((1 << 2) | (1 << 15))) {
+ printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+ cfg &= ~((1 << 2) | (1 << 15));
+ pci_write_config_dword(dev, 0x74, cfg);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
+ nvbridge_check_legacy_irq_routing);
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
+ nvbridge_check_legacy_irq_routing);
+
static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 2aaa13150de3..bc0e6eea0fff 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -85,7 +85,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n",
+ dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
resno, res, (unsigned long long)region.start,
(unsigned long long)region.end);
}
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index f540ff96c53f..e61db9dfebef 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -29,7 +29,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <asm/intel_scu_ipc.h>
#include <linux/device.h>
#include <linux/intel_pmic_gpio.h>
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 0bab84ebb15d..19bc73695475 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -12,11 +12,12 @@ void pnp_unregister_protocol(struct pnp_protocol *protocol);
#define PNP_EISA_ID_MASK 0x7fffffff
void pnp_eisa_id_to_string(u32 id, char *str);
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id, char *pnpid);
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id,
+ const char *pnpid);
struct pnp_card *pnp_alloc_card(struct pnp_protocol *, int id, char *pnpid);
int pnp_add_device(struct pnp_dev *dev);
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id);
int pnp_add_card(struct pnp_card *card);
void pnp_remove_card(struct pnp_card *card);
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 88b3cde52596..0f34d962fd3c 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -124,7 +124,8 @@ static void pnp_release_device(struct device *dmdev)
kfree(dev);
}
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+ const char *pnpid)
{
struct pnp_dev *dev;
struct pnp_id *dev_id;
@@ -194,8 +195,9 @@ int pnp_add_device(struct pnp_dev *dev)
for (id = dev->id; id; id = id->next)
len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
- pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
- dev->protocol->name, buf, dev->active ? "active" : "disabled");
+ dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
+ dev->protocol->name, buf,
+ dev->active ? "active" : "disabled");
return 0;
}
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index cd11b113494f..d1dbb9df53fa 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -236,7 +236,7 @@ void pnp_unregister_driver(struct pnp_driver *drv)
* @dev: pointer to the desired device
* @id: pointer to an EISA id string
*/
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id)
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id)
{
struct pnp_id *dev_id, *ptr;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index dc4e32e031e9..2d73dfcecdbb 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -28,7 +28,7 @@
#include "../base.h"
#include "pnpacpi.h"
-static int num = 0;
+static int num;
/* We need only to blacklist devices that have already an acpi driver that
* can't use pnp layer. We don't need to blacklist device that are directly
@@ -59,7 +59,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
#define TEST_ALPHA(c) \
if (!('@' <= (c) || (c) <= 'Z')) \
return 0
-static int __init ispnpidacpi(char *id)
+static int __init ispnpidacpi(const char *id)
{
TEST_ALPHA(id[0]);
TEST_ALPHA(id[1]);
@@ -180,11 +180,24 @@ struct pnp_protocol pnpacpi_protocol = {
};
EXPORT_SYMBOL(pnpacpi_protocol);
+static char *pnpacpi_get_id(struct acpi_device *device)
+{
+ struct acpi_hardware_id *id;
+
+ list_for_each_entry(id, &device->pnp.ids, list) {
+ if (ispnpidacpi(id->id))
+ return id->id;
+ }
+
+ return NULL;
+}
+
static int __init pnpacpi_add_device(struct acpi_device *device)
{
acpi_handle temp = NULL;
acpi_status status;
struct pnp_dev *dev;
+ char *pnpid;
struct acpi_hardware_id *id;
/*
@@ -192,11 +205,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
* driver should not be loaded.
*/
status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
- is_exclusive_device(device) || (!device->status.present))
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ pnpid = pnpacpi_get_id(device);
+ if (!pnpid)
+ return 0;
+
+ if (is_exclusive_device(device) || !device->status.present)
return 0;
- dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
+ dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
if (!dev)
return -ENOMEM;
@@ -227,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
pnpacpi_parse_resource_option_data(dev);
list_for_each_entry(id, &device->pnp.ids, list) {
- if (!strcmp(id->id, acpi_device_hid(device)))
+ if (!strcmp(id->id, pnpid))
continue;
if (!ispnpidacpi(id->id))
continue;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index e3446ab8b563..a925e6b63d72 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -523,7 +523,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
res->start = irq;
res->end = irq;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -544,7 +544,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
res->start = dma;
res->end = dma;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -568,7 +568,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -592,7 +592,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
@@ -616,7 +616,7 @@ struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add %pr\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
return pnp_res;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 07343568a12e..ec444774b9ee 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -64,8 +64,7 @@ config TEST_POWER
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
- select W1
- select W1_SLAVE_DS2760
+ depends on W1 && W1_SLAVE_DS2760
help
Say Y here to enable support for batteries with ds2760 chip.
@@ -109,6 +108,13 @@ config BATTERY_WM97XX
help
Say Y to enable support for battery measured by WM97xx aux port.
+config BATTERY_BQ20Z75
+ tristate "TI BQ20z75 gas gauge"
+ depends on I2C
+ help
+ Say Y to include support for TI BQ20z75 SBS-compliant
+ gas gauge and protection IC.
+
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
depends on I2C
@@ -166,4 +172,18 @@ config BATTERY_INTEL_MID
Say Y here to enable the battery driver on Intel MID
platforms.
+config CHARGER_ISP1704
+ tristate "ISP1704 USB Charger Detection"
+ depends on USB_OTG_UTILS
+ help
+ Say Y to enable support for USB Charger Detection with
+ ISP1707/ISP1704 USB transceivers.
+
+config CHARGER_TWL4030
+ tristate "OMAP TWL4030 BCI charger driver"
+ depends on TWL4030_CORE
+ depends on BROKEN
+ help
+ Say Y here to enable support for TWL4030 Battery Charge Interface.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 10143aaf4ee3..c75772eb157c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,16 +1,8 @@
-power_supply-objs := power_supply_core.o
+ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
-ifeq ($(CONFIG_SYSFS),y)
-power_supply-objs += power_supply_sysfs.o
-endif
-
-ifeq ($(CONFIG_LEDS_TRIGGERS),y)
-power_supply-objs += power_supply_leds.o
-endif
-
-ifeq ($(CONFIG_POWER_SUPPLY_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+power_supply-y := power_supply_core.o
+power_supply-$(CONFIG_SYSFS) += power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
@@ -29,6 +21,7 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
+obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
@@ -37,3 +30,5 @@ obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
+obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
+obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
new file mode 100644
index 000000000000..492da27e1a47
--- /dev/null
+++ b/drivers/power/bq20z75.c
@@ -0,0 +1,493 @@
+/*
+ * Gas Gauge driver for TI's BQ20Z75
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+enum {
+ REG_MANUFACTURER_DATA,
+ REG_TEMPERATURE,
+ REG_VOLTAGE,
+ REG_CURRENT,
+ REG_CAPACITY,
+ REG_TIME_TO_EMPTY,
+ REG_TIME_TO_FULL,
+ REG_STATUS,
+ REG_CYCLE_COUNT,
+ REG_SERIAL_NUMBER,
+ REG_REMAINING_CAPACITY,
+ REG_FULL_CHARGE_CAPACITY,
+ REG_DESIGN_CAPACITY,
+ REG_DESIGN_VOLTAGE,
+};
+
+/* manufacturer access defines */
+#define MANUFACTURER_ACCESS_STATUS 0x0006
+#define MANUFACTURER_ACCESS_SLEEP 0x0011
+
+/* battery status value bits */
+#define BATTERY_DISCHARGING 0x40
+#define BATTERY_FULL_CHARGED 0x20
+#define BATTERY_FULL_DISCHARGED 0x10
+
+#define BQ20Z75_DATA(_psp, _addr, _min_value, _max_value) { \
+ .psp = _psp, \
+ .addr = _addr, \
+ .min_value = _min_value, \
+ .max_value = _max_value, \
+}
+
+static const struct bq20z75_device_data {
+ enum power_supply_property psp;
+ u8 addr;
+ int min_value;
+ int max_value;
+} bq20z75_data[] = {
+ [REG_MANUFACTURER_DATA] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
+ [REG_TEMPERATURE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
+ [REG_VOLTAGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+ [REG_CURRENT] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768,
+ 32767),
+ [REG_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ [REG_REMAINING_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ [REG_FULL_CHARGE_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ [REG_TIME_TO_EMPTY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
+ 65535),
+ [REG_TIME_TO_FULL] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0,
+ 65535),
+ [REG_STATUS] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
+ [REG_CYCLE_COUNT] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
+ [REG_DESIGN_CAPACITY] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
+ 65535),
+ [REG_DESIGN_VOLTAGE] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
+ 65535),
+ [REG_SERIAL_NUMBER] =
+ BQ20Z75_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
+};
+
+static enum power_supply_property bq20z75_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+};
+
+struct bq20z75_info {
+ struct i2c_client *client;
+ struct power_supply power_supply;
+};
+
+static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client, address);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: i2c read at address 0x%x failed\n",
+ __func__, address);
+ return ret;
+ }
+ return le16_to_cpu(ret);
+}
+
+static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
+ u16 value)
+{
+ s32 ret;
+
+ ret = i2c_smbus_write_word_data(client, address, le16_to_cpu(value));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: i2c write to address 0x%x failed\n",
+ __func__, address);
+ return ret;
+ }
+ return 0;
+}
+
+static int bq20z75_get_battery_presence_and_health(
+ struct i2c_client *client, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ /* Write the ManufacturerAccess status command to the
+ * ManufacturerAccess register and then
+ * read the status back */
+ ret = bq20z75_write_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
+ if (ret < 0)
+ return ret;
+
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr);
+ if (ret < 0)
+ return ret;
+
+ if (ret < bq20z75_data[REG_MANUFACTURER_DATA].min_value ||
+ ret > bq20z75_data[REG_MANUFACTURER_DATA].max_value) {
+ val->intval = 0;
+ return 0;
+ }
+
+ /* Keep only the low nibble of the upper byte of the
+ * response, then shift the result right by 8 to get
+ * the status code */
+ ret &= 0x0F00;
+ ret >>= 8;
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ if (ret == 0x0F)
+ /* battery removed */
+ val->intval = 0;
+ else
+ val->intval = 1;
+ } else if (psp == POWER_SUPPLY_PROP_HEALTH) {
+ if (ret == 0x09)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (ret == 0x0B)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (ret == 0x0C)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+ return 0;
+}
+
+static int bq20z75_get_battery_property(struct i2c_client *client,
+ int reg_offset, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[reg_offset].addr);
+ if (ret < 0)
+ return ret;
+
+ /* returned values are 16 bit */
+ if (bq20z75_data[reg_offset].min_value < 0)
+ ret = (s16)ret;
+
+ if (ret >= bq20z75_data[reg_offset].min_value &&
+ ret <= bq20z75_data[reg_offset].max_value) {
+ val->intval = ret;
+ if (psp == POWER_SUPPLY_PROP_STATUS) {
+ if (ret & BATTERY_FULL_CHARGED)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ } else {
+ if (psp == POWER_SUPPLY_PROP_STATUS)
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ else
+ val->intval = 0;
+ }
+
+ return 0;
+}
+
+static void bq20z75_unit_adjustment(struct i2c_client *client,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+#define BASE_UNIT_CONVERSION 1000
+#define BATTERY_MODE_CAP_MULT_WATT (10 * BASE_UNIT_CONVERSION)
+#define TIME_UNIT_CONVERSION 600
+#define TEMP_KELVIN_TO_CELCIUS 2731
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval *= BATTERY_MODE_CAP_MULT_WATT;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval *= BASE_UNIT_CONVERSION;
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ /* bq20z75 provides battery temperature in units of 0.1 K
+ * so convert it to 0.1°C */
+ val->intval -= TEMP_KELVIN_TO_CELCIUS;
+ val->intval *= 10;
+ break;
+
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ val->intval *= TIME_UNIT_CONVERSION;
+ break;
+
+ default:
+ dev_dbg(&client->dev,
+ "%s: no need for unit conversion %d\n", __func__, psp);
+ }
+}
+
+static int bq20z75_get_battery_capacity(struct i2c_client *client,
+ int reg_offset, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ s32 ret;
+
+ ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
+ if (ret < 0)
+ return ret;
+
+ if (psp == POWER_SUPPLY_PROP_CAPACITY) {
+ /* bq20z75 spec says that this can be >100 %
+ * even if max value is 100 % */
+ val->intval = min(ret, 100);
+ } else
+ val->intval = ret;
+
+ return 0;
+}
+
+static char bq20z75_serial[5];
+static int bq20z75_get_battery_serial_number(struct i2c_client *client,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = bq20z75_read_word_data(client,
+ bq20z75_data[REG_SERIAL_NUMBER].addr);
+ if (ret < 0)
+ return ret;
+
+ ret = sprintf(bq20z75_serial, "%04x", ret);
+ val->strval = bq20z75_serial;
+
+ return 0;
+}
+
+static int bq20z75_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int count;
+ int ret;
+ struct bq20z75_info *bq20z75_device = container_of(psy,
+ struct bq20z75_info, power_supply);
+ struct i2c_client *client = bq20z75_device->client;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq20z75_get_battery_presence_and_health(client, psp, val);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
+ if (psp == bq20z75_data[count].psp)
+ break;
+ }
+
+ ret = bq20z75_get_battery_capacity(client, count, psp, val);
+ if (ret)
+ return ret;
+
+ break;
+
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ ret = bq20z75_get_battery_serial_number(client, val);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_TEMP:
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++) {
+ if (psp == bq20z75_data[count].psp)
+ break;
+ }
+
+ ret = bq20z75_get_battery_property(client, count, psp, val);
+ if (ret)
+ return ret;
+
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "%s: INVALID property\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Convert units to match requirements for power supply class */
+ bq20z75_unit_adjustment(client, psp, val);
+
+ dev_dbg(&client->dev,
+ "%s: property = %d, value = %d\n", __func__, psp, val->intval);
+
+ return 0;
+}
+
+static int bq20z75_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bq20z75_info *bq20z75_device;
+ int rc;
+
+ bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
+ if (!bq20z75_device)
+ return -ENOMEM;
+
+ bq20z75_device->client = client;
+ bq20z75_device->power_supply.name = "battery";
+ bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
+ bq20z75_device->power_supply.properties = bq20z75_properties;
+ bq20z75_device->power_supply.num_properties =
+ ARRAY_SIZE(bq20z75_properties);
+ bq20z75_device->power_supply.get_property = bq20z75_get_property;
+
+ i2c_set_clientdata(client, bq20z75_device);
+
+ rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
+ if (rc) {
+ dev_err(&client->dev,
+ "%s: Failed to register power supply\n", __func__);
+ kfree(bq20z75_device);
+ return rc;
+ }
+
+ dev_info(&client->dev,
+ "%s: battery gas gauge device registered\n", client->name);
+
+ return 0;
+}
+
+static int bq20z75_remove(struct i2c_client *client)
+{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+
+ power_supply_unregister(&bq20z75_device->power_supply);
+ kfree(bq20z75_device);
+ bq20z75_device = NULL;
+
+ return 0;
+}
+
+#if defined CONFIG_PM
+static int bq20z75_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ s32 ret;
+
+ /* write to manufacturer access with sleep command */
+ ret = bq20z75_write_word_data(client,
+ bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_SLEEP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#else
+#define bq20z75_suspend NULL
+#endif
+/* any smbus transaction will wake up bq20z75 */
+#define bq20z75_resume NULL
+
+static const struct i2c_device_id bq20z75_id[] = {
+ { "bq20z75", 0 },
+ {}
+};
+
+static struct i2c_driver bq20z75_battery_driver = {
+ .probe = bq20z75_probe,
+ .remove = bq20z75_remove,
+ .suspend = bq20z75_suspend,
+ .resume = bq20z75_resume,
+ .id_table = bq20z75_id,
+ .driver = {
+ .name = "bq20z75-battery",
+ },
+};
+
+static int __init bq20z75_battery_init(void)
+{
+ return i2c_add_driver(&bq20z75_battery_driver);
+}
+module_init(bq20z75_battery_init);
+
+static void __exit bq20z75_battery_exit(void)
+{
+ i2c_del_driver(&bq20z75_battery_driver);
+}
+module_exit(bq20z75_battery_exit);
+
+MODULE_DESCRIPTION("BQ20z75 battery monitor driver");
+MODULE_LICENSE("GPL");
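
bq20z75_get_battery_presence_and_health() above keeps only the low nibble of the high byte of the ManufacturerAccess response and maps it to presence and health codes (0x0F battery removed, 0x09 failure, 0x0B overheat, 0x0C dead). A minimal standalone sketch of that decode in plain C, with a made-up response word:

#include <stdio.h>

int main(void)
{
	unsigned int word = 0x0B40;	/* hypothetical ManufacturerAccess response */
	unsigned int status = (word & 0x0F00) >> 8;	/* low nibble of high byte */

	if (status == 0x0F)
		printf("battery removed\n");
	else if (status == 0x0B)
		printf("present, health: overheat\n");
	else
		printf("present, status code %#x\n", status);
	return 0;
}
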
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 3ec9c6a8896b..eff0273d4030 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -418,6 +418,7 @@ static int bq27x00_battery_remove(struct i2c_client *client)
power_supply_unregister(&di->bat);
+ kfree(di->bus);
kfree(di->bat.name);
mutex_lock(&battery_mutex);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 4d3b27228a2e..b3c01c16a164 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -586,6 +586,7 @@ static int ds2760_battery_remove(struct platform_device *pdev)
&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
power_supply_unregister(&di->bat);
+ kfree(di);
return 0;
}
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 84d3c43cf2bc..6957e8af6449 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -44,8 +44,8 @@ struct ds278x_info;
struct ds278x_battery_ops {
int (*get_battery_current)(struct ds278x_info *info, int *current_uA);
- int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uA);
- int (*get_battery_capacity)(struct ds278x_info *info, int *capacity_uA);
+ int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uV);
+ int (*get_battery_capacity)(struct ds278x_info *info, int *capacity);
};
#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
@@ -137,7 +137,7 @@ static int ds2782_get_current(struct ds278x_info *info, int *current_uA)
return 0;
}
-static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
+static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uV)
{
s16 raw;
int err;
@@ -149,7 +149,7 @@ static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
- *voltage_uA = (raw / 32) * 4800;
+ *voltage_uV = (raw / 32) * 4800;
return 0;
}
@@ -177,7 +177,7 @@ static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
return 0;
}
-static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
+static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
{
s16 raw;
int err;
@@ -189,7 +189,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
- *voltage_uA = (raw / 8) * 1220;
+ *voltage_uV = (raw / 8) * 1220;
return 0;
}
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
new file mode 100644
index 000000000000..72512185f3e2
--- /dev/null
+++ b/drivers/power/isp1704_charger.c
@@ -0,0 +1,369 @@
+/*
+ * ISP1704 USB Charger Detection driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/delay.h>
+
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* Vendor specific Power Control register */
+#define ISP1704_PWR_CTRL 0x3d
+#define ISP1704_PWR_CTRL_SWCTRL (1 << 0)
+#define ISP1704_PWR_CTRL_DET_COMP (1 << 1)
+#define ISP1704_PWR_CTRL_BVALID_RISE (1 << 2)
+#define ISP1704_PWR_CTRL_BVALID_FALL (1 << 3)
+#define ISP1704_PWR_CTRL_DP_WKPU_EN (1 << 4)
+#define ISP1704_PWR_CTRL_VDAT_DET (1 << 5)
+#define ISP1704_PWR_CTRL_DPVSRC_EN (1 << 6)
+#define ISP1704_PWR_CTRL_HWDETECT (1 << 7)
+
+#define NXP_VENDOR_ID 0x04cc
+
+static u16 isp170x_id[] = {
+ 0x1704,
+ 0x1707,
+};
+
+struct isp1704_charger {
+ struct device *dev;
+ struct power_supply psy;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+ struct work_struct work;
+
+ char model[7];
+ unsigned present:1;
+};
+
+/*
+ * ISP1704 detects PS/2 adapters as a charger. To make sure the detected charger
+ * is actually a dedicated charger, the following steps need to be taken.
+ */
+static inline int isp1704_charger_verify(struct isp1704_charger *isp)
+{
+ int ret = 0;
+ u8 r;
+
+ /* Reset the transceiver */
+ r = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
+ r |= ULPI_FUNC_CTRL_RESET;
+ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
+ usleep_range(1000, 2000);
+
+ /* Set normal mode */
+ r &= ~(ULPI_FUNC_CTRL_RESET | ULPI_FUNC_CTRL_OPMODE_MASK);
+ otg_io_write(isp->otg, ULPI_FUNC_CTRL, r);
+
+ /* Clear the DP and DM pull-down bits */
+ r = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN;
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), r);
+
+ /* Enable strong pull-up on DP (1.5K) and reset */
+ r = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
+ otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), r);
+ usleep_range(1000, 2000);
+
+ /* Read the line state */
+ if (!otg_io_read(isp->otg, ULPI_DEBUG)) {
+ /* Disable strong pull-up on DP (1.5K) */
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_TERMSELECT);
+ return 1;
+ }
+
+ /* Is it a charger or PS/2 connection */
+
+ /* Enable weak pull-up resistor on DP */
+ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
+ ISP1704_PWR_CTRL_DP_WKPU_EN);
+
+ /* Disable strong pull-up on DP (1.5K) */
+ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+ ULPI_FUNC_CTRL_TERMSELECT);
+
+ /* Enable weak pull-down resistor on DM */
+ otg_io_write(isp->otg, ULPI_SET(ULPI_OTG_CTRL),
+ ULPI_OTG_CTRL_DM_PULLDOWN);
+
+ /* It's a charger if the line states are clear */
+ if (!(otg_io_read(isp->otg, ULPI_DEBUG)))
+ ret = 1;
+
+ /* Disable weak pull-up resistor on DP */
+ otg_io_write(isp->otg, ULPI_CLR(ISP1704_PWR_CTRL),
+ ISP1704_PWR_CTRL_DP_WKPU_EN);
+
+ return ret;
+}
+
+static inline int isp1704_charger_detect(struct isp1704_charger *isp)
+{
+ unsigned long timeout;
+ u8 r;
+ int ret = 0;
+
+ /* set SW control bit in PWR_CTRL register */
+ otg_io_write(isp->otg, ISP1704_PWR_CTRL,
+ ISP1704_PWR_CTRL_SWCTRL);
+
+ /* enable manual charger detection */
+ r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
+ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
+ usleep_range(1000, 2000);
+
+ timeout = jiffies + msecs_to_jiffies(300);
+ do {
+ /* Check if there is a charger */
+ if (otg_io_read(isp->otg, ISP1704_PWR_CTRL)
+ & ISP1704_PWR_CTRL_VDAT_DET) {
+ ret = isp1704_charger_verify(isp);
+ break;
+ }
+ } while (!time_after(jiffies, timeout));
+
+ return ret;
+}
+
+static void isp1704_charger_work(struct work_struct *data)
+{
+ int detect;
+ struct isp1704_charger *isp =
+ container_of(data, struct isp1704_charger, work);
+
+ /*
+ * FIXME Only supporting dedicated chargers even though isp1704 can
+ * detect HUB and HOST chargers. If the device has already been
+ * enumerated, the detection will break the connection.
+ */
+ if (isp->otg->state != OTG_STATE_B_IDLE)
+ return;
+
+ /* disable data pullups */
+ if (isp->otg->gadget)
+ usb_gadget_disconnect(isp->otg->gadget);
+
+ /* detect charger */
+ detect = isp1704_charger_detect(isp);
+ if (detect) {
+ isp->present = detect;
+ power_supply_changed(&isp->psy);
+ }
+
+ /* enable data pullups */
+ if (isp->otg->gadget)
+ usb_gadget_connect(isp->otg->gadget);
+}
+
+static int isp1704_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct isp1704_charger *isp =
+ container_of(nb, struct isp1704_charger, nb);
+
+ switch (event) {
+ case USB_EVENT_VBUS:
+ schedule_work(&isp->work);
+ break;
+ case USB_EVENT_NONE:
+ if (isp->present) {
+ isp->present = 0;
+ power_supply_changed(&isp->psy);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int isp1704_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct isp1704_charger *isp =
+ container_of(psy, struct isp1704_charger, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = isp->present;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = isp->model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "NXP";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property power_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static inline int isp1704_test_ulpi(struct isp1704_charger *isp)
+{
+ int vendor;
+ int product;
+ int i;
+ int ret = -ENODEV;
+
+ /* Test ULPI interface */
+ ret = otg_io_write(isp->otg, ULPI_SCRATCH, 0xaa);
+ if (ret < 0)
+ return ret;
+
+ ret = otg_io_read(isp->otg, ULPI_SCRATCH);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 0xaa)
+ return -ENODEV;
+
+ /* Verify the product and vendor id matches */
+ vendor = otg_io_read(isp->otg, ULPI_VENDOR_ID_LOW);
+ vendor |= otg_io_read(isp->otg, ULPI_VENDOR_ID_HIGH) << 8;
+ if (vendor != NXP_VENDOR_ID)
+ return -ENODEV;
+
+ product = otg_io_read(isp->otg, ULPI_PRODUCT_ID_LOW);
+ product |= otg_io_read(isp->otg, ULPI_PRODUCT_ID_HIGH) << 8;
+
+ for (i = 0; i < ARRAY_SIZE(isp170x_id); i++) {
+ if (product == isp170x_id[i]) {
+ sprintf(isp->model, "isp%x", product);
+ return product;
+ }
+ }
+
+ dev_err(isp->dev, "product id %x not matching known ids", product);
+
+ return -ENODEV;
+}
+
+static int __devinit isp1704_charger_probe(struct platform_device *pdev)
+{
+ struct isp1704_charger *isp;
+ int ret = -ENODEV;
+
+ isp = kzalloc(sizeof *isp, GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->otg = otg_get_transceiver();
+ if (!isp->otg)
+ goto fail0;
+
+ ret = isp1704_test_ulpi(isp);
+ if (ret < 0)
+ goto fail1;
+
+ isp->dev = &pdev->dev;
+ platform_set_drvdata(pdev, isp);
+
+ isp->psy.name = "isp1704";
+ isp->psy.type = POWER_SUPPLY_TYPE_USB;
+ isp->psy.properties = power_props;
+ isp->psy.num_properties = ARRAY_SIZE(power_props);
+ isp->psy.get_property = isp1704_charger_get_property;
+
+ ret = power_supply_register(isp->dev, &isp->psy);
+ if (ret)
+ goto fail1;
+
+ /*
+ * REVISIT: using work in order to allow the otg notifications to be
+ * made atomically in the future.
+ */
+ INIT_WORK(&isp->work, isp1704_charger_work);
+
+ isp->nb.notifier_call = isp1704_notifier_call;
+
+ ret = otg_register_notifier(isp->otg, &isp->nb);
+ if (ret)
+ goto fail2;
+
+ dev_info(isp->dev, "registered with product id %s\n", isp->model);
+
+ return 0;
+fail2:
+ power_supply_unregister(&isp->psy);
+fail1:
+ otg_put_transceiver(isp->otg);
+fail0:
+ kfree(isp);
+
+ dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
+
+ return ret;
+}
+
+static int __devexit isp1704_charger_remove(struct platform_device *pdev)
+{
+ struct isp1704_charger *isp = platform_get_drvdata(pdev);
+
+ otg_unregister_notifier(isp->otg, &isp->nb);
+ power_supply_unregister(&isp->psy);
+ otg_put_transceiver(isp->otg);
+ kfree(isp);
+
+ return 0;
+}
+
+static struct platform_driver isp1704_charger_driver = {
+ .driver = {
+ .name = "isp1704_charger",
+ },
+ .probe = isp1704_charger_probe,
+ .remove = __devexit_p(isp1704_charger_remove),
+};
+
+static int __init isp1704_charger_init(void)
+{
+ return platform_driver_register(&isp1704_charger_driver);
+}
+module_init(isp1704_charger_init);
+
+static void __exit isp1704_charger_exit(void)
+{
+ platform_driver_unregister(&isp1704_charger_driver);
+}
+module_exit(isp1704_charger_exit);
+
+MODULE_ALIAS("platform:isp1704_charger");
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("ISP170x USB Charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 20c4b952e9bd..a8108a73593e 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -383,6 +383,7 @@ static int __devexit jz_battery_remove(struct platform_device *pdev)
iounmap(jz_battery->base);
release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
+ kfree(jz_battery);
return 0;
}
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index aafc1c506eda..5bc1dcf7785e 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
@@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
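
The olpc_battery hunks above replace (int) casts with (s16): be16_to_cpu() yields an unsigned 16-bit value, so without a signed cast a negative EC reading (e.g. a discharge current) is scaled as a large positive number. A standalone sketch of the difference, using a plain-C stand-in for be16_to_cpu() and a made-up EC word:

#include <stdio.h>
#include <stdint.h>

static uint16_t be16_to_host(const uint8_t b[2])
{
	return (uint16_t)(b[0] << 8 | b[1]);	/* big-endian word to host order */
}

int main(void)
{
	uint8_t ec_word[2] = { 0xff, 0x9c };	/* -100 encoded as big-endian s16 */
	uint16_t raw = be16_to_host(ec_word);

	/* same 15625/120 scaling as the CURRENT_AVG case above */
	printf("unsigned: %ld uA\n", (long)((int)raw * 15625L / 120));
	printf("signed:   %ld uA\n", (long)((int16_t)raw * 15625L / 120));
	return 0;
}
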
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 066f994e6fe5..4fa52e1781a2 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -456,6 +456,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &mbc_attr_group);
power_supply_unregister(&mbc->usb);
power_supply_unregister(&mbc->adapter);
power_supply_unregister(&mbc->ac);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 9d30eeb8c810..cd1f90754a3a 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -42,7 +42,8 @@ static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
static char *type_text[] = {
- "Battery", "UPS", "Mains", "USB"
+ "Battery", "UPS", "Mains", "USB",
+ "USB_DCP", "USB_CDP", "USB_ACA"
};
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
@@ -138,6 +139,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
POWER_SUPPLY_ATTR(power_now),
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
new file mode 100644
index 000000000000..ff1f42398a2e
--- /dev/null
+++ b/drivers/power/twl4030_charger.c
@@ -0,0 +1,565 @@
+/*
+ * TWL4030/TPS65950 BCI (Battery Charger Interface) driver
+ *
+ * Copyright (C) 2010 Gražvydas Ignotas <notasas@gmail.com>
+ *
+ * based on twl4030_bci_battery.c by TI
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/i2c/twl.h>
+#include <linux/power_supply.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg.h>
+
+#define TWL4030_BCIMSTATEC 0x02
+#define TWL4030_BCIICHG 0x08
+#define TWL4030_BCIVAC 0x0a
+#define TWL4030_BCIVBUS 0x0c
+#define TWL4030_BCIMFSTS4 0x10
+#define TWL4030_BCICTL1 0x23
+
+#define TWL4030_BCIAUTOWEN BIT(5)
+#define TWL4030_CONFIG_DONE BIT(4)
+#define TWL4030_BCIAUTOUSB BIT(1)
+#define TWL4030_BCIAUTOAC BIT(0)
+#define TWL4030_CGAIN BIT(5)
+#define TWL4030_USBFASTMCHG BIT(2)
+#define TWL4030_STS_VBUS BIT(7)
+#define TWL4030_STS_USB_ID BIT(2)
+
+/* BCI interrupts */
+#define TWL4030_WOVF BIT(0) /* Watchdog overflow */
+#define TWL4030_TMOVF BIT(1) /* Timer overflow */
+#define TWL4030_ICHGHIGH BIT(2) /* Battery charge current high */
+#define TWL4030_ICHGLOW BIT(3) /* Battery cc. low / FSM state change */
+#define TWL4030_ICHGEOC BIT(4) /* Battery current end-of-charge */
+#define TWL4030_TBATOR2 BIT(5) /* Battery temperature out of range 2 */
+#define TWL4030_TBATOR1 BIT(6) /* Battery temperature out of range 1 */
+#define TWL4030_BATSTS BIT(7) /* Battery status */
+
+#define TWL4030_VBATLVL BIT(0) /* VBAT level */
+#define TWL4030_VBATOV BIT(1) /* VBAT overvoltage */
+#define TWL4030_VBUSOV BIT(2) /* VBUS overvoltage */
+#define TWL4030_ACCHGOV BIT(3) /* AC charger overvoltage */
+
+#define TWL4030_MSTATEC_USB BIT(4)
+#define TWL4030_MSTATEC_AC BIT(5)
+#define TWL4030_MSTATEC_MASK 0x0f
+#define TWL4030_MSTATEC_QUICK1 0x02
+#define TWL4030_MSTATEC_QUICK7 0x07
+#define TWL4030_MSTATEC_COMPLETE1 0x0b
+#define TWL4030_MSTATEC_COMPLETE4 0x0e
+
+static bool allow_usb;
+module_param(allow_usb, bool, 1);
+MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
+
+struct twl4030_bci {
+ struct device *dev;
+ struct power_supply ac;
+ struct power_supply usb;
+ struct otg_transceiver *transceiver;
+ struct notifier_block otg_nb;
+ int irq_chg;
+ int irq_bci;
+};
+
+/*
+ * clear and set bits on a given register on a given module
+ */
+static int twl4030_clear_set(u8 mod_no, u8 clear, u8 set, u8 reg)
+{
+ u8 val = 0;
+ int ret;
+
+ ret = twl_i2c_read_u8(mod_no, &val, reg);
+ if (ret)
+ return ret;
+
+ val &= ~clear;
+ val |= set;
+
+ return twl_i2c_write_u8(mod_no, val, reg);
+}
+
+static int twl4030_bci_read(u8 reg, u8 *val)
+{
+ return twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
+}
+
+static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
+{
+ return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, 0,
+ TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
+ TWL4030_PM_MASTER_BOOT_BCI);
+}
+
+static int twl4030bci_read_adc_val(u8 reg)
+{
+ int ret, temp;
+ u8 val;
+
+ /* read MSB */
+ ret = twl4030_bci_read(reg + 1, &val);
+ if (ret)
+ return ret;
+
+ temp = (int)(val & 0x03) << 8;
+
+ /* read LSB */
+ ret = twl4030_bci_read(reg, &val);
+ if (ret)
+ return ret;
+
+ return temp | val;
+}
+
+/*
+ * Check if VBUS power is present
+ */
+static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
+{
+ int ret;
+ u8 hwsts;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &hwsts,
+ TWL4030_PM_MASTER_STS_HW_CONDITIONS);
+ if (ret < 0)
+ return 0;
+
+ dev_dbg(bci->dev, "check_vbus: HW_CONDITIONS %02x\n", hwsts);
+
+ /* in case we also have STS_USB_ID, VBUS is driven by TWL itself */
+ if ((hwsts & TWL4030_STS_VBUS) && !(hwsts & TWL4030_STS_USB_ID))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Enable/Disable USB Charge functionality.
+ */
+static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ /* Check for USB charger connected */
+ if (!twl4030_bci_have_vbus(bci))
+ return -ENODEV;
+
+ /*
+ * Until we can find out what current the device can provide,
+ * require a module param to enable USB charging.
+ */
+ if (!allow_usb) {
+ dev_warn(bci->dev, "USB charging is disabled.\n");
+ return -EACCES;
+ }
+
+ /* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
+ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ if (ret < 0)
+ return ret;
+
+ /* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
+ ret = twl4030_clear_set(TWL4030_MODULE_MAIN_CHARGE, 0,
+ TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
+ } else {
+ ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+ }
+
+ return ret;
+}
+
+/*
+ * Enable/Disable AC Charge functionality.
+ */
+static int twl4030_charger_enable_ac(bool enable)
+{
+ int ret;
+
+ if (enable)
+ ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOAC);
+ else
+ ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC, 0);
+
+ return ret;
+}
+
+/*
+ * TWL4030 CHG_PRES (AC charger presence) events
+ */
+static irqreturn_t twl4030_charger_interrupt(int irq, void *arg)
+{
+ struct twl4030_bci *bci = arg;
+
+ dev_dbg(bci->dev, "CHG_PRES irq\n");
+ power_supply_changed(&bci->ac);
+ power_supply_changed(&bci->usb);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * TWL4030 BCI monitoring events
+ */
+static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
+{
+ struct twl4030_bci *bci = arg;
+ u8 irqs1, irqs2;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTERRUPTS, &irqs1,
+ TWL4030_INTERRUPTS_BCIISR1A);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTERRUPTS, &irqs2,
+ TWL4030_INTERRUPTS_BCIISR2A);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ dev_dbg(bci->dev, "BCI irq %02x %02x\n", irqs2, irqs1);
+
+ if (irqs1 & (TWL4030_ICHGLOW | TWL4030_ICHGEOC)) {
+ /* charger state change, inform the core */
+ power_supply_changed(&bci->ac);
+ power_supply_changed(&bci->usb);
+ }
+
+ /* various monitoring events, for now we just log them here */
+ if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1))
+ dev_warn(bci->dev, "battery temperature out of range\n");
+
+ if (irqs1 & TWL4030_BATSTS)
+ dev_crit(bci->dev, "battery disconnected\n");
+
+ if (irqs2 & TWL4030_VBATOV)
+ dev_crit(bci->dev, "VBAT overvoltage\n");
+
+ if (irqs2 & TWL4030_VBUSOV)
+ dev_crit(bci->dev, "VBUS overvoltage\n");
+
+ if (irqs2 & TWL4030_ACCHGOV)
+ dev_crit(bci->dev, "Ac charger overvoltage\n");
+
+ return IRQ_HANDLED;
+}
+
+static int twl4030_bci_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ struct twl4030_bci *bci = container_of(nb, struct twl4030_bci, otg_nb);
+
+ dev_dbg(bci->dev, "OTG notify %lu\n", val);
+
+ switch (val) {
+ case USB_EVENT_VBUS:
+ case USB_EVENT_CHARGER:
+ twl4030_charger_enable_usb(bci, true);
+ break;
+ case USB_EVENT_NONE:
+ twl4030_charger_enable_usb(bci, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * TI provided formulas:
+ * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
+ * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
+ * Here we use integer approximation of:
+ * CGAIN == 0: val * 1.6618 - 0.85
+ * CGAIN == 1: (val * 1.6618 - 0.85) * 2
+ */
+static int twl4030_charger_get_current(void)
+{
+ int curr;
+ int ret;
+ u8 bcictl1;
+
+ curr = twl4030bci_read_adc_val(TWL4030_BCIICHG);
+ if (curr < 0)
+ return curr;
+
+ ret = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+ if (ret)
+ return ret;
+
+ ret = (curr * 16618 - 850 * 10000) / 10;
+ if (bcictl1 & TWL4030_CGAIN)
+ ret *= 2;
+
+ return ret;
+}
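
The integer math in twl4030_charger_get_current() above tracks the CGAIN == 0 formula from the comment (1.7/1023 A per ADC step, minus 0.85 A), scaled to microamps. A standalone sketch comparing the exact formula with the driver's integer approximation, using a made-up ADC reading:

#include <stdio.h>

int main(void)
{
	int bciichg = 700;	/* hypothetical 10-bit BCIICHG reading */

	/* exact TI formula, converted to microamps */
	double exact_uA = (bciichg * 1.7 / 1023.0 - 0.85) * 1000000.0;

	/* the driver's integer approximation (1.7/1023 A ~= 1661.8 uA per step) */
	int approx_uA = (bciichg * 16618 - 850 * 10000) / 10;

	printf("exact: %.0f uA, approximation: %d uA\n", exact_uA, approx_uA);
	return 0;
}
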
+
+/*
+ * Returns the main charge FSM state
+ * Or < 0 on failure.
+ */
+static int twl4030bci_state(struct twl4030_bci *bci)
+{
+ int ret;
+ u8 state;
+
+ ret = twl4030_bci_read(TWL4030_BCIMSTATEC, &state);
+ if (ret) {
+ pr_err("twl4030_bci: error reading BCIMSTATEC\n");
+ return ret;
+ }
+
+ dev_dbg(bci->dev, "state: %02x\n", state);
+
+ return state;
+}
+
+static int twl4030_bci_state_to_status(int state)
+{
+ state &= TWL4030_MSTATEC_MASK;
+ if (TWL4030_MSTATEC_QUICK1 <= state && state <= TWL4030_MSTATEC_QUICK7)
+ return POWER_SUPPLY_STATUS_CHARGING;
+ else if (TWL4030_MSTATEC_COMPLETE1 <= state &&
+ state <= TWL4030_MSTATEC_COMPLETE4)
+ return POWER_SUPPLY_STATUS_FULL;
+ else
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+}
+
+static int twl4030_bci_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct twl4030_bci *bci = dev_get_drvdata(psy->dev->parent);
+ int is_charging;
+ int state;
+ int ret;
+
+ state = twl4030bci_state(bci);
+ if (state < 0)
+ return state;
+
+ if (psy->type == POWER_SUPPLY_TYPE_USB)
+ is_charging = state & TWL4030_MSTATEC_USB;
+ else
+ is_charging = state & TWL4030_MSTATEC_AC;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (is_charging)
+ val->intval = twl4030_bci_state_to_status(state);
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* charging must be active for meaningful result */
+ if (!is_charging)
+ return -ENODATA;
+ if (psy->type == POWER_SUPPLY_TYPE_USB) {
+ ret = twl4030bci_read_adc_val(TWL4030_BCIVBUS);
+ if (ret < 0)
+ return ret;
+ /* BCIVBUS uses ADCIN8, 7/1023 V/step */
+ val->intval = ret * 6843;
+ } else {
+ ret = twl4030bci_read_adc_val(TWL4030_BCIVAC);
+ if (ret < 0)
+ return ret;
+ /* BCIVAC uses ADCIN11, 10/1023 V/step */
+ val->intval = ret * 9775;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (!is_charging)
+ return -ENODATA;
+ /* current measurement is shared between AC and USB */
+ ret = twl4030_charger_get_current();
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = is_charging &&
+ twl4030_bci_state_to_status(state) !=
+ POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property twl4030_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int __init twl4030_bci_probe(struct platform_device *pdev)
+{
+ struct twl4030_bci *bci;
+ int ret;
+ int reg;
+
+ bci = kzalloc(sizeof(*bci), GFP_KERNEL);
+ if (bci == NULL)
+ return -ENOMEM;
+
+ bci->dev = &pdev->dev;
+ bci->irq_chg = platform_get_irq(pdev, 0);
+ bci->irq_bci = platform_get_irq(pdev, 1);
+
+ platform_set_drvdata(pdev, bci);
+
+ bci->ac.name = "twl4030_ac";
+ bci->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ bci->ac.properties = twl4030_charger_props;
+ bci->ac.num_properties = ARRAY_SIZE(twl4030_charger_props);
+ bci->ac.get_property = twl4030_bci_get_property;
+
+ ret = power_supply_register(&pdev->dev, &bci->ac);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ac: %d\n", ret);
+ goto fail_register_ac;
+ }
+
+ bci->usb.name = "twl4030_usb";
+ bci->usb.type = POWER_SUPPLY_TYPE_USB;
+ bci->usb.properties = twl4030_charger_props;
+ bci->usb.num_properties = ARRAY_SIZE(twl4030_charger_props);
+ bci->usb.get_property = twl4030_bci_get_property;
+
+ ret = power_supply_register(&pdev->dev, &bci->usb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
+ goto fail_register_usb;
+ }
+
+ ret = request_threaded_irq(bci->irq_chg, NULL,
+ twl4030_charger_interrupt, 0, pdev->name, bci);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request irq %d, status %d\n",
+ bci->irq_chg, ret);
+ goto fail_chg_irq;
+ }
+
+ ret = request_threaded_irq(bci->irq_bci, NULL,
+ twl4030_bci_interrupt, 0, pdev->name, bci);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request irq %d, status %d\n",
+ bci->irq_bci, ret);
+ goto fail_bci_irq;
+ }
+
+ bci->transceiver = otg_get_transceiver();
+ if (bci->transceiver != NULL) {
+ bci->otg_nb.notifier_call = twl4030_bci_usb_ncb;
+ otg_register_notifier(bci->transceiver, &bci->otg_nb);
+ }
+
+ /* Enable interrupts now. */
+ reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
+ goto fail_unmask_interrupts;
+ }
+
+ reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR2A);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
+
+ twl4030_charger_enable_ac(true);
+ twl4030_charger_enable_usb(bci, true);
+
+ return 0;
+
+fail_unmask_interrupts:
+ if (bci->transceiver != NULL) {
+ otg_unregister_notifier(bci->transceiver, &bci->otg_nb);
+ otg_put_transceiver(bci->transceiver);
+ }
+ free_irq(bci->irq_bci, bci);
+fail_bci_irq:
+ free_irq(bci->irq_chg, bci);
+fail_chg_irq:
+ power_supply_unregister(&bci->usb);
+fail_register_usb:
+ power_supply_unregister(&bci->ac);
+fail_register_ac:
+ platform_set_drvdata(pdev, NULL);
+ kfree(bci);
+
+ return ret;
+}
+
+static int __exit twl4030_bci_remove(struct platform_device *pdev)
+{
+ struct twl4030_bci *bci = platform_get_drvdata(pdev);
+
+ twl4030_charger_enable_ac(false);
+ twl4030_charger_enable_usb(bci, false);
+
+ /* mask interrupts */
+ twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
+ TWL4030_INTERRUPTS_BCIIMR2A);
+
+ if (bci->transceiver != NULL) {
+ otg_unregister_notifier(bci->transceiver, &bci->otg_nb);
+ otg_put_transceiver(bci->transceiver);
+ }
+ free_irq(bci->irq_bci, bci);
+ free_irq(bci->irq_chg, bci);
+ power_supply_unregister(&bci->usb);
+ power_supply_unregister(&bci->ac);
+ platform_set_drvdata(pdev, NULL);
+ kfree(bci);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_bci_driver = {
+ .driver = {
+ .name = "twl4030_bci",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(twl4030_bci_remove),
+};
+
+static int __init twl4030_bci_init(void)
+{
+ return platform_driver_probe(&twl4030_bci_driver, twl4030_bci_probe);
+}
+module_init(twl4030_bci_init);
+
+static void __exit twl4030_bci_exit(void)
+{
+ platform_driver_unregister(&twl4030_bci_driver);
+}
+module_exit(twl4030_bci_exit);
+
+MODULE_AUTHOR("GraĹľydas Ignotas");
+MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:twl4030_bci");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index fbcc36dae470..ddf8cf5f3204 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -267,7 +267,6 @@ static void wm831x_config_battery(struct wm831x *wm831x)
ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_1,
WM831X_CHG_ENA_MASK |
WM831X_CHG_FAST_MASK |
- WM831X_CHG_ITERM_MASK |
WM831X_CHG_ITERM_MASK,
reg1);
if (ret != 0)
@@ -612,6 +611,7 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
+ kfree(wm831x_power);
return 0;
}
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 3222fa3c808c..0f4a53bdaa3c 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -192,7 +192,7 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)
out:return 0;
}
-static struct device rio_bus = {
+struct device rio_bus = {
.init_name = "rapidio",
};
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 8070e074c739..1eb82c4c712e 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -48,7 +48,7 @@ DEFINE_SPINLOCK(rio_global_list_lock);
static int next_destid = 0;
static int next_switchid = 0;
static int next_net = 0;
-static int next_comptag;
+static int next_comptag = 1;
static struct timer_list rio_enum_timer =
TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -121,27 +121,6 @@ static int rio_clear_locks(struct rio_mport *port)
u32 result;
int ret = 0;
- /* Assign component tag to all devices */
- next_comptag = 1;
- rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
-
- list_for_each_entry(rdev, &rio_devices, global_list) {
- /* Mark device as discovered */
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
- &result);
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
- result | RIO_PORT_GEN_DISCOVERED);
-
- rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
- rdev->comp_tag = next_comptag++;
- if (next_comptag >= 0x10000) {
- pr_err("RIO: Component Tag Counter Overflow\n");
- break;
- }
- }
-
/* Release host device id locks */
rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
port->host_deviceid);
@@ -162,6 +141,15 @@ static int rio_clear_locks(struct rio_mport *port)
rdev->vid, rdev->did);
ret = -EINVAL;
}
+
+ /* Mark device as discovered and enable master */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ &result);
+ result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ result);
}
return ret;
@@ -420,11 +408,27 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
hopcount, RIO_EFB_ERR_MGMNT);
}
+ if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rdev->swpinfo);
+ }
+
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
&rdev->src_ops);
rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
&rdev->dst_ops);
+ if (do_enum) {
+ /* Assign component tag to device */
+ if (next_comptag >= 0x10000) {
+ pr_err("RIO: Component Tag Counter Overflow\n");
+ goto cleanup;
+ }
+ rio_mport_write_config_32(port, destid, hopcount,
+ RIO_COMPONENT_TAG_CSR, next_comptag);
+ rdev->comp_tag = next_comptag++;
+ }
+
if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
if (do_enum) {
rio_set_device_id(port, destid, hopcount, next_destid);
@@ -439,9 +443,10 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
/* If a PE has both switch and other functions, show it as a switch */
if (rio_is_switch(rdev)) {
- rio_mport_read_config_32(port, destid, hopcount,
- RIO_SWP_INFO_CAR, &rdev->swpinfo);
- rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
+ rswitch = kzalloc(sizeof(*rswitch) +
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
+ sizeof(rswitch->nextdev[0]),
+ GFP_KERNEL);
if (!rswitch)
goto cleanup;
rswitch->switchid = next_switchid;
@@ -458,6 +463,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rdid++)
rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
rdev->rswitch = rswitch;
+ rswitch->rdev = rdev;
dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
rdev->rswitch->switchid);
rio_switch_init(rdev, do_enum);
@@ -478,6 +484,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
}
rdev->dev.bus = &rio_bus_type;
+ rdev->dev.parent = &rio_bus;
device_initialize(&rdev->dev);
rdev->dev.release = rio_release_dev;
@@ -718,86 +725,53 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
}
/**
- * rio_get_swpinfo_inport- Gets the ingress port number
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns port number being used to access the switch device.
- */
-static u8
-rio_get_swpinfo_inport(struct rio_mport *mport, u16 destid, u8 hopcount)
-{
- u32 result;
-
- rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
- &result);
-
- return (u8) (result & 0xff);
-}
-
-/**
- * rio_get_swpinfo_tports- Gets total number of ports on the switch
- * @mport: Master port to send transaction
- * @destid: Destination ID associated with the switch
- * @hopcount: Number of hops to the device
- *
- * Returns total numbers of ports implemented by the switch device.
- */
-static u8 rio_get_swpinfo_tports(struct rio_mport *mport, u16 destid,
- u8 hopcount)
-{
- u32 result;
-
- rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR,
- &result);
-
- return RIO_GET_TOTAL_PORTS(result);
-}
-
-/**
- * rio_net_add_mport- Add a master port to a RIO network
- * @net: RIO network
- * @port: Master port to add
- *
- * Adds a master port to the network list of associated master
- * ports..
- */
-static void rio_net_add_mport(struct rio_net *net, struct rio_mport *port)
-{
- spin_lock(&rio_global_list_lock);
- list_add_tail(&port->nnode, &net->mports);
- spin_unlock(&rio_global_list_lock);
-}
-
-/**
* rio_enum_peer- Recursively enumerate a RIO network through a master port
* @net: RIO network being enumerated
* @port: Master port to send transactions
* @hopcount: Number of hops into the network
+ * @prev: Previous RIO device connected to the enumerated one
+ * @prev_port: Port on previous RIO device
*
* Recursively enumerates a RIO network. Transactions are sent via the
* master port passed in @port.
*/
static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
- u8 hopcount)
+ u8 hopcount, struct rio_dev *prev, int prev_port)
{
int port_num;
- int num_ports;
int cur_destid;
int sw_destid;
int sw_inport;
struct rio_dev *rdev;
u16 destid;
+ u32 regval;
int tmp;
+ if (rio_mport_chk_dev_access(port,
+ RIO_ANY_DESTID(port->sys_size), hopcount)) {
+ pr_debug("RIO: device access check failed\n");
+ return -1;
+ }
+
if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) {
pr_debug("RIO: PE already discovered by this host\n");
/*
* Already discovered by this host. Add it as another
- * master port for the current network.
+ * link to the existing device.
*/
- rio_net_add_mport(net, port);
+ rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size),
+ hopcount, RIO_COMPONENT_TAG_CSR, &regval);
+
+ if (regval) {
+ rdev = rio_get_comptag((regval & 0xffff), NULL);
+
+ if (rdev && prev && rio_is_switch(prev)) {
+ pr_debug("RIO: redundant path to %s\n",
+ rio_name(rdev));
+ prev->rswitch->nextdev[prev_port] = rdev;
+ }
+ }
+
return 0;
}
@@ -828,13 +802,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
if (rdev) {
/* Add device to the global and bus/net specific list. */
list_add_tail(&rdev->net_list, &net->devices);
+ rdev->prev = prev;
+ if (prev && rio_is_switch(prev))
+ prev->rswitch->nextdev[prev_port] = rdev;
} else
return -1;
if (rio_is_switch(rdev)) {
next_switchid++;
- sw_inport = rio_get_swpinfo_inport(port,
- RIO_ANY_DESTID(port->sys_size), hopcount);
+ sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
port->host_deviceid, sw_inport, 0);
rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -847,14 +823,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
rdev->rswitch->route_table[destid] = sw_inport;
}
- num_ports =
- rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
- hopcount);
pr_debug(
"RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
- rio_name(rdev), rdev->vid, rdev->did, num_ports);
+ rio_name(rdev), rdev->vid, rdev->did,
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo));
sw_destid = next_destid;
- for (port_num = 0; port_num < num_ports; port_num++) {
+ for (port_num = 0;
+ port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ port_num++) {
 /* Enable Input Output Port (transmitter receiver) */
rio_enable_rx_tx_port(port, 0,
RIO_ANY_DESTID(port->sys_size),
@@ -879,7 +855,8 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
RIO_ANY_DESTID(port->sys_size),
port_num, 0);
- if (rio_enum_peer(net, port, hopcount + 1) < 0)
+ if (rio_enum_peer(net, port, hopcount + 1,
+ rdev, port_num) < 0)
return -1;
/* Update routing tables */
@@ -945,10 +922,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
*/
static int rio_enum_complete(struct rio_mport *port)
{
- u32 tag_csr;
+ u32 regval;
- rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
- return (tag_csr & 0xffff) ? 1 : 0;
+ rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ &regval);
+ return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0;
}
/**
@@ -966,7 +944,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
u8 hopcount)
{
u8 port_num, route_port;
- int num_ports;
struct rio_dev *rdev;
u16 ndestid;
@@ -983,13 +960,14 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
/* Associated destid is how we accessed this switch */
rdev->rswitch->destid = destid;
- num_ports = rio_get_swpinfo_tports(port, destid, hopcount);
pr_debug(
"RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
- rio_name(rdev), rdev->vid, rdev->did, num_ports);
- for (port_num = 0; port_num < num_ports; port_num++) {
- if (rio_get_swpinfo_inport(port, destid, hopcount) ==
- port_num)
+ rio_name(rdev), rdev->vid, rdev->did,
+ RIO_GET_TOTAL_PORTS(rdev->swpinfo));
+ for (port_num = 0;
+ port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ port_num++) {
+ if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
continue;
if (rio_sport_is_active
@@ -1011,6 +989,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
break;
}
+ if (ndestid == RIO_ANY_DESTID(port->sys_size))
+ continue;
rio_unlock_device(port, destid, hopcount);
if (rio_disc_peer
(net, port, ndestid, hopcount + 1) < 0)
@@ -1108,8 +1088,7 @@ static void rio_update_route_tables(struct rio_mport *port)
if (rswitch->destid == destid)
continue;
- sport = rio_get_swpinfo_inport(port,
- rswitch->destid, rswitch->hopcount);
+ sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
if (rswitch->add_entry) {
rio_route_add_entry(port, rswitch,
@@ -1184,7 +1163,11 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
 /* Enable Input Output Port (transmitter receiver) */
rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
- if (rio_enum_peer(net, mport, 0) < 0) {
+ /* Set component tag for host */
+ rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
+ next_comptag++);
+
+ if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
/* A higher priority host won enumeration, bail. */
printk(KERN_INFO
"RIO: master port %d device has lost enumeration to a remote host\n",
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 00b475658356..137ed93ee33f 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -40,9 +40,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
char *str = buf;
int i;
- if (!rdev->rswitch)
- goto out;
-
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
i++) {
if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
@@ -52,7 +49,6 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
rdev->rswitch->route_table[i]);
}
- out:
return (str - buf);
}
@@ -63,10 +59,11 @@ struct device_attribute rio_dev_attrs[] = {
__ATTR_RO(asm_did),
__ATTR_RO(asm_vid),
__ATTR_RO(asm_rev),
- __ATTR_RO(routes),
__ATTR_NULL,
};
+static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL);
+
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -218,7 +215,17 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
{
int err = 0;
- err = sysfs_create_bin_file(&rdev->dev.kobj, &rio_config_attr);
+ err = device_create_bin_file(&rdev->dev, &rio_config_attr);
+
+ if (!err && rdev->rswitch) {
+ err = device_create_file(&rdev->dev, &dev_attr_routes);
+ if (!err && rdev->rswitch->sw_sysfs)
+ err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
+ }
+
+ if (err)
+ pr_warning("RIO: Failed to create attribute file(s) for %s\n",
+ rio_name(rdev));
return err;
}
@@ -231,5 +238,10 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)
*/
void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
{
- sysfs_remove_bin_file(&rdev->dev.kobj, &rio_config_attr);
+ device_remove_bin_file(&rdev->dev, &rio_config_attr);
+ if (rdev->rswitch) {
+ device_remove_file(&rdev->dev, &dev_attr_routes);
+ if (rdev->rswitch->sw_sysfs)
+ rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
+ }
}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 74e9d22d95fb..68cf0c99138a 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -443,7 +443,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
* @from is not %NULL, searches continue from next device on the global
* list.
*/
-static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
{
struct list_head *n;
struct rio_dev *rdev;
@@ -495,6 +495,232 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
}
/**
+ * rio_chk_dev_route - Validate route to the specified device.
+ * @rdev: RIO device that failed to respond
+ * @nrdev: Last active device on the route to rdev
+ * @npnum: nrdev's port number on the route to rdev
+ *
+ * Follows a route to the specified RIO device to determine the last available
+ * device (and corresponding RIO port) on the route.
+ */
+static int
+rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
+{
+ u32 result;
+ int p_port, dstid, rc = -EIO;
+ struct rio_dev *prev = NULL;
+
+ /* Find switch with failed RIO link */
+ while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
+ if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
+ prev = rdev->prev;
+ break;
+ }
+ rdev = rdev->prev;
+ }
+
+ if (prev == NULL)
+ goto err_out;
+
+ dstid = (rdev->pef & RIO_PEF_SWITCH) ?
+ rdev->rswitch->destid : rdev->destid;
+ p_port = prev->rswitch->route_table[dstid];
+
+ if (p_port != RIO_INVALID_ROUTE) {
+ pr_debug("RIO: link failed on [%s]-P%d\n",
+ rio_name(prev), p_port);
+ *nrdev = prev;
+ *npnum = p_port;
+ rc = 0;
+ } else
+ pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
+err_out:
+ return rc;
+}
+
+/**
+ * rio_mport_chk_dev_access - Validate access to the specified device.
+ * @mport: Master port to send transactions
+ * @destid: Device destination ID in network
+ * @hopcount: Number of hops into the network
+ */
+int
+rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
+{
+ int i = 0;
+ u32 tmp;
+
+ while (rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_DEV_ID_CAR, &tmp)) {
+ i++;
+ if (i == RIO_MAX_CHK_RETRY)
+ return -EIO;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+/**
+ * rio_chk_dev_access - Validate access to the specified device.
+ * @rdev: Pointer to RIO device control structure
+ */
+static int rio_chk_dev_access(struct rio_dev *rdev)
+{
+ u8 hopcount = 0xff;
+ u16 destid = rdev->destid;
+
+ if (rdev->rswitch) {
+ destid = rdev->rswitch->destid;
+ hopcount = rdev->rswitch->hopcount;
+ }
+
+ return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+}
+
+/**
+ * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
+ * returns link-response (if requested).
+ * @rdev: RIO device to issue Input-status command
+ * @pnum: Device port number to issue the command
+ * @lnkresp: Response from a link partner
+ */
+static int
+rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+ int checkcount;
+
+ if (lnkresp) {
+ /* Read from link maintenance response register
+ * to clear valid bit */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ &regval);
+ udelay(50);
+ }
+
+ /* Issue Input-status command */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+ RIO_MNT_REQ_CMD_IS);
+
+ /* Exit if the response is not expected */
+ if (lnkresp == NULL)
+ return 0;
+
+ checkcount = 3;
+ while (checkcount--) {
+ udelay(50);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ &regval);
+ if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
+ *lnkresp = regval;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+/**
+ * rio_clr_err_stopped - Clears port Error-stopped states.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to clear errors
+ * @err_status: port error status (if 0 reads register from device)
+ */
+static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
+ u32 regval;
+ u32 far_ackid, far_linkstat, near_ackid;
+
+ if (err_status == 0)
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+
+ if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+ pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
+ /*
+ * Send a Link-Request/Input-Status control symbol
+ */
+ if (rio_get_input_status(rdev, pnum, &regval)) {
+ pr_debug("RIO_EM: Input-status response timeout\n");
+ goto rd_err;
+ }
+
+ pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
+ pnum, regval);
+ far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
+ far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ &regval);
+ pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
+ near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
+ pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
+ " near_ackID=0x%02x\n",
+ pnum, far_ackid, far_linkstat, near_ackid);
+
+ /*
+ * If required, synchronize ackIDs of near and
+ * far sides.
+ */
+ if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
+ (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
+ /* Align near outstanding/outbound ackIDs with
+ * far inbound.
+ */
+ rio_mport_write_config_32(mport, destid,
+ hopcount, rdev->phys_efptr +
+ RIO_PORT_N_ACK_STS_CSR(pnum),
+ (near_ackid << 24) |
+ (far_ackid << 8) | far_ackid);
+ /* Align far outstanding/outbound ackIDs with
+ * near inbound.
+ */
+ far_ackid++;
+ if (nextdev)
+ rio_write_config_32(nextdev,
+ nextdev->phys_efptr +
+ RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
+ (far_ackid << 24) |
+ (near_ackid << 8) | near_ackid);
+ else
+ pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+ }
+rd_err:
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+ pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
+ rio_get_input_status(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
+ udelay(50);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ &err_status);
+ pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
+ }
+
+ return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+}
+
+/**
* rio_inb_pwrite_handler - process inbound port-write message
* @pw_msg: pointer to inbound port-write message
*
@@ -507,13 +733,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
struct rio_mport *mport;
u8 hopcount;
u16 destid;
- u32 err_status;
+ u32 err_status, em_perrdet, em_ltlerrdet;
int rc, portnum;
rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
if (rdev == NULL) {
- /* Someting bad here (probably enumeration error) */
- pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+ /* Device removed or enumeration error */
+ pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
__func__, pw_msg->em.comptag);
return -EIO;
}
@@ -524,12 +750,11 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
u32 i;
for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
- pr_debug("0x%02x: %08x %08x %08x %08x",
+ pr_debug("0x%02x: %08x %08x %08x %08x\n",
i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
i += 4;
}
- pr_debug("\n");
}
#endif
@@ -545,6 +770,26 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
return 0;
}
+ portnum = pw_msg->em.is_port & 0xFF;
+
+ /* Check if device and route to it are functional:
+ * Sometimes devices may send PW message(s) just before being
+ * powered down (or link being lost).
+ */
+ if (rio_chk_dev_access(rdev)) {
+ pr_debug("RIO: device access failed - get link partner\n");
+ /* Scan route to the device and identify failed link.
+ * This will replace device and port reported in PW message.
+ * PW message should not be used after this point.
+ */
+ if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
+ pr_err("RIO: Route trace for %s failed\n",
+ rio_name(rdev));
+ return -EIO;
+ }
+ pw_msg = NULL;
+ }
+
/* For End-point devices processing stops here */
if (!(rdev->pef & RIO_PEF_SWITCH))
return 0;
@@ -562,9 +807,6 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
/*
* Process the port-write notification from switch
*/
-
- portnum = pw_msg->em.is_port & 0xFF;
-
if (rdev->rswitch->em_handle)
rdev->rswitch->em_handle(rdev, portnum);
@@ -573,29 +815,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
&err_status);
pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
- if (pw_msg->em.errdetect) {
- pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
- portnum, pw_msg->em.errdetect);
- /* Clear EM Port N Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
- }
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
- if (pw_msg->em.ltlerrdet) {
- pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
- pw_msg->em.ltlerrdet);
- /* Clear EM L/T Layer Error Detect CSR */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
- }
+ if (!(rdev->rswitch->port_ok & (1 << portnum))) {
+ rdev->rswitch->port_ok |= (1 << portnum);
+ rio_set_port_lockout(rdev, portnum, 0);
+ /* Schedule Insertion Service */
+ pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+ rio_name(rdev), portnum);
+ }
- /* Clear Port Errors */
- rio_mport_write_config_32(mport, destid, hopcount,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+ /* Clear error-stopped states (if reported).
+ * Depending on the link partner state, two attempts
+ * may be needed for successful recovery.
+ */
+ if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+ if (rio_clr_err_stopped(rdev, portnum, err_status))
+ rio_clr_err_stopped(rdev, portnum, 0);
+ }
+ } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */
- if (rdev->rswitch->port_ok & (1 << portnum)) {
- if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+ if (rdev->rswitch->port_ok & (1 << portnum)) {
rdev->rswitch->port_ok &= ~(1 << portnum);
rio_set_port_lockout(rdev, portnum, 1);
@@ -608,21 +849,32 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
rio_name(rdev), portnum);
}
- } else {
- if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
- rdev->rswitch->port_ok |= (1 << portnum);
- rio_set_port_lockout(rdev, portnum, 0);
+ }
- /* Schedule Insertion Service */
- pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
- rio_name(rdev), portnum);
- }
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+ if (em_perrdet) {
+ pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+ portnum, em_perrdet);
+ /* Clear EM Port N Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+ }
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+ if (em_ltlerrdet) {
+ pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+ em_ltlerrdet);
+ /* Clear EM L/T Layer Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
}
- /* Clear Port-Write Pending bit */
+ /* Clear remaining error bits and Port-Write Pending bit */
rio_mport_write_config_32(mport, destid, hopcount,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- RIO_PORT_N_ERR_STS_PW_PEND);
+ err_status);
return 0;
}
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index f27b7a9c47d2..b1af414f15e6 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -14,6 +14,8 @@
#include <linux/list.h>
#include <linux/rio.h>
+#define RIO_MAX_CHK_RETRY 3
+
/* Functions internal to the RIO core code */
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
@@ -22,6 +24,8 @@ extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
u16 destid, u8 hopcount);
extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
+extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
+ u8 hopcount);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_enum_mport(struct rio_mport *mport);
extern int rio_disc_mport(struct rio_mport *mport);
@@ -34,6 +38,7 @@ extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
u8 hopcount, u16 table);
extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
+extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
/* Structures internal to the RIO core code */
extern struct device_attribute rio_dev_attrs[];
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 2b4e9b2b6631..f47fee5d4563 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -20,6 +20,13 @@ config RAPIDIO_TSI568
---help---
Includes support for IDT Tsi568 serial RapidIO switch.
+config RAPIDIO_CPS_GEN2
+ bool "IDT CPS Gen.2 SRIO switch support"
+ depends on RAPIDIO
+ default n
+ ---help---
+ Includes support for IDT CPS Gen.2 serial RapidIO switches.
+
config RAPIDIO_TSI500
bool "Tsi500 Parallel RapidIO switch support"
depends on RAPIDIO
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index fe4adc3e8d5f..48d67a6b98c8 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
+obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
new file mode 100644
index 000000000000..0bb871cb5c40
--- /dev/null
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -0,0 +1,447 @@
+/*
+ * IDT CPS Gen.2 Serial RapidIO switch family support
+ *
+ * Copyright 2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+#define LOCAL_RTE_CONF_DESTID_SEL 0x010070
+#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
+
+#define IDT_LT_ERR_REPORT_EN 0x03100c
+
+#define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40)
+#define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04
+
+#define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40)
+#define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c
+#define IDT_PORT_INIT_TX_ACQUIRED 0x00000020
+
+#define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100)
+#define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10
+
+#define IDT_DEV_CTRL_1 0xf2000c
+#define IDT_DEV_CTRL_1_GENPW 0x02000000
+#define IDT_DEV_CTRL_1_PRSTBEH 0x00000001
+
+#define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008
+#define IDT_CFGBLK_ERR_REPORT 0xf20014
+#define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002
+
+#define IDT_AUX_PORT_ERR_CAP_EN 0x020000
+#define IDT_AUX_ERR_REPORT_EN 0xf20018
+#define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002
+#define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001
+
+#define IDT_ISLTL_ADDRESS_CAP 0x021014
+
+#define IDT_RIO_DOMAIN 0xf20020
+#define IDT_RIO_DOMAIN_MASK 0x000000ff
+
+#define IDT_PW_INFO_CSR 0xf20024
+
+#define IDT_SOFT_RESET 0xf20040
+#define IDT_SOFT_RESET_REQ 0x00030097
+
+#define IDT_I2C_MCTRL 0xf20050
+#define IDT_I2C_MCTRL_GENPW 0x04000000
+
+#define IDT_JTAG_CTRL 0xf2005c
+#define IDT_JTAG_CTRL_GENPW 0x00000002
+
+#define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100)
+#define IDT_LANE_CTRL_BC 0xffff00
+#define IDT_LANE_CTRL_GENPW 0x00200000
+#define IDT_LANE_DFE_1_BC 0xffff18
+#define IDT_LANE_DFE_2_BC 0xffff1c
+
+#define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100)
+#define IDT_PORT_OPS_GENPW 0x08000000
+#define IDT_PORT_OPS_PL_ELOG 0x00000040
+#define IDT_PORT_OPS_LL_ELOG 0x00000020
+#define IDT_PORT_OPS_LT_ELOG 0x00000010
+#define IDT_PORT_OPS_BC 0xf4ff04
+
+#define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100)
+
+#define IDT_ERR_CAP 0xfd0000
+#define IDT_ERR_CAP_LOG_OVERWR 0x00000004
+
+#define IDT_ERR_RD 0xfd0004
+
+#define IDT_DEFAULT_ROUTE 0xde
+#define IDT_NO_ROUTE 0xdf
+
+static int
+idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ /*
+ * Select routing table to update
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ /*
+ * Program destination port for the specified destID
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ (u32)route_destid);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (u32)route_port);
+ udelay(10);
+
+ return 0;
+}
+
+static int
+idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ /*
+ * Select routing table to read
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)result;
+
+ return 0;
+}
+
+static int
+idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+
+ /*
+ * Select routing table to clear
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ for (i = RIO_STD_RTE_CONF_EXTCFGEN;
+ i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
+ (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
+ i += 4;
+ }
+
+ return 0;
+}
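Each pass of the loop above programs four consecutive destination IDs with one 32-bit write (the destID select is written with RIO_STD_RTE_CONF_EXTCFGEN set, which appears to enable the packed four-entry form); with IDT_DEFAULT_ROUTE in every byte the value written is 0xdededede. A tiny check:

#include <stdio.h>

#define IDT_DEFAULT_ROUTE 0xde	/* mirrors the definition above */

/* Show the packed value written for each group of four route entries. */
int main(void)
{
	unsigned int v = (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
			 (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE;

	printf("four entries per write: 0x%08x\n", v);
	return 0;
}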
+
+
+static int
+idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, (u32)sw_domain);
+ return 0;
+}
+
+static int
+idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, &regval);
+
+ *sw_domain = (u8)(regval & 0xff);
+
+ return 0;
+}
+
+static int
+idtg2_em_init(struct rio_dev *rdev)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+ int i, tmp;
+
+ /*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at upper level.
+ */
+
+ pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+
+ /* Set Port-Write info CSR: PRIO=3 and CRF=1 */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PW_INFO_CSR, 0x0000e000);
+
+ /*
+ * Configure LT LAYER error reporting.
+ */
+
+ /* Enable standard (RIO.p8) error reporting */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LT_ERR_REPORT_EN,
+ REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
+ REM_LTL_ERR_UNSUPTR);
+
+ /* Use Port-Writes for LT layer error reporting.
+ * Enable per-port reset
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_DEV_CTRL_1, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_DEV_CTRL_1,
+ regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
+
+ /*
+ * Configure PORT error reporting.
+ */
+
+ /* Report all RIO.p8 errors supported by device */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+
+ /* Configure reporting of implementation specific errors/events */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+
+ /* Use Port-Writes for port error reporting and enable error logging */
+ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ for (i = 0; i < tmp; i++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_PORT_OPS(i), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
+ IDT_PORT_OPS_PL_ELOG |
+ IDT_PORT_OPS_LL_ELOG |
+ IDT_PORT_OPS_LT_ELOG);
+ }
+ /* Overwrite error log if full */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+
+ /*
+ * Configure LANE error reporting.
+ */
+
+ /* Disable lane error reporting */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LANE_ERR_REPORT_EN_BC, 0);
+
+ /* Use Port-Writes for lane error reporting (when enabled)
+ * (do per-lane update because lanes may have different configuration)
+ */
+ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
+ for (i = 0; i < tmp; i++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_LANE_CTRL(i), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+ }
+
+ /*
+ * Configure AUX error reporting.
+ */
+
+ /* Disable JTAG and I2C Error capture */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_AUX_PORT_ERR_CAP_EN, 0);
+
+ /* Disable JTAG and I2C Error reporting/logging */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_AUX_ERR_REPORT_EN, 0);
+
+ /* Disable Port-Write notification from JTAG */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_JTAG_CTRL, 0);
+
+ /* Disable Port-Write notification from I2C */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_I2C_MCTRL, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_I2C_MCTRL,
+ regval & ~IDT_I2C_MCTRL_GENPW);
+
+ /*
+ * Configure CFG_BLK error reporting.
+ */
+
+ /* Disable Configuration Block error capture */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+
+ /* Disable Port-Writes for Configuration Block error reporting */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_REPORT, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_CFGBLK_ERR_REPORT,
+ regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+
+ return 0;
+}
+
+static int
+idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval, em_perrdet, em_ltlerrdet;
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
+ if (em_ltlerrdet) {
+ /* Service Logical/Transport Layer Error(s) */
+ if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
+ /* Implementation specific error reported */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_ISLTL_ADDRESS_CAP, &regval);
+
+ pr_debug("RIO: %s Implementation Specific LTL errors" \
+ " 0x%x @(0x%x)\n",
+ rio_name(rdev), em_ltlerrdet, regval);
+
+ /* Clear implementation specific address capture CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_ISLTL_ADDRESS_CAP, 0);
+
+ }
+ }
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
+ if (em_perrdet) {
+ /* Service Port-Level Error(s) */
+ if (em_perrdet & REM_PED_IMPL_SPEC) {
+ /* Implementation Specific port error reported */
+
+ /* Get IS errors reported */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_DET(portnum), &regval);
+
+ pr_debug("RIO: %s Implementation Specific Port" \
+ " errors 0x%x\n", rio_name(rdev), regval);
+
+ /* Clear all implementation specific events */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_PORT_ISERR_DET(portnum), 0);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t
+idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ ssize_t len = 0;
+ u32 regval;
+
+ while (!rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_ERR_RD, &regval)) {
+ if (!regval) /* 0 = end of log */
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%08x\n", regval);
+ if (len >= (PAGE_SIZE - 10))
+ break;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
+
+static int idtg2_sysfs(struct rio_dev *rdev, int create)
+{
+ struct device *dev = &rdev->dev;
+ int err = 0;
+
+ if (create == RIO_SW_SYSFS_CREATE) {
+ /* Initialize sysfs entries */
+ err = device_create_file(dev, &dev_attr_errlog);
+ if (err)
+ dev_err(dev, "Unable create sysfs errlog file\n");
+ } else
+ device_remove_file(dev, &dev_attr_errlog);
+
+ return err;
+}
+
+static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = idtg2_route_add_entry;
+ rdev->rswitch->get_entry = idtg2_route_get_entry;
+ rdev->rswitch->clr_table = idtg2_route_clr_table;
+ rdev->rswitch->set_domain = idtg2_set_domain;
+ rdev->rswitch->get_domain = idtg2_get_domain;
+ rdev->rswitch->em_init = idtg2_em_init;
+ rdev->rswitch->em_handle = idtg2_em_handler;
+ rdev->rswitch->sw_sysfs = idtg2_sysfs;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 2c790c144f89..fc9f6374f759 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,6 +117,10 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
rdev->rswitch->add_entry = idtcps_route_add_entry;
rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -126,6 +130,12 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
rdev->rswitch->em_init = NULL;
rdev->rswitch->em_handle = NULL;
+ if (do_enum) {
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+ }
+
return 0;
}
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index f7fd7898606e..b9a389b9f812 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -29,7 +29,7 @@
#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-#define TSI568_SP_MODE_BC 0x10004
+#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
#define TSI568_SP_MODE_PW_DIS 0x08000000
static int
@@ -117,14 +117,19 @@ tsi568_em_init(struct rio_dev *rdev)
u16 destid = rdev->rswitch->destid;
u8 hopcount = rdev->rswitch->hopcount;
u32 regval;
+ int portnum;
pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
/* Make sure that Port-Writes are disabled (for all ports) */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI568_SP_MODE_BC, &regval);
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+ for (portnum = 0;
+ portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE(portnum), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE(portnum),
+ regval | TSI568_SP_MODE_PW_DIS);
+ }
return 0;
}
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index d34df722d95f..2003fb63c404 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -166,7 +166,8 @@ tsi57x_em_init(struct rio_dev *rdev)
pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
- for (portnum = 0; portnum < 16; portnum++) {
+ for (portnum = 0;
+ portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
/* Make sure that Port-Writes are enabled (for all ports) */
rio_mport_read_config_32(mport, destid, hopcount,
TSI578_SP_MODE(portnum), &regval);
@@ -205,6 +206,10 @@ tsi57x_em_init(struct rio_dev *rdev)
portnum++;
}
+ /* set TVAL = ~50us */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
+
return 0;
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 48ca7132cc05..6a77437d4f5a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -171,7 +171,8 @@ config RTC_DRV_DS3232
depends on RTC_CLASS && I2C
help
If you say yes here you get support for Dallas Semiconductor
- DS3232 real-time clock chips.
+ DS3232 real-time clock chips. If an interrupt is associated
+ with the device, the alarm functionality is supported.
This driver can also be built as a module. If so, the module
will be called rtc-ds3232.
@@ -765,15 +766,15 @@ config RTC_DRV_AT32AP700X
AT32AP700x family processors.
config RTC_DRV_AT91RM9200
- tristate "AT91RM9200 or AT91SAM9RL"
- depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL
+ tristate "AT91RM9200 or some AT91SAM9 RTC"
+ depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
Driver for the internal RTC (Realtime Clock) module found on
- Atmel AT91RM9200's and AT91SAM9RL chips. On SAM9RL chips
+ Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
this is powered by the backup power supply.
config RTC_DRV_AT91SAM9
- tristate "AT91SAM9x/AT91CAP9"
+ tristate "AT91SAM9x/AT91CAP9 RTT as RTC"
depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40)
help
RTC driver for the Atmel AT91SAM9x and AT91CAP9 internal RTT
@@ -781,8 +782,8 @@ config RTC_DRV_AT91SAM9
supply (such as a small coin cell battery), but do not need to
be used as RTCs.
- (On AT91SAM9rl chips you probably want to use the dedicated RTC
- module and leave the RTT available for other uses.)
+ (On AT91SAM9rl and AT91SAM9G45 chips you probably want to use the
+ dedicated RTC module and leave the RTT available for other uses.)
config RTC_DRV_AT91SAM9_RTT
int
@@ -952,4 +953,13 @@ config RTC_DRV_JZ4740
 This driver can also be built as a module. If so, the module
will be called rtc-jz4740.
+config RTC_DRV_LPC32XX
+ depends on ARCH_LPC32XX
+ tristate "NXP LPC32XX RTC"
+ help
+ This enables support for the NXP RTC in the LPC32XX SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-lpc32xx.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0f207b3b5833..7a7cb3228a1d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
+obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 565562ba6ac9..e6539cbabb35 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -158,8 +158,10 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
- if (err)
+ if (err) {
+ put_device(&rtc->dev);
goto exit_kfree;
+ }
rtc_dev_add_device(rtc);
rtc_sysfs_add_device(rtc);
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index d4fb82d85e9b..b4b6087f2234 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -2,7 +2,7 @@
* Blackfin On-Chip Real Time Clock Driver
* Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -183,29 +183,33 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
struct bfin_rtc *rtc = dev_get_drvdata(dev);
unsigned long events = 0;
bool write_complete = false;
- u16 rtc_istat, rtc_ictl;
+ u16 rtc_istat, rtc_istat_clear, rtc_ictl, bits;
dev_dbg_stamp(dev);
rtc_istat = bfin_read_RTC_ISTAT();
rtc_ictl = bfin_read_RTC_ICTL();
+ rtc_istat_clear = 0;
- if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+ bits = RTC_ISTAT_WRITE_COMPLETE;
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
write_complete = true;
complete(&bfin_write_complete);
}
- if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ bits = (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ if (rtc_ictl & bits) {
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
events |= RTC_AF | RTC_IRQF;
}
}
- if (rtc_ictl & RTC_ISTAT_SEC) {
- if (rtc_istat & RTC_ISTAT_SEC) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+ bits = RTC_ISTAT_SEC;
+ if (rtc_ictl & bits) {
+ if (rtc_istat & bits) {
+ rtc_istat_clear |= bits;
events |= RTC_UF | RTC_IRQF;
}
}
@@ -213,9 +217,10 @@ static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
if (events)
rtc_update_irq(rtc->rtc_dev, 1, events);
- if (write_complete || events)
+ if (write_complete || events) {
+ bfin_write_RTC_ISTAT(rtc_istat_clear);
return IRQ_HANDLED;
- else
+ } else
return IRQ_NONE;
}
@@ -422,9 +427,13 @@ static int __devexit bfin_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
- if (device_may_wakeup(&pdev->dev)) {
+ struct device *dev = &pdev->dev;
+
+ dev_dbg_stamp(dev);
+
+ if (device_may_wakeup(dev)) {
enable_irq_wake(IRQ_RTC);
- bfin_rtc_sync_pending(&pdev->dev);
+ bfin_rtc_sync_pending(dev);
} else
bfin_rtc_int_clear(0);
@@ -433,7 +442,11 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
static int bfin_rtc_resume(struct platform_device *pdev)
{
- if (device_may_wakeup(&pdev->dev))
+ struct device *dev = &pdev->dev;
+
+ dev_dbg_stamp(dev);
+
+ if (device_may_wakeup(dev))
disable_irq_wake(IRQ_RTC);
/*
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 9de8516e3531..57063552d3b7 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -2,6 +2,7 @@
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
* Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -175,6 +176,182 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
DS3232_REG_SECONDS, 7, buf);
}
+/*
+ * The DS3232 has two alarms; we only use alarm1.
+ * Per the Linux RTC interface, only one-shot alarms are supported,
+ * not periodic alarm mode.
+ */
+static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control, stat;
+ int ret;
+ u8 buf[4];
+
+ mutex_lock(&ds3232->mutex);
+
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (ret < 0)
+ goto out;
+ stat = ret;
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (ret < 0)
+ goto out;
+ control = ret;
+ ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto out;
+
+ alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+ alarm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+ alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
+ alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
+
+ alarm->time.tm_mon = -1;
+ alarm->time.tm_year = -1;
+ alarm->time.tm_wday = -1;
+ alarm->time.tm_yday = -1;
+ alarm->time.tm_isdst = -1;
+
+ alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
+ alarm->pending = !!(stat & DS3232_REG_SR_A1F);
+
+ ret = 0;
+out:
+ mutex_unlock(&ds3232->mutex);
+ return ret;
+}
+
+/*
+ * The Linux RTC core does not support weekday alarms,
+ * and only 24h time mode is supported.
+ */
+static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control, stat;
+ int ret;
+ u8 buf[4];
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ mutex_lock(&ds3232->mutex);
+
+ buf[0] = bin2bcd(alarm->time.tm_sec);
+ buf[1] = bin2bcd(alarm->time.tm_min);
+ buf[2] = bin2bcd(alarm->time.tm_hour);
+ buf[3] = bin2bcd(alarm->time.tm_mday);
+
+ /* clear alarm interrupt enable bit */
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (ret < 0)
+ goto out;
+ control = ret;
+ control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ if (ret < 0)
+ goto out;
+
+ /* clear any pending alarm flag */
+ ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
+ if (ret < 0)
+ goto out;
+ stat = ret;
+ stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+ if (ret < 0)
+ goto out;
+
+ ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+
+ if (alarm->enabled) {
+ control |= DS3232_REG_CR_A1IE;
+ ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ }
+out:
+ mutex_unlock(&ds3232->mutex);
+ return ret;
+}
+
+static void ds3232_update_alarm(struct i2c_client *client)
+{
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ int control;
+ int ret;
+ u8 buf[4];
+
+ mutex_lock(&ds3232->mutex);
+
+ ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto unlock;
+
+ buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[0];
+ buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[1];
+ buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[2];
+ buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
+ 0x80 : buf[3];
+
+ ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ if (ret < 0)
+ goto unlock;
+
+ control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
+ if (control < 0)
+ goto unlock;
+
+ if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+ /* enable alarm1 interrupt */
+ control |= DS3232_REG_CR_A1IE;
+ else
+ /* disable alarm1 interrupt */
+ control &= ~(DS3232_REG_CR_A1IE);
+ i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+
+unlock:
+ mutex_unlock(&ds3232->mutex);
+}
+
+static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ if (enabled)
+ ds3232->rtc->irq_data |= RTC_AF;
+ else
+ ds3232->rtc->irq_data &= ~RTC_AF;
+
+ ds3232_update_alarm(client);
+ return 0;
+}
+
+static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = i2c_get_clientdata(client);
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ if (enabled)
+ ds3232->rtc->irq_data |= RTC_UF;
+ else
+ ds3232->rtc->irq_data &= ~RTC_UF;
+
+ ds3232_update_alarm(client);
+ return 0;
+}
+
static irqreturn_t ds3232_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -222,6 +399,10 @@ unlock:
static const struct rtc_class_ops ds3232_rtc_ops = {
.read_time = ds3232_read_time,
.set_time = ds3232_set_time,
+ .read_alarm = ds3232_read_alarm,
+ .set_alarm = ds3232_set_alarm,
+ .alarm_irq_enable = ds3232_alarm_irq_enable,
+ .update_irq_enable = ds3232_update_irq_enable,
};
static int __devinit ds3232_probe(struct i2c_client *client,
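With read_alarm/set_alarm/alarm_irq_enable wired into ds3232_rtc_ops above, the
one-shot alarm becomes reachable through the standard RTC character-device
interface. A minimal userspace sketch, assuming the chip registers as /dev/rtc0
(the node name is an assumption, not part of this patch):

/* illustrative one-shot alarm test via the RTC chardev UAPI */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time now;
	struct rtc_wkalrm alarm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, RTC_RD_TIME, &now);		/* handled by ds3232_read_time() */

	memset(&alarm, 0, sizeof(alarm));
	alarm.time = now;
	alarm.time.tm_min += 1;			/* ~1 minute out; no rollover handling */
	alarm.enabled = 1;
	ioctl(fd, RTC_WKALM_SET, &alarm);	/* ends up in ds3232_set_alarm() */

	ioctl(fd, RTC_WKALM_RD, &alarm);	/* read back via ds3232_read_alarm() */
	printf("alarm %02d:%02d:%02d enabled=%d pending=%d\n",
	       alarm.time.tm_hour, alarm.time.tm_min, alarm.time.tm_sec,
	       alarm.enabled, alarm.pending);

	close(fd);
	return 0;
}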
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 2619d57b91d7..2e16f72c9056 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
* JZ4740 SoC RTC driver
*
* This program is free software; you can redistribute it and/or modify it
@@ -161,7 +162,8 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs);
if (!ret)
- ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled);
+ ret = jz4740_rtc_ctrl_set_bits(rtc,
+ JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled);
return ret;
}
@@ -258,6 +260,8 @@ static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
+
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
@@ -318,12 +322,43 @@ static int __devexit jz4740_rtc_remove(struct platform_device *pdev)
return 0;
}
+
+#ifdef CONFIG_PM
+static int jz4740_rtc_suspend(struct device *dev)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static int jz4740_rtc_resume(struct device *dev)
+{
+ struct jz4740_rtc *rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc->irq);
+ return 0;
+}
+
+static const struct dev_pm_ops jz4740_pm_ops = {
+ .suspend = jz4740_rtc_suspend,
+ .resume = jz4740_rtc_resume,
+};
+#define JZ4740_RTC_PM_OPS (&jz4740_pm_ops)
+
+#else
+#define JZ4740_RTC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
struct platform_driver jz4740_rtc_driver = {
- .probe = jz4740_rtc_probe,
- .remove = __devexit_p(jz4740_rtc_remove),
- .driver = {
- .name = "jz4740-rtc",
+ .probe = jz4740_rtc_probe,
+ .remove = __devexit_p(jz4740_rtc_remove),
+ .driver = {
+ .name = "jz4740-rtc",
.owner = THIS_MODULE,
+ .pm = JZ4740_RTC_PM_OPS,
},
};
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
new file mode 100644
index 000000000000..ec8701ce99f9
--- /dev/null
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/*
+ * Clock and Power control register offsets
+ */
+#define LPC32XX_RTC_UCOUNT 0x00
+#define LPC32XX_RTC_DCOUNT 0x04
+#define LPC32XX_RTC_MATCH0 0x08
+#define LPC32XX_RTC_MATCH1 0x0C
+#define LPC32XX_RTC_CTRL 0x10
+#define LPC32XX_RTC_INTSTAT 0x14
+#define LPC32XX_RTC_KEY 0x18
+#define LPC32XX_RTC_SRAM 0x80
+
+#define LPC32XX_RTC_CTRL_MATCH0 (1 << 0)
+#define LPC32XX_RTC_CTRL_MATCH1 (1 << 1)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH0 (1 << 2)
+#define LPC32XX_RTC_CTRL_ONSW_MATCH1 (1 << 3)
+#define LPC32XX_RTC_CTRL_SW_RESET (1 << 4)
+#define LPC32XX_RTC_CTRL_CNTR_DIS (1 << 6)
+#define LPC32XX_RTC_CTRL_ONSW_FORCE_HI (1 << 7)
+
+#define LPC32XX_RTC_INTSTAT_MATCH0 (1 << 0)
+#define LPC32XX_RTC_INTSTAT_MATCH1 (1 << 1)
+#define LPC32XX_RTC_INTSTAT_ONSW (1 << 2)
+
+#define LPC32XX_RTC_KEY_ONSW_LOADVAL 0xB5C13F27
+
+#define RTC_NAME "rtc-lpc32xx"
+
+#define rtc_readl(dev, reg) \
+ __raw_readl((dev)->rtc_base + (reg))
+#define rtc_writel(dev, reg, val) \
+ __raw_writel((val), (dev)->rtc_base + (reg))
+
+struct lpc32xx_rtc {
+ void __iomem *rtc_base;
+ int irq;
+ unsigned char alarm_enabled;
+ struct rtc_device *rtc;
+ spinlock_t lock;
+};
+
+static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time)
+{
+ unsigned long elapsed_sec;
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+ elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT);
+ rtc_time_to_tm(elapsed_sec, time);
+
+ return rtc_valid_tm(time);
+}
+
+static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 tmp;
+
+ spin_lock_irq(&rtc->lock);
+
+ /* RTC must be disabled during count update */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_CNTR_DIS);
+ rtc_writel(rtc, LPC32XX_RTC_UCOUNT, secs);
+ rtc_writel(rtc, LPC32XX_RTC_DCOUNT, 0xFFFFFFFF - secs);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp &= ~LPC32XX_RTC_CTRL_CNTR_DIS);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_read_alarm(struct device *dev,
+ struct rtc_wkalrm *wkalrm)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time);
+ wkalrm->enabled = rtc->alarm_enabled;
+ wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) &
+ LPC32XX_RTC_INTSTAT_MATCH0);
+
+ return rtc_valid_tm(&wkalrm->time);
+}
+
+static int lpc32xx_rtc_set_alarm(struct device *dev,
+ struct rtc_wkalrm *wkalrm)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long alarmsecs;
+ u32 tmp;
+ int ret;
+
+ ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to convert time: %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irq(&rtc->lock);
+
+ /* Disable alarm during update */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, alarmsecs);
+
+ rtc->alarm_enabled = wkalrm->enabled;
+ if (wkalrm->enabled) {
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+ LPC32XX_RTC_INTSTAT_MATCH0);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp |
+ LPC32XX_RTC_CTRL_MATCH0);
+ }
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct lpc32xx_rtc *rtc = dev_get_drvdata(dev);
+ u32 tmp;
+
+ spin_lock_irq(&rtc->lock);
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+
+ if (enabled) {
+ rtc->alarm_enabled = 1;
+ tmp |= LPC32XX_RTC_CTRL_MATCH0;
+ } else {
+ rtc->alarm_enabled = 0;
+ tmp &= ~LPC32XX_RTC_CTRL_MATCH0;
+ }
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev)
+{
+ struct lpc32xx_rtc *rtc = dev;
+
+ spin_lock(&rtc->lock);
+
+ /* Disable alarm interrupt */
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+ ~LPC32XX_RTC_CTRL_MATCH0);
+ rtc->alarm_enabled = 0;
+
+ /*
+ * Write a large value to the match register so the RTC won't
+ * keep re-asserting the match status
+ */
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0);
+
+ spin_unlock(&rtc->lock);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops lpc32xx_rtc_ops = {
+ .read_time = lpc32xx_rtc_read_time,
+ .set_mmss = lpc32xx_rtc_set_mmss,
+ .read_alarm = lpc32xx_rtc_read_alarm,
+ .set_alarm = lpc32xx_rtc_set_alarm,
+ .alarm_irq_enable = lpc32xx_rtc_alarm_irq_enable,
+};
+
+static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct lpc32xx_rtc *rtc;
+ resource_size_t size;
+ int rtcirq;
+ u32 tmp;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Can't get memory resource\n");
+ return -ENOENT;
+ }
+
+ rtcirq = platform_get_irq(pdev, 0);
+ if (rtcirq < 0 || rtcirq >= NR_IRQS) {
+ dev_warn(&pdev->dev, "Can't get interrupt resource\n");
+ rtcirq = -1;
+ }
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (unlikely(!rtc)) {
+ dev_err(&pdev->dev, "Can't allocate memory\n");
+ return -ENOMEM;
+ }
+ rtc->irq = rtcirq;
+
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, size,
+ pdev->name)) {
+ dev_err(&pdev->dev, "RTC registers are not free\n");
+ return -EBUSY;
+ }
+
+ rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size);
+ if (!rtc->rtc_base) {
+ dev_err(&pdev->dev, "Can't map memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&rtc->lock);
+
+ /*
+ * The RTC is on a separate power domain and can keep its state
+ * across a chip power cycle. If the RTC has never been set up
+ * before, set it up now for the first time.
+ */
+ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL);
+ if (rtc_readl(rtc, LPC32XX_RTC_KEY) != LPC32XX_RTC_KEY_ONSW_LOADVAL) {
+ tmp &= ~(LPC32XX_RTC_CTRL_SW_RESET |
+ LPC32XX_RTC_CTRL_CNTR_DIS |
+ LPC32XX_RTC_CTRL_MATCH0 |
+ LPC32XX_RTC_CTRL_MATCH1 |
+ LPC32XX_RTC_CTRL_ONSW_MATCH0 |
+ LPC32XX_RTC_CTRL_ONSW_MATCH1 |
+ LPC32XX_RTC_CTRL_ONSW_FORCE_HI);
+ rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp);
+
+ /* Clear latched interrupt states */
+ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF);
+ rtc_writel(rtc, LPC32XX_RTC_INTSTAT,
+ LPC32XX_RTC_INTSTAT_MATCH0 |
+ LPC32XX_RTC_INTSTAT_MATCH1 |
+ LPC32XX_RTC_INTSTAT_ONSW);
+
+ /* Write key value to RTC so it won't reload on reset */
+ rtc_writel(rtc, LPC32XX_RTC_KEY,
+ LPC32XX_RTC_KEY_ONSW_LOADVAL);
+ } else {
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ tmp & ~LPC32XX_RTC_CTRL_MATCH0);
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ rtc->rtc = rtc_device_register(RTC_NAME, &pdev->dev, &lpc32xx_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ dev_err(&pdev->dev, "Can't get RTC\n");
+ platform_set_drvdata(pdev, NULL);
+ return PTR_ERR(rtc->rtc);
+ }
+
+ /*
+ * The IRQ is requested only after device registration, in case
+ * an alarm IRQ is already pending when resuming from suspend.
+ */
+ if (rtc->irq >= 0) {
+ if (devm_request_irq(&pdev->dev, rtc->irq,
+ lpc32xx_rtc_alarm_interrupt,
+ IRQF_DISABLED, pdev->name, rtc) < 0) {
+ dev_warn(&pdev->dev, "Can't request interrupt.\n");
+ rtc->irq = -1;
+ } else {
+ device_init_wakeup(&pdev->dev, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0)
+ device_init_wakeup(&pdev->dev, 0);
+
+ platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(rtc->rtc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0) {
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(rtc->irq);
+ else
+ disable_irq_wake(rtc->irq);
+ }
+
+ return 0;
+}
+
+static int lpc32xx_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev))
+ disable_irq_wake(rtc->irq);
+
+ return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int lpc32xx_rtc_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ spin_lock_irq(&rtc->lock);
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) &
+ ~LPC32XX_RTC_CTRL_MATCH0);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int lpc32xx_rtc_thaw(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (rtc->alarm_enabled) {
+ spin_lock_irq(&rtc->lock);
+
+ rtc_writel(rtc, LPC32XX_RTC_CTRL,
+ rtc_readl(rtc, LPC32XX_RTC_CTRL) |
+ LPC32XX_RTC_CTRL_MATCH0);
+
+ spin_unlock_irq(&rtc->lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
+ .suspend = lpc32xx_rtc_suspend,
+ .resume = lpc32xx_rtc_resume,
+ .freeze = lpc32xx_rtc_freeze,
+ .thaw = lpc32xx_rtc_thaw,
+ .restore = lpc32xx_rtc_resume
+};
+
+#define LPC32XX_RTC_PM_OPS (&lpc32xx_rtc_pm_ops)
+#else
+#define LPC32XX_RTC_PM_OPS NULL
+#endif
+
+static struct platform_driver lpc32xx_rtc_driver = {
+ .probe = lpc32xx_rtc_probe,
+ .remove = __devexit_p(lpc32xx_rtc_remove),
+ .driver = {
+ .name = RTC_NAME,
+ .owner = THIS_MODULE,
+ .pm = LPC32XX_RTC_PM_OPS
+ },
+};
+
+static int __init lpc32xx_rtc_init(void)
+{
+ return platform_driver_register(&lpc32xx_rtc_driver);
+}
+module_init(lpc32xx_rtc_init);
+
+static void __exit lpc32xx_rtc_exit(void)
+{
+ platform_driver_unregister(&lpc32xx_rtc_driver);
+}
+module_exit(lpc32xx_rtc_exit);
+
+MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
+MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc-lpc32xx");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 64d9727b7229..73377b0d65da 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -34,7 +34,8 @@
* Board-specific wiring options include using split power mode with
* RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
* and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
- * low power modes). See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ * low power modes) for OMAP1 boards (OMAP-L138 has this built into
+ * the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
*/
#define OMAP_RTC_BASE 0xfffb4800
@@ -401,16 +402,17 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
*
- * - Boards wired so that RTC_WAKE_INT does something, and muxed
- * right (W13_1610_RTC_WAKE_INT is the default after chip reset),
- * should initialize the device wakeup flag appropriately.
+ * - Device wake-up capability setting should come through chip
+ * init logic. OMAP1 boards should initialize the "wakeup capable"
+ * flag in the platform device if the board is wired so that
+ * the RTC alarm can wake the system. On OMAP-L138, this
+ * capability is provided by the SoC's "Deep Sleep" support.
*
* - Boards wired so RTC_ON_nOFF is used as the reset signal,
* rather than nPWRON_RESET, should forcibly enable split
* power mode. (Some chip errata report that RTC_CTRL_SPLIT
* is write-only, and always reads as zero...)
*/
- device_init_wakeup(&pdev->dev, 0);
if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
pr_info("%s: split power mode\n", pdev->name);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index f57a87f4ae96..cf953ecbfca9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -100,7 +100,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
spin_lock_irq(&s3c_rtc_pie_lock);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
tmp &= ~S3C64XX_RTCCON_TICEN;
if (enabled)
@@ -171,8 +171,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
goto retry_get_time;
}
- pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
- rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
@@ -185,7 +185,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
rtc_tm->tm_mon -= 1;
- return 0;
+ return rtc_valid_tm(rtc_tm);
}
static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
@@ -193,8 +193,8 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
- tm->tm_year, tm->tm_mon, tm->tm_mday,
+ pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* we get around y2k by simply not supporting it */
@@ -231,9 +231,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
- pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ pr_debug("read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alm_en,
- alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
@@ -242,34 +242,34 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
else
- alm_tm->tm_sec = 0xff;
+ alm_tm->tm_sec = -1;
if (alm_en & S3C2410_RTCALM_MINEN)
alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
else
- alm_tm->tm_min = 0xff;
+ alm_tm->tm_min = -1;
if (alm_en & S3C2410_RTCALM_HOUREN)
alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
else
- alm_tm->tm_hour = 0xff;
+ alm_tm->tm_hour = -1;
if (alm_en & S3C2410_RTCALM_DAYEN)
alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
else
- alm_tm->tm_mday = 0xff;
+ alm_tm->tm_mday = -1;
if (alm_en & S3C2410_RTCALM_MONEN) {
alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
alm_tm->tm_mon -= 1;
} else {
- alm_tm->tm_mon = 0xff;
+ alm_tm->tm_mon = -1;
}
if (alm_en & S3C2410_RTCALM_YEAREN)
alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
else
- alm_tm->tm_year = 0xffff;
+ alm_tm->tm_year = -1;
return 0;
}
@@ -280,10 +280,10 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
- pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
- tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
- tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
@@ -318,7 +318,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
unsigned int ticnt;
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt &= S3C64XX_RTCCON_TICEN;
} else {
ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
@@ -379,7 +379,8 @@ static const struct rtc_class_ops s3c_rtcops = {
.set_alarm = s3c_rtc_setalarm,
.irq_set_freq = s3c_rtc_setfreq,
.irq_set_state = s3c_rtc_setpie,
- .proc = s3c_rtc_proc,
+ .proc = s3c_rtc_proc,
+ .alarm_irq_enable = s3c_rtc_setaie,
};
static void s3c_rtc_enable(struct platform_device *pdev, int en)
@@ -391,11 +392,11 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
return;
if (!en) {
- tmp = readb(base + S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
tmp &= ~S3C64XX_RTCCON_TICEN;
tmp &= ~S3C2410_RTCCON_RTCEN;
- writeb(tmp, base + S3C2410_RTCCON);
+ writew(tmp, base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C2410) {
tmp = readb(base + S3C2410_TICNT);
@@ -405,25 +406,28 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
} else {
/* re-enable the device, and check it is ok */
- if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
+ if ((readw(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp | S3C2410_RTCCON_RTCEN,
+ base + S3C2410_RTCCON);
}
- if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
+ if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)) {
dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp & ~S3C2410_RTCCON_CNTSEL,
+ base + S3C2410_RTCCON);
}
- if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
+ if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)) {
dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
- tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
+ tmp = readw(base + S3C2410_RTCCON);
+ writew(tmp & ~S3C2410_RTCCON_CLKRST,
+ base + S3C2410_RTCCON);
}
}
}
@@ -452,8 +456,8 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
+ struct rtc_time rtc_tm;
struct resource *res;
- unsigned int tmp, i;
int ret;
pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -514,8 +518,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
- pr_debug("s3c2410_rtc: RTCCON=%02x\n",
- readb(s3c_rtc_base + S3C2410_RTCCON));
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readw(s3c_rtc_base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
@@ -534,11 +538,19 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
/* Check RTC Time */
- for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) {
- tmp = readb(s3c_rtc_base + i);
+ s3c_rtc_gettime(NULL, &rtc_tm);
+
+ if (rtc_valid_tm(&rtc_tm)) {
+ rtc_tm.tm_year = 100;
+ rtc_tm.tm_mon = 0;
+ rtc_tm.tm_mday = 1;
+ rtc_tm.tm_hour = 0;
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_sec = 0;
+
+ s3c_rtc_settime(NULL, &rtc_tm);
- if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9)
- writeb(0, s3c_rtc_base + i);
+ dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
}
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
@@ -578,7 +590,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
/* save TICNT for anyone using periodic interrupts */
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
}
s3c_rtc_enable(pdev, 0);
@@ -596,8 +608,8 @@ static int s3c_rtc_resume(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
- writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
if (device_may_wakeup(&pdev->dev))
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 8e03e7600239..2ea91e049255 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -51,6 +51,10 @@ source "drivers/staging/cx25821/Kconfig"
source "drivers/staging/tm6000/Kconfig"
+source "drivers/staging/cpia/Kconfig"
+
+source "drivers/staging/stradis/Kconfig"
+
source "drivers/staging/usbip/Kconfig"
source "drivers/staging/winbond/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0e7d7559d379..c359fbb0e04a 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_TM6000) += tm6000/
+obj-$(CONFIG_VIDEO_CPIA) += cpia/
+obj-$(CONFIG_VIDEO_STRADIS) += stradis/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_USB_IP_COMMON) += usbip/
obj-$(CONFIG_W35UND) += winbond/
diff --git a/drivers/staging/cpia/Kconfig b/drivers/staging/cpia/Kconfig
new file mode 100644
index 000000000000..205d247ad373
--- /dev/null
+++ b/drivers/staging/cpia/Kconfig
@@ -0,0 +1,39 @@
+config VIDEO_CPIA
+ tristate "CPiA Video For Linux (DEPRECATED)"
+ depends on VIDEO_V4L1
+ default n
+ ---help---
+ This driver is DEPRECATED; please use the gspca cpia1 module
+ instead. Note that you need at least version 0.6.4 of libv4l for
+ the cpia1 gspca module.
+
+ This is the video4linux driver for cameras based on Vision's CPiA
+ (Colour Processor Interface ASIC), such as the Creative Labs Video
+ Blaster Webcam II. If you have one of these cameras, say Y here
+ and select parallel port and/or USB lowlevel support below,
+ otherwise say N. This will not work with the Creative Webcam III.
+
+ Please read <file:Documentation/video4linux/README.cpia> for more
+ information.
+
+ This driver is also available as a module (cpia).
+
+config VIDEO_CPIA_PP
+ tristate "CPiA Parallel Port Lowlevel Support"
+ depends on PARPORT_1284 && VIDEO_CPIA && PARPORT
+ help
+ This is the lowlevel parallel port support for cameras based on
+ Vision's CPiA (Colour Processor Interface ASIC), such as the
+ Creative Webcam II. If you have the parallel port version of one
+ of these cameras, say Y here, otherwise say N. It is also available
+ as a module (cpia_pp).
+
+config VIDEO_CPIA_USB
+ tristate "CPiA USB Lowlevel Support"
+ depends on VIDEO_CPIA && USB
+ help
+ This is the lowlevel USB support for cameras based on Vision's CPiA
+ (Colour Processor Interface ASIC), such as the Creative Webcam II.
+ If you have the USB version of one of these cameras, say Y here,
+ otherwise say N. This will not work with the Creative Webcam III.
+ It is also available as a module (cpia_usb).
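The three tristate options above can all be built as modules; a .config
fragment consistent with the dependencies this Kconfig declares (only the
options this file references are shown) would be:

CONFIG_VIDEO_V4L1=y
CONFIG_PARPORT=y
CONFIG_PARPORT_1284=y
CONFIG_USB=y
CONFIG_VIDEO_CPIA=m
CONFIG_VIDEO_CPIA_PP=m
CONFIG_VIDEO_CPIA_USB=m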
diff --git a/drivers/staging/cpia/Makefile b/drivers/staging/cpia/Makefile
new file mode 100644
index 000000000000..89e52f10d739
--- /dev/null
+++ b/drivers/staging/cpia/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_CPIA) += cpia.o
+obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
+obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
+
+EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/staging/cpia/TODO b/drivers/staging/cpia/TODO
new file mode 100644
index 000000000000..ccb1c0775eec
--- /dev/null
+++ b/drivers/staging/cpia/TODO
@@ -0,0 +1,8 @@
+This is an obsolete driver for some cpia-based webcams that use the parallel port.
+We couldn't find anyone with this hardware in order to port it to use V4L2.
+
+Also, parallel-port webcams are obsolete nowadays.
+
+If nobody takes care of it, the driver will be removed in 2.6.38.
+
+Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/media/video/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c8cb9a..933ae4c8cb9a 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/staging/cpia/cpia.c
diff --git a/drivers/media/video/cpia.h b/drivers/staging/cpia/cpia.h
index 8f0cfee4b8a1..8f0cfee4b8a1 100644
--- a/drivers/media/video/cpia.h
+++ b/drivers/staging/cpia/cpia.h
diff --git a/drivers/media/video/cpia_pp.c b/drivers/staging/cpia/cpia_pp.c
index f5604c16a092..f5604c16a092 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/staging/cpia/cpia_pp.c
diff --git a/drivers/media/video/cpia_usb.c b/drivers/staging/cpia/cpia_usb.c
index 58d193ff591c..58d193ff591c 100644
--- a/drivers/media/video/cpia_usb.c
+++ b/drivers/staging/cpia/cpia_usb.c
diff --git a/drivers/staging/cx25821/Kconfig b/drivers/staging/cx25821/Kconfig
index 813cb355ac01..1d73334d2a44 100644
--- a/drivers/staging/cx25821/Kconfig
+++ b/drivers/staging/cx25821/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_CX25821
select I2C_ALGOBIT
select VIDEO_BTCX
select VIDEO_TVEEPROM
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEOBUF_DVB
select VIDEOBUF_DMA_SG
select VIDEO_CX25840
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index bbe36437ac16..2a01dc057b2c 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -629,7 +629,7 @@ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static struct pci_device_id cx25821_audio_pci_tbl[] __devinitdata = {
+static const struct pci_device_id cx25821_audio_pci_tbl[] __devinitdata = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.c b/drivers/staging/cx25821/cx25821-audio-upstream.c
index cdff49f409f2..6f3200666c93 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.c
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.c
@@ -40,8 +40,8 @@ MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
MODULE_LICENSE("GPL");
static int _intr_msk =
- FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF | FLD_AUD_SRC_SYNC |
- FLD_AUD_SRC_OPC_ERR;
+ FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF | FLD_AUD_SRC_SYNC |
+ FLD_AUD_SRC_OPC_ERR;
int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
@@ -506,7 +506,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
{
int i = 0;
u32 int_msk_tmp;
- struct sram_channel *channel = dev->channels[chan_num].sram_channels;
+ struct sram_channel *channel = dev->channels[chan_num].sram_channels;
dma_addr_t risc_phys_jump_addr;
__le32 *rp;
@@ -608,7 +608,7 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
if (!dev)
return -1;
- sram_ch = dev->channels[dev->_audio_upstream_channel_select].
+ sram_ch = dev->channels[dev->_audio_upstream_channel_select].
sram_channels;
msk_stat = cx_read(sram_ch->int_mstat);
@@ -733,7 +733,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
}
dev->_audio_upstream_channel_select = channel_select;
- sram_ch = dev->channels[channel_select].sram_channels;
+ sram_ch = dev->channels[channel_select].sram_channels;
/* Work queue */
INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
@@ -764,9 +764,8 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
str_length + 1);
/* Default if filename is empty string */
- if (strcmp(dev->input_audiofilename, "") == 0) {
+ if (strcmp(dev->input_audiofilename, "") == 0)
dev->_audiofilename = "/root/audioGOOD.wav";
- }
} else {
str_length = strlen(_defaultAudioName);
dev->_audiofilename = kmalloc(str_length + 1, GFP_KERNEL);
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.h b/drivers/staging/cx25821/cx25821-audio-upstream.h
index ca987addf815..668a4f11e80b 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.h
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.h
@@ -46,11 +46,11 @@
#define USE_RISC_NOOP_AUDIO 1
#ifdef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE ( LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
+#define AUDIO_RISC_DMA_BUF_SIZE (LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + NUM_NO_OPS*DWORD_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
#endif
#ifndef USE_RISC_NOOP_AUDIO
-#define AUDIO_RISC_DMA_BUF_SIZE ( LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
+#define AUDIO_RISC_DMA_BUF_SIZE (LINES_PER_AUDIO_BUFFER*RISC_READ_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + RISC_JUMP_INSTRUCTION_SIZE)
#endif
static int _line_size;
diff --git a/drivers/staging/cx25821/cx25821-audio.h b/drivers/staging/cx25821/cx25821-audio.h
index 434b2a312a80..a702a0db21d3 100644
--- a/drivers/staging/cx25821/cx25821-audio.h
+++ b/drivers/staging/cx25821/cx25821-audio.h
@@ -31,18 +31,18 @@
#define NUMBER_OF_PROGRAMS 8
/*
- Max size of the RISC program for a buffer. - worst case is 2 writes per line
- Space is also added for the 4 no-op instructions added on the end.
-*/
+ * Max size of the RISC program for a buffer. - worst case is 2 writes per line
+ * Space is also added for the 4 no-op instructions added on the end.
+ */
#ifndef USE_RISC_NOOP
#define MAX_BUFFER_PROGRAM_SIZE \
- (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE*4)
+ (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE*4)
#endif
/* MAE 12 July 2005 Try to use NOOP RISC instruction instead */
#ifdef USE_RISC_NOOP
#define MAX_BUFFER_PROGRAM_SIZE \
- (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_NOOP_INSTRUCTION_SIZE*4)
+ (2*LINES_PER_BUFFER*RISC_WRITE_INSTRUCTION_SIZE + RISC_NOOP_INSTRUCTION_SIZE*4)
#endif
/* Sizes of various instructions in bytes. Used when adding instructions. */
diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/staging/cx25821/cx25821-core.c
index c487c19256b9..ca1eece3df0d 100644
--- a/drivers/staging/cx25821/cx25821-core.c
+++ b/drivers/staging/cx25821/cx25821-core.c
@@ -42,7 +42,7 @@ static unsigned int card[] = {[0 ... (CX25821_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
-static unsigned int cx25821_devcount = 0;
+static unsigned int cx25821_devcount;
static DEFINE_MUTEX(devlist);
LIST_HEAD(cx25821_devlist);
@@ -781,14 +781,14 @@ static void cx25821_shutdown(struct cx25821_dev *dev)
/* Disable Video A/B activity */
for (i = 0; i < VID_CHANNEL_NUM; i++) {
- cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
- cx_write(dev->channels[i].sram_channels->int_msk, 0);
+ cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
+ cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
- for (i = VID_UPSTREAM_SRAM_CHANNEL_I; i <= VID_UPSTREAM_SRAM_CHANNEL_J;
- i++) {
- cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
- cx_write(dev->channels[i].sram_channels->int_msk, 0);
+ for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
+ i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
+ cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
+ cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
/* Disable Audio activity */
@@ -806,9 +806,9 @@ void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel_select,
u32 format)
{
if (channel_select <= 7 && channel_select >= 0) {
- cx_write(dev->channels[channel_select].
- sram_channels->pix_frmt, format);
- dev->channels[channel_select].pixel_formats = format;
+ cx_write(dev->channels[channel_select].
+ sram_channels->pix_frmt, format);
+ dev->channels[channel_select].pixel_formats = format;
}
}
@@ -829,7 +829,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx_write(PCI_INT_STAT, 0xffffffff);
for (i = 0; i < VID_CHANNEL_NUM; i++)
- cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
+ cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
cx_write(AUD_A_INT_STAT, 0xffffffff);
cx_write(AUD_B_INT_STAT, 0xffffffff);
@@ -843,22 +843,22 @@ static void cx25821_initialize(struct cx25821_dev *dev)
mdelay(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
- cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
- cx25821_sram_channel_setup(dev, dev->channels[i].sram_channels,
- 1440, 0);
- dev->channels[i].pixel_formats = PIXEL_FRMT_422;
- dev->channels[i].use_cif_resolution = FALSE;
+ cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
+ cx25821_sram_channel_setup(dev, dev->channels[i].sram_channels,
+ 1440, 0);
+ dev->channels[i].pixel_formats = PIXEL_FRMT_422;
+ dev->channels[i].use_cif_resolution = FALSE;
}
/* Probably only affect Downstream */
- for (i = VID_UPSTREAM_SRAM_CHANNEL_I; i <= VID_UPSTREAM_SRAM_CHANNEL_J;
- i++) {
- cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
+ for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
+ i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
+ cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
}
- cx25821_sram_channel_setup_audio(dev,
- dev->channels[SRAM_CH08].sram_channels,
- 128, 0);
+ cx25821_sram_channel_setup_audio(dev,
+ dev->channels[SRAM_CH08].sram_channels,
+ 128, 0);
cx25821_gpio_init(dev);
}
@@ -931,8 +931,8 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
/* Apply a sensible clock frequency for the PCIe bridge */
dev->clk_freq = 28000000;
- for (i = 0; i < MAX_VID_CHANNEL_NUM; i++)
- dev->channels[i].sram_channels = &cx25821_sram_channels[i];
+ for (i = 0; i < MAX_VID_CHANNEL_NUM; i++)
+ dev->channels[i].sram_channels = &cx25821_sram_channels[i];
if (dev->nr > 1)
CX25821_INFO("dev->nr > 1!");
@@ -1003,11 +1003,11 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
cx25821_card_setup(dev);
- if (medusa_video_init(dev) < 0)
- CX25821_ERR("%s() Failed to initialize medusa!\n"
- , __func__);
+ if (medusa_video_init(dev) < 0)
+ CX25821_ERR("%s() Failed to initialize medusa!\n"
+ , __func__);
- cx25821_video_register(dev);
+ cx25821_video_register(dev);
/* register IOCTL device */
dev->ioctl_dev =
@@ -1319,7 +1319,7 @@ void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
- videobuf_waiton(&buf->vb, 0, 0);
+ videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
@@ -1342,12 +1342,12 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
for (i = 0; i < VID_CHANNEL_NUM; i++) {
if (pci_status & mask[i]) {
- vid_status = cx_read(dev->channels[i].
- sram_channels->int_stat);
+ vid_status = cx_read(dev->channels[i].
+ sram_channels->int_stat);
if (vid_status)
handled +=
- cx25821_video_irq(dev, i, vid_status);
+ cx25821_video_irq(dev, i, vid_status);
cx_write(PCI_INT_STAT, mask[i]);
}
diff --git a/drivers/staging/cx25821/cx25821-i2c.c b/drivers/staging/cx25821/cx25821-i2c.c
index e43572e61ece..2b14bcca6897 100644
--- a/drivers/staging/cx25821/cx25821-i2c.c
+++ b/drivers/staging/cx25821/cx25821-i2c.c
@@ -283,7 +283,7 @@ static struct i2c_algorithm cx25821_i2c_algo_template = {
.master_xfer = i2c_xfer,
.functionality = cx25821_functionality,
#ifdef NEED_ALGO_CONTROL
- .algo_control = dummy_algo_control,
+ .algo_control = dummy_algo_control,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-medusa-reg.h b/drivers/staging/cx25821/cx25821-medusa-reg.h
index f7f33b3e7058..1c1c228352d1 100644
--- a/drivers/staging/cx25821/cx25821-medusa-reg.h
+++ b/drivers/staging/cx25821/cx25821-medusa-reg.h
@@ -443,13 +443,13 @@
/*****************************************************************************/
/* LUMA_CTRL register fields */
#define VDEC_A_BRITE_CTRL 0x1014
-#define VDEC_A_CNTRST_CTRL 0x1015
-#define VDEC_A_PEAK_SEL 0x1016
+#define VDEC_A_CNTRST_CTRL 0x1015
+#define VDEC_A_PEAK_SEL 0x1016
/*****************************************************************************/
/* CHROMA_CTRL register fields */
-#define VDEC_A_USAT_CTRL 0x1018
-#define VDEC_A_VSAT_CTRL 0x1019
-#define VDEC_A_HUE_CTRL 0x101A
+#define VDEC_A_USAT_CTRL 0x1018
+#define VDEC_A_VSAT_CTRL 0x1019
+#define VDEC_A_HUE_CTRL 0x101A
#endif
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/staging/cx25821/cx25821-medusa-video.c
index ef9f2b82a860..1e11e0ce2d0a 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/staging/cx25821/cx25821-medusa-video.c
@@ -778,9 +778,9 @@ int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
int medusa_video_init(struct cx25821_dev *dev)
{
- u32 value = 0, tmp = 0;
- int ret_val = 0;
- int i = 0;
+ u32 value = 0, tmp = 0;
+ int ret_val = 0;
+ int i = 0;
mutex_lock(&dev->lock);
@@ -829,7 +829,7 @@ int medusa_video_init(struct cx25821_dev *dev)
/* select AFE clock to output mode */
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
value &= 0x83FFFFFF;
- ret_val =
+ ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
value | 0x10000000);
diff --git a/drivers/staging/cx25821/cx25821-reg.h b/drivers/staging/cx25821/cx25821-reg.h
index cfe0f32db377..a3fc25a4dc0b 100644
--- a/drivers/staging/cx25821/cx25821-reg.h
+++ b/drivers/staging/cx25821/cx25821-reg.h
@@ -163,8 +163,8 @@
#define FLD_VID_DST_RISC2 0x00000010
#define FLD_VID_SRC_RISC1 0x00000002
#define FLD_VID_DST_RISC1 0x00000001
-#define FLD_VID_SRC_ERRORS FLD_VID_SRC_OPC_ERR | FLD_VID_SRC_SYNC | FLD_VID_SRC_UF
-#define FLD_VID_DST_ERRORS FLD_VID_DST_OPC_ERR | FLD_VID_DST_SYNC | FLD_VID_DST_OF
+#define FLD_VID_SRC_ERRORS (FLD_VID_SRC_OPC_ERR | FLD_VID_SRC_SYNC | FLD_VID_SRC_UF)
+#define FLD_VID_DST_ERRORS (FLD_VID_DST_OPC_ERR | FLD_VID_DST_SYNC | FLD_VID_DST_OF)
/* ***************************************************************************** */
#define AUD_A_INT_MSK 0x0400C0 /* Audio Int interrupt mask */
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
index d12dbb572e8b..405e2db72b0f 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
@@ -32,17 +32,17 @@
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
MODULE_LICENSE("GPL");
static int _intr_msk =
- FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
+ FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
static __le32 *cx25821_update_riscprogram_ch2(struct cx25821_dev *dev,
- __le32 * rp, unsigned int offset,
+ __le32 *rp, unsigned int offset,
unsigned int bpl, u32 sync_line,
unsigned int lines,
int fifo_enable, int field_type)
@@ -53,9 +53,8 @@ static __le32 *cx25821_update_riscprogram_ch2(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -75,7 +74,7 @@ static __le32 *cx25821_update_riscprogram_ch2(struct cx25821_dev *dev,
}
static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
- __le32 * rp,
+ __le32 *rp,
dma_addr_t databuf_phys_addr,
unsigned int offset,
u32 sync_line, unsigned int bpl,
@@ -88,14 +87,12 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
int dist_betwn_starts = bpl * 2;
/* sync instruction */
- if (sync_line != NO_SYNC_LINE) {
+ if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
- }
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -133,7 +130,7 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
{
__le32 *rp;
int fifo_enable = 0;
- int singlefield_lines = lines >> 1; /*get line count for single field */
+ int singlefield_lines = lines >> 1; /*get line count for single field */
int odd_num_lines = singlefield_lines;
int frame = 0;
int frame_size = 0;
@@ -218,15 +215,15 @@ void cx25821_stop_upstream_video_ch2(struct cx25821_dev *dev)
("cx25821: No video file is currently running so return!\n");
return;
}
- /* Disable RISC interrupts */
+ /* Disable RISC interrupts */
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp & ~_intr_msk);
- /* Turn OFF risc and fifo */
+ /* Turn OFF risc and fifo */
tmp = cx_read(sram_ch->dma_ctl);
cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN));
- /* Clear data buffer memory */
+ /* Clear data buffer memory */
if (dev->_data_buf_virt_addr_ch2)
memset(dev->_data_buf_virt_addr_ch2, 0,
dev->_data_buf_size_ch2);
@@ -250,9 +247,8 @@ void cx25821_stop_upstream_video_ch2(struct cx25821_dev *dev)
void cx25821_free_mem_upstream_ch2(struct cx25821_dev *dev)
{
- if (dev->_is_running_ch2) {
+ if (dev->_is_running_ch2)
cx25821_stop_upstream_video_ch2(dev);
- }
if (dev->_dma_virt_addr_ch2) {
pci_free_consistent(dev->pci, dev->_risc_size_ch2,
@@ -303,11 +299,10 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
file_offset = dev->_frame_count_ch2 * frame_size;
myfile = filp_open(dev->_filename_ch2, O_RDONLY | O_LARGEFILE, 0);
-
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
- __func__, dev->_filename_ch2, open_errno);
+ printk("%s(): ERROR opening file(%s) with errno = %d!\n",
+ __func__, dev->_filename_ch2, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
@@ -371,8 +366,8 @@ static void cx25821_vidups_handler_ch2(struct work_struct *work)
container_of(work, struct cx25821_dev, _irq_work_entry_ch2);
if (!dev) {
- printk("ERROR %s(): since container_of(work_struct) FAILED! \n",
- __func__);
+ printk("ERROR %s(): since container_of(work_struct) FAILED!\n",
+ __func__);
return;
}
@@ -398,8 +393,8 @@ int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
- __func__, dev->_filename_ch2, open_errno);
+ printk("%s(): ERROR opening file(%s) with errno = %d!\n",
+ __func__, dev->_filename_ch2, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
@@ -450,9 +445,8 @@ int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (i > 0)
dev->_frame_count_ch2++;
- if (vfs_read_retval < line_size) {
+ if (vfs_read_retval < line_size)
break;
- }
}
dev->_file_status_ch2 =
@@ -494,7 +488,7 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
return -ENOMEM;
}
- /* Iniitize at this address until n bytes to 0 */
+ /* Initialize at this address until n bytes to 0 */
memset(dev->_dma_virt_addr_ch2, 0, dev->_risc_size_ch2);
if (dev->_data_buf_virt_addr_ch2 != NULL) {
@@ -502,7 +496,7 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
dev->_data_buf_virt_addr_ch2,
dev->_data_buf_phys_addr_ch2);
}
- /* For Video Data buffer allocation */
+ /* For Video Data buffer allocation */
dev->_data_buf_virt_addr_ch2 =
pci_alloc_consistent(dev->pci, dev->upstream_databuf_size_ch2,
&data_dma_addr);
@@ -515,26 +509,26 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
return -ENOMEM;
}
- /* Initialize at this address until n bytes to 0 */
+ /* Initialize at this address until n bytes to 0 */
memset(dev->_data_buf_virt_addr_ch2, 0, dev->_data_buf_size_ch2);
ret = cx25821_openfile_ch2(dev, sram_ch);
if (ret < 0)
return ret;
- /* Creating RISC programs */
+ /* Creating RISC programs */
ret =
cx25821_risc_buffer_upstream_ch2(dev, dev->pci, 0, bpl,
dev->_lines_count_ch2);
if (ret < 0) {
printk(KERN_INFO
- "cx25821: Failed creating Video Upstream Risc programs! \n");
+ "cx25821: Failed creating Video Upstream Risc programs!\n");
goto error;
}
return 0;
- error:
+ error:
return ret;
}
@@ -542,7 +536,7 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
u32 status)
{
u32 int_msk_tmp;
- struct sram_channel *channel = dev->channels[chan_num].sram_channels;
+ struct sram_channel *channel = dev->channels[chan_num].sram_channels;
int singlefield_lines = NTSC_FIELD_HEIGHT;
int line_size_in_bytes = Y422_LINE_SZ;
int odd_risc_prog_size = 0;
@@ -550,13 +544,13 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
__le32 *rp;
if (status & FLD_VID_SRC_RISC1) {
- /* We should only process one program per call */
+ /* We should only process one program per call */
u32 prog_cnt = cx_read(channel->gpcnt);
- /*
- Since we've identified our IRQ, clear our bits from the
- interrupt mask and interrupt status registers
- */
+ /*
+ * Since we've identified our IRQ, clear our bits from the
+ * interrupt mask and interrupt status registers
+ */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk);
cx_write(channel->int_stat, _intr_msk);
@@ -612,7 +606,7 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
dev->_frame_count_ch2);
return -1;
}
- /* ElSE, set the interrupt mask register, re-enable irq. */
+ /* ElSE, set the interrupt mask register, re-enable irq. */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
@@ -631,24 +625,22 @@ static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
return -1;
channel_num = VID_UPSTREAM_SRAM_CHANNEL_J;
-
- sram_ch = dev->channels[channel_num].sram_channels;
+ sram_ch = dev->channels[channel_num].sram_channels;
msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
- /* Only deal with our interrupt */
+ /* Only deal with our interrupt */
if (vid_status) {
handled =
cx25821_video_upstream_irq_ch2(dev, channel_num,
vid_status);
}
- if (handled < 0) {
+ if (handled < 0)
cx25821_stop_upstream_video_ch2(dev);
- } else {
+ else
handled += handled;
- }
return IRQ_RETVAL(handled);
}
@@ -667,22 +659,21 @@ static void cx25821_set_pixelengine_ch2(struct cx25821_dev *dev,
value |= dev->_isNTSC_ch2 ? 0 : 0x10;
cx_write(ch->vid_fmt_ctl, value);
- /*
- set number of active pixels in each line. Default is 720
- pixels in both NTSC and PAL format
- */
+ /*
+ * set number of active pixels in each line. Default is 720
+ * pixels in both NTSC and PAL format
+ */
cx_write(ch->vid_active_ctl1, width);
num_lines = (height / 2) & 0x3FF;
odd_num_lines = num_lines;
- if (dev->_isNTSC_ch2) {
+ if (dev->_isNTSC_ch2)
odd_num_lines += 1;
- }
value = (num_lines << 16) | odd_num_lines;
- /* set number of active lines in field 0 (top) and field 1 (bottom) */
+ /* set number of active lines in field 0 (top) and field 1 (bottom) */
cx_write(ch->vid_active_ctl2, value);
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
@@ -694,27 +685,27 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
u32 tmp = 0;
int err = 0;
- /*
- 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface
- for channel A-C
- */
+ /*
+ * 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface
+ * for channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
- /*
- Set the physical start address of the RISC program in the initial
- program counter(IPC) member of the cmds.
- */
+ /*
+ * Set the physical start address of the RISC program in the initial
+ * program counter(IPC) member of the cmds.
+ */
cx_write(sram_ch->cmds_start + 0, dev->_dma_phys_addr_ch2);
- cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
+ cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
/* reset counter */
cx_write(sram_ch->gpcnt_ctl, 3);
- /* Clear our bits from the interrupt status register. */
+ /* Clear our bits from the interrupt status register. */
cx_write(sram_ch->int_stat, _intr_msk);
- /* Set the interrupt mask register, enable irq. */
+ /* Set the interrupt mask register, enable irq. */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
@@ -727,7 +718,7 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
dev->pci->irq);
goto fail_irq;
}
- /* Start the DMA engine */
+ /* Start the DMA engine */
tmp = cx_read(sram_ch->dma_ctl);
cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN);
@@ -736,7 +727,7 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
return 0;
- fail_irq:
+ fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -758,7 +749,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
}
dev->_channel2_upstream_select = channel_select;
- sram_ch = dev->channels[channel_select].sram_channels;
+ sram_ch = dev->channels[channel_select].sram_channels;
INIT_WORK(&dev->_irq_work_entry_ch2, cx25821_vidups_handler_ch2);
dev->_irq_queues_ch2 =
@@ -769,10 +760,10 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
("cx25821: create_singlethread_workqueue() for Video FAILED!\n");
return -ENOMEM;
}
- /*
- 656/VIP SRC Upstream Channel I & J and 7 -
- Host Bus Interface for channel A-C
- */
+ /*
+ * 656/VIP SRC Upstream Channel I & J and 7 -
+ * Host Bus Interface for channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
@@ -808,7 +799,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
str_length + 1);
}
- /* Default if filename is empty string */
+ /* Default if filename is empty string */
if (strcmp(dev->input_filename_ch2, "") == 0) {
if (dev->_isNTSC_ch2) {
dev->_filename_ch2 =
@@ -833,7 +824,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->upstream_riscbuf_size_ch2 = risc_buffer_size * 2;
dev->upstream_databuf_size_ch2 = data_frame_size * 2;
- /* Allocating buffers and prepare RISC program */
+ /* Allocating buffers and prepare RISC program */
retval =
cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
dev->_line_size_ch2);
@@ -848,7 +839,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
return 0;
- error:
+ error:
cx25821_dev_unregister(dev);
return err;
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h b/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
index 62340636c916..029e8305724b 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
+++ b/drivers/staging/cx25821/cx25821-video-upstream-ch2.h
@@ -88,14 +88,14 @@
#endif
#ifndef USE_RISC_NOOP_VIDEO
-#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE )
-#define PAL_RISC_BUF_SIZE ( 2 * (RISC_SYNC_INSTRUCTION_SIZE + PAL_US_VID_PROG_SIZE) )
+#define PAL_US_VID_PROG_SIZE ((PAL_FIELD_HEIGHT + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+#define PAL_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + PAL_US_VID_PROG_SIZE))
#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE )
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE )
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE )
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
+#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE) )
+#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE )
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
#endif
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.c b/drivers/staging/cx25821/cx25821-video-upstream.c
index 756a820a76cb..16bf74d65912 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream.c
@@ -39,7 +39,7 @@ MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
MODULE_LICENSE("GPL");
static int _intr_msk =
- FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
+ FLD_VID_SRC_RISC1 | FLD_VID_SRC_UF | FLD_VID_SRC_SYNC | FLD_VID_SRC_OPC_ERR;
int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
struct sram_channel *ch,
@@ -346,13 +346,13 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk(KERN_ERR
+ printk(KERN_ERR
"%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_filename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk(KERN_ERR
+ printk(KERN_ERR
"%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
@@ -360,7 +360,7 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
}
if (!myfile->f_op->read) {
- printk(KERN_ERR
+ printk(KERN_ERR
"%s: File has no READ operations registered!",
__func__);
filp_close(myfile, NULL);
@@ -415,7 +415,7 @@ static void cx25821_vidups_handler(struct work_struct *work)
container_of(work, struct cx25821_dev, _irq_work_entry);
if (!dev) {
- printk(KERN_ERR
+ printk(KERN_ERR
"ERROR %s(): since container_of(work_struct) FAILED!\n",
__func__);
return;
@@ -448,7 +448,7 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk(KERN_ERR
+ printk(KERN_ERR
"%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
@@ -456,7 +456,7 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
}
if (!myfile->f_op->read) {
- printk(KERN_ERR
+ printk(KERN_ERR
"%s: File has no READ operations registered! \
Returning.",
__func__);
@@ -589,7 +589,7 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
u32 status)
{
u32 int_msk_tmp;
- struct sram_channel *channel = dev->channels[chan_num].sram_channels;
+ struct sram_channel *channel = dev->channels[chan_num].sram_channels;
int singlefield_lines = NTSC_FIELD_HEIGHT;
int line_size_in_bytes = Y422_LINE_SZ;
int odd_risc_prog_size = 0;
@@ -657,12 +657,12 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
Interrupt!\n", __func__);
if (status & FLD_VID_SRC_SYNC)
- printk(KERN_ERR "%s: Video Received Sync Error \
- Interrupt!\n", __func__);
+ printk(KERN_ERR "%s: Video Received Sync Error \
+ Interrupt!\n", __func__);
if (status & FLD_VID_SRC_OPC_ERR)
- printk(KERN_ERR "%s: Video Received OpCode Error \
- Interrupt!\n", __func__);
+ printk(KERN_ERR "%s: Video Received OpCode Error \
+ Interrupt!\n", __func__);
}
if (dev->_file_status == END_OF_FILE) {
@@ -690,7 +690,7 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
channel_num = VID_UPSTREAM_SRAM_CHANNEL_I;
- sram_ch = dev->channels[channel_num].sram_channels;
+ sram_ch = dev->channels[channel_num].sram_channels;
msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
@@ -811,7 +811,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
}
dev->_channel_upstream_select = channel_select;
- sram_ch = dev->channels[channel_select].sram_channels;
+ sram_ch = dev->channels[channel_select].sram_channels;
INIT_WORK(&dev->_irq_work_entry, cx25821_vidups_handler);
dev->_irq_queues = create_singlethread_workqueue("cx25821_workqueue");
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.h b/drivers/staging/cx25821/cx25821-video-upstream.h
index 10dee5c24a81..f0b3ac0880ad 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.h
+++ b/drivers/staging/cx25821/cx25821-video-upstream.h
@@ -97,13 +97,13 @@
#define PAL_RISC_BUF_SIZE (2 * PAL_US_VID_PROG_SIZE)
#define PAL_VID_PROG_SIZE ((PAL_FIELD_HEIGHT*2) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE )
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE )
-#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE )
+#define ODD_FLD_PAL_PROG_SIZE ((PAL_FIELD_HEIGHT) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
+#define ODD_FLD_NTSC_PROG_SIZE ((NTSC_ODD_FLD_LINES) * 3 * DWORD_SIZE + RISC_SYNC_INSTRUCTION_SIZE + RISC_WRITECR_INSTRUCTION_SIZE)
#define NTSC_US_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + 1) * 3 * DWORD_SIZE + RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
-#define NTSC_RISC_BUF_SIZE ( 2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE) )
+#define NTSC_RISC_BUF_SIZE (2 * (RISC_SYNC_INSTRUCTION_SIZE + NTSC_US_VID_PROG_SIZE))
#define FRAME1_VID_PROG_SIZE ((NTSC_ODD_FLD_LINES + NTSC_FIELD_HEIGHT) * 3 * DWORD_SIZE + 2*RISC_SYNC_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE )
+ RISC_WRITECR_INSTRUCTION_SIZE + JUMP_INSTRUCTION_SIZE)
#endif
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 1d5e8796d383..e7f1d5778cec 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -856,7 +856,7 @@ static int video_open(struct file *file)
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
- sizeof(struct cx25821_buffer), fh);
+ sizeof(struct cx25821_buffer), fh, NULL);
dprintk(1, "post videobuf_queue_init()\n");
unlock_kernel();
@@ -993,6 +993,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
+ struct v4l2_mbus_framefmt mbus_fmt;
int err;
int pix_format = PIXEL_FRMT_422;
@@ -1039,7 +1040,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dprintk(2, "%s() width=%d height=%d field=%d\n", __func__, fh->width,
fh->height, fh->vidq.field);
- cx25821_call_all(dev, video, s_fmt, f);
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
+ cx25821_call_all(dev, video, s_mbus_fmt, &mbus_fmt);
return 0;
}
diff --git a/drivers/staging/cx25821/cx25821.h b/drivers/staging/cx25821/cx25821.h
index 1b628f61578a..c94000125782 100644
--- a/drivers/staging/cx25821/cx25821.h
+++ b/drivers/staging/cx25821/cx25821.h
@@ -91,10 +91,10 @@
/* Currently supported by the driver */
#define CX25821_NORMS (\
- V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_M_KR | \
- V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
- V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_H | \
- V4L2_STD_PAL_Nc )
+ V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_M_KR | \
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
+ V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_H | \
+ V4L2_STD_PAL_Nc)
#define CX25821_BOARD_CONEXANT_ATHENA10 1
#define MAX_VID_CHANNEL_NUM 12
@@ -139,7 +139,7 @@ struct cx25821_fh {
/* video capture */
struct cx25821_fmt *fmt;
unsigned int width, height;
- int channel_id;
+ int channel_id;
/* vbi capture */
struct videobuf_queue vidq;
@@ -238,26 +238,25 @@ struct cx25821_data {
};
struct cx25821_channel {
- struct v4l2_prio_state prio;
+ struct v4l2_prio_state prio;
- int ctl_bright;
- int ctl_contrast;
- int ctl_hue;
- int ctl_saturation;
+ int ctl_bright;
+ int ctl_contrast;
+ int ctl_hue;
+ int ctl_saturation;
+ struct cx25821_data timeout_data;
- struct cx25821_data timeout_data;
+ struct video_device *video_dev;
+ struct cx25821_dmaqueue vidq;
- struct video_device *video_dev;
- struct cx25821_dmaqueue vidq;
+ struct sram_channel *sram_channels;
- struct sram_channel *sram_channels;
-
- struct mutex lock;
- int resources;
+ struct mutex lock;
+ int resources;
- int pixel_formats;
- int use_cif_resolution;
- int cif_width;
+ int pixel_formats;
+ int use_cif_resolution;
+ int cif_width;
};
struct cx25821_dev {
@@ -283,7 +282,7 @@ struct cx25821_dev {
int nr;
struct mutex lock;
- struct cx25821_channel channels[MAX_VID_CHANNEL_NUM];
+ struct cx25821_channel channels[MAX_VID_CHANNEL_NUM];
/* board details */
unsigned int board;
@@ -311,7 +310,7 @@ struct cx25821_dev {
int _audio_lines_count;
int _audioframe_count;
int _audio_upstream_channel_select;
- int _last_index_irq; /* The last interrupt index processed. */
+ int _last_index_irq; /* The last interrupt index processed. */
__le32 *_risc_audio_jmp_addr;
__le32 *_risc_virt_start_addr;
@@ -443,7 +442,7 @@ static inline struct cx25821_dev *get_cx25821(struct v4l2_device *v4l2_dev)
}
#define cx25821_call_all(dev, o, f, args...) \
- v4l2_device_call_all(&dev->v4l2_dev, 0, o, f, ##args)
+ v4l2_device_call_all(&dev->v4l2_dev, 0, o, f, ##args)
extern struct list_head cx25821_devlist;
extern struct cx25821_board cx25821_boards[];
@@ -491,7 +490,7 @@ struct sram_channel {
u32 fld_aud_fifo_en;
u32 fld_aud_risc_en;
- /* For Upstream Video */
+ /* For Upstream Video */
u32 vid_fmt_ctl;
u32 vid_active_ctl1;
u32 vid_active_ctl2;
@@ -511,8 +510,8 @@ extern struct sram_channel cx25821_sram_channels[];
#define cx_write(reg, value) writel((value), dev->lmmio + ((reg)>>2))
#define cx_andor(reg, mask, value) \
- writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
- ((value) & (mask)), dev->lmmio+((reg)>>2))
+ writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
+ ((value) & (mask)), dev->lmmio+((reg)>>2))
#define cx_set(reg, bit) cx_andor((reg), (bit), (bit))
#define cx_clear(reg, bit) cx_andor((reg), (bit), 0)
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index fd48b38e797c..b996697e7eb2 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -293,7 +293,7 @@ static void
dt3155_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
if (vb->state == VIDEOBUF_ACTIVE)
- videobuf_waiton(vb, 0, 0); /* FIXME: cannot be interrupted */
+ videobuf_waiton(q, vb, 0, 0); /* FIXME: cannot be interrupted */
videobuf_dma_contig_free(q, vb);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -440,7 +440,7 @@ dt3155_open(struct file *filp)
videobuf_queue_dma_contig_init(pd->vidq, &vbq_ops,
&pd->pdev->dev, &pd->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), pd);
+ sizeof(struct videobuf_buffer), pd, NULL);
/* disable all irqs, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
pd->regs + INT_CSR);
@@ -494,7 +494,7 @@ dt3155_release(struct file *filp)
tmp = pd->curr_buf;
spin_unlock_irqrestore(&pd->lock, flags);
if (tmp)
- videobuf_waiton(tmp, 0, 1); /* block, interruptible */
+ videobuf_waiton(pd->vidq, tmp, 0, 1); /* block, interruptible */
dt3155_stop_acq(pd);
videobuf_stop(pd->vidq);
pd->acq_fp = NULL;
@@ -603,7 +603,7 @@ dt3155_ioc_streamoff(struct file *filp, void *p, enum v4l2_buf_type type)
tmp = pd->curr_buf;
spin_unlock_irqrestore(&pd->lock, flags);
if (tmp)
- videobuf_waiton(tmp, 0, 1); /* block, interruptible */
+ videobuf_waiton(pd->vidq, tmp, 0, 1); /* block, interruptible */
return ret;
}
diff --git a/drivers/staging/go7007/Kconfig b/drivers/staging/go7007/Kconfig
index 75fa46805527..3aecd30f0d1e 100644
--- a/drivers/staging/go7007/Kconfig
+++ b/drivers/staging/go7007/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_GO7007
depends on BKL # please fix
depends on SND
select VIDEOBUF_DMA_SG
- select VIDEO_IR
+ depends on VIDEO_IR
select VIDEO_TUNER
select VIDEO_TVEEPROM
select SND_PCM
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 372a7c6791ca..b3f42f37a313 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -194,51 +194,15 @@ int go7007_reset_encoder(struct go7007 *go)
* Attempt to instantiate an I2C client by ID, probably loading a module.
*/
static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
- int id, int addr)
+ int addr)
{
struct go7007 *go = i2c_get_adapdata(adapter);
struct v4l2_device *v4l2_dev = &go->v4l2_dev;
- char *modname;
- switch (id) {
- case I2C_DRIVERID_WIS_SAA7115:
- modname = "wis-saa7115";
- break;
- case I2C_DRIVERID_WIS_SAA7113:
- modname = "wis-saa7113";
- break;
- case I2C_DRIVERID_WIS_UDA1342:
- modname = "wis-uda1342";
- break;
- case I2C_DRIVERID_WIS_SONY_TUNER:
- modname = "wis-sony-tuner";
- break;
- case I2C_DRIVERID_WIS_TW9903:
- modname = "wis-tw9903";
- break;
- case I2C_DRIVERID_WIS_TW2804:
- modname = "wis-tw2804";
- break;
- case I2C_DRIVERID_WIS_OV7640:
- modname = "wis-ov7640";
- break;
- case I2C_DRIVERID_S2250:
- modname = "s2250";
- break;
- default:
- modname = NULL;
- break;
- }
-
- if (v4l2_i2c_new_subdev(v4l2_dev, adapter, modname, type, addr, NULL))
+ if (v4l2_i2c_new_subdev(v4l2_dev, adapter, NULL, type, addr, NULL))
return 0;
- if (modname != NULL)
- printk(KERN_INFO
- "go7007: probing for module %s failed\n", modname);
- else
- printk(KERN_INFO
- "go7007: sensor %u seems to be unsupported!\n", id);
+ printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type);
return -1;
}
@@ -277,7 +241,6 @@ int go7007_register_encoder(struct go7007 *go)
for (i = 0; i < go->board_info->num_i2c_devs; ++i)
init_i2c_module(&go->i2c_adapter,
go->board_info->i2c_devs[i].type,
- go->board_info->i2c_devs[i].id,
go->board_info->i2c_devs[i].addr);
if (go->board_id == GO7007_BOARDID_ADLINK_MPG24)
i2c_clients_command(&go->i2c_adapter,
@@ -393,7 +356,8 @@ static void write_bitmap_word(struct go7007 *go)
for (i = 0; i < 16; ++i) {
y = (((go->parse_length - 1) << 3) + i) / (go->width >> 4);
x = (((go->parse_length - 1) << 3) + i) % (go->width >> 4);
- go->active_map[stride * y + (x >> 3)] |=
+ if (stride * y + (x >> 3) < sizeof(go->active_map))
+ go->active_map[stride * y + (x >> 3)] |=
(go->modet_word & 1) << (x & 0x7);
go->modet_word >>= 1;
}
@@ -485,6 +449,15 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
}
break;
case STATE_00_00_01:
+ if (buf[i] == 0xF8 && go->modet_enable == 0) {
+ /* MODET start code, but MODET not enabled */
+ store_byte(go->active_buf, 0x00);
+ store_byte(go->active_buf, 0x00);
+ store_byte(go->active_buf, 0x01);
+ store_byte(go->active_buf, 0xF8);
+ go->state = STATE_DATA;
+ break;
+ }
/* If this is the start of a new MPEG frame,
* get a new buffer */
if ((go->format == GO7007_FORMAT_MPEG1 ||
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index 20ed930b588c..bea9f4d5bc3c 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -394,7 +394,7 @@ static struct go7007_usb_board board_adlink_mpg24 = {
.num_i2c_devs = 1,
.i2c_devs = {
{
- .type = "wis_twTW2804",
+ .type = "wis_tw2804",
.id = I2C_DRIVERID_WIS_TW2804,
.addr = 0x00, /* yes, really */
},
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
index 46b4b9f6855b..2b27d8da70a2 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/go7007/go7007-v4l2.c
@@ -252,23 +252,22 @@ static int set_capture_size(struct go7007 *go, struct v4l2_format *fmt, int try)
go->modet_map[i] = 0;
if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) {
- struct v4l2_format res;
+ struct v4l2_mbus_framefmt mbus_fmt;
- if (fmt != NULL) {
- res = *fmt;
- } else {
- res.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- res.fmt.pix.width = width;
- }
+ mbus_fmt.code = V4L2_MBUS_FMT_FIXED;
+ if (fmt != NULL)
+ mbus_fmt.width = fmt->fmt.pix.width;
+ else
+ mbus_fmt.width = width;
if (height > sensor_height / 2) {
- res.fmt.pix.height = height / 2;
+ mbus_fmt.height = height / 2;
go->encoder_v_halve = 0;
} else {
- res.fmt.pix.height = height;
+ mbus_fmt.height = height;
go->encoder_v_halve = 1;
}
- call_all(&go->v4l2_dev, video, s_fmt, &res);
+ call_all(&go->v4l2_dev, video, s_mbus_fmt, &mbus_fmt);
} else {
if (width <= sensor_width / 4) {
go->encoder_h_halve = 1;
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c
index 93f26048e3b4..e7736a915530 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/go7007/s2250-board.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
-#include <media/v4l2-i2c-drv.h>
#include <media/v4l2-subdev.h>
#include "go7007-priv.h"
@@ -479,12 +478,13 @@ static int s2250_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return 0;
}
-static int s2250_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int s2250_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
{
struct s2250 *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (fmt->fmt.pix.height < 640) {
+ if (fmt->height < 640) {
write_reg_fp(client, 0x12b, state->reg12b_val | 0x400);
write_reg_fp(client, 0x140, 0x060);
} else {
@@ -555,7 +555,7 @@ static const struct v4l2_subdev_audio_ops s2250_audio_ops = {
static const struct v4l2_subdev_video_ops s2250_video_ops = {
.s_routing = s2250_s_video_routing,
- .s_fmt = s2250_s_fmt,
+ .s_mbus_fmt = s2250_s_mbus_fmt,
};
static const struct v4l2_subdev_ops s2250_ops = {
@@ -674,9 +674,25 @@ static const struct i2c_device_id s2250_id[] = {
};
MODULE_DEVICE_TABLE(i2c, s2250_id);
-static struct v4l2_i2c_driver_data v4l2_i2c_data = {
- .name = "s2250",
- .probe = s2250_probe,
- .remove = s2250_remove,
- .id_table = s2250_id,
+static struct i2c_driver s2250_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s2250",
+ },
+ .probe = s2250_probe,
+ .remove = s2250_remove,
+ .id_table = s2250_id,
};
+
+static __init int init_s2250(void)
+{
+ return i2c_add_driver(&s2250_driver);
+}
+
+static __exit void exit_s2250(void)
+{
+ i2c_del_driver(&s2250_driver);
+}
+
+module_init(init_s2250);
+module_exit(exit_s2250);
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/go7007/wis-ov7640.c
index 4f0cbdde2765..6bc9470fecb6 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/go7007/wis-ov7640.c
@@ -81,6 +81,7 @@ static const struct i2c_device_id wis_ov7640_id[] = {
{ "wis_ov7640", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_ov7640_id);
static struct i2c_driver wis_ov7640_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index 72f5c1f56d19..05e0e1083864 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -308,6 +308,7 @@ static const struct i2c_device_id wis_saa7113_id[] = {
{ "wis_saa7113", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_saa7113_id);
static struct i2c_driver wis_saa7113_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index cd950b61cf70..46cff59e28b7 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -441,6 +441,7 @@ static const struct i2c_device_id wis_saa7115_id[] = {
{ "wis_saa7115", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_saa7115_id);
static struct i2c_driver wis_saa7115_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index 981c9b311b8b..8f1b7d4f6a2e 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -692,6 +692,7 @@ static const struct i2c_device_id wis_sony_tuner_id[] = {
{ "wis_sony_tuner", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_sony_tuner_id);
static struct i2c_driver wis_sony_tuner_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index ee28a99dc388..5b218c55842a 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -331,6 +331,7 @@ static const struct i2c_device_id wis_tw2804_id[] = {
{ "wis_tw2804", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_tw2804_id);
static struct i2c_driver wis_tw2804_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index 80d47269b1c0..9230f4a80529 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -313,6 +313,7 @@ static const struct i2c_device_id wis_tw9903_id[] = {
{ "wis_tw9903", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_tw9903_id);
static struct i2c_driver wis_tw9903_driver = {
.driver = {
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/go7007/wis-uda1342.c
index 5c4eb49d7357..0127be2f3be0 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/go7007/wis-uda1342.c
@@ -86,6 +86,7 @@ static const struct i2c_device_id wis_uda1342_id[] = {
{ "wis_uda1342", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wis_uda1342_id);
static struct i2c_driver wis_uda1342_driver = {
.driver = {
diff --git a/drivers/staging/lirc/Kconfig b/drivers/staging/lirc/Kconfig
index 100c4d4b8125..fa790db75d7e 100644
--- a/drivers/staging/lirc/Kconfig
+++ b/drivers/staging/lirc/Kconfig
@@ -53,7 +53,7 @@ config LIRC_ITE8709
config LIRC_PARALLEL
tristate "Homebrew Parallel Port Receiver"
- depends on LIRC_STAGING && PARPORT && !SMP
+ depends on LIRC_STAGING && PARPORT
help
Driver for Homebrew Parallel Port Receivers
diff --git a/drivers/staging/lirc/lirc_igorplugusb.c b/drivers/staging/lirc/lirc_igorplugusb.c
index bce600ede263..0dc2c2b22c2b 100644
--- a/drivers/staging/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/lirc/lirc_igorplugusb.c
@@ -54,10 +54,10 @@
/* module identification */
-#define DRIVER_VERSION "0.1"
+#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR \
"Jan M. Hochstein <hochstein@algo.informatik.tu-darmstadt.de>"
-#define DRIVER_DESC "USB remote driver for LIRC"
+#define DRIVER_DESC "Igorplug USB remote driver for LIRC"
#define DRIVER_NAME "lirc_igorplugusb"
/* debugging support */
@@ -201,7 +201,6 @@ struct igorplug {
/* usb */
struct usb_device *usbdev;
- struct urb *urb_in;
int devnum;
unsigned char *buf_in;
@@ -216,28 +215,36 @@ struct igorplug {
/* handle sending (init strings) */
int send_flags;
- wait_queue_head_t wait_out;
};
static int unregister_from_lirc(struct igorplug *ir)
{
- struct lirc_driver *d = ir->d;
+ struct lirc_driver *d;
int devnum;
- if (!ir->d)
+ if (!ir) {
+ printk(KERN_ERR "%s: called with NULL device struct!\n",
+ __func__);
return -EINVAL;
+ }
devnum = ir->devnum;
- dprintk(DRIVER_NAME "[%d]: unregister from lirc called\n", devnum);
+ d = ir->d;
- lirc_unregister_driver(d->minor);
+ if (!d) {
+ printk(KERN_ERR "%s: called with NULL lirc driver struct!\n",
+ __func__);
+ return -EINVAL;
+ }
- printk(DRIVER_NAME "[%d]: usb remote disconnected\n", devnum);
+ dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum);
+ lirc_unregister_driver(d->minor);
kfree(d);
ir->d = NULL;
kfree(ir);
- return 0;
+
+ return devnum;
}
static int set_use_inc(void *data)
@@ -248,6 +255,7 @@ static int set_use_inc(void *data)
printk(DRIVER_NAME "[?]: set_use_inc called with no context\n");
return -EIO;
}
+
dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum);
if (!ir->usbdev)
@@ -264,9 +272,29 @@ static void set_use_dec(void *data)
printk(DRIVER_NAME "[?]: set_use_dec called with no context\n");
return;
}
+
dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum);
}
+static void send_fragment(struct igorplug *ir, struct lirc_buffer *buf,
+ int i, int max)
+{
+ int code;
+
+ /* MODE2: pulse/space (PULSE_BIT) in 1us units */
+ while (i < max) {
+ /* 1 Igor-tick = 85.333333 us */
+ code = (unsigned int)ir->buf_in[i] * 85 +
+ (unsigned int)ir->buf_in[i] / 3;
+ ir->last_time.tv_usec += code;
+ if (ir->in_space)
+ code |= PULSE_BIT;
+ lirc_buffer_write(buf, (unsigned char *)&code);
+ /* 1 chunk = CODE_LENGTH bytes */
+ ir->in_space ^= 1;
+ ++i;
+ }
+}
/**
* Called in user context.
@@ -274,41 +302,32 @@ static void set_use_dec(void *data)
* -ENODATA if none was available. This should add some number of bits
* evenly divisible by code_length to the buffer
*/
-static int usb_remote_poll(void *data, struct lirc_buffer *buf)
+static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
{
int ret;
struct igorplug *ir = (struct igorplug *)data;
- if (!ir->usbdev) /* Has the device been removed? */
+ if (!ir || !ir->usbdev) /* Has the device been removed? */
return -ENODEV;
memset(ir->buf_in, 0, ir->len_in);
- ret = usb_control_msg(
- ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- GET_INFRACODE, USB_TYPE_VENDOR|USB_DIR_IN,
- 0/* offset */, /*unused*/0,
- ir->buf_in, ir->len_in,
- /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
+ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
+ GET_INFRACODE, USB_TYPE_VENDOR | USB_DIR_IN,
+ 0/* offset */, /*unused*/0,
+ ir->buf_in, ir->len_in,
+ /*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret > 0) {
- int i = DEVICE_HEADERLEN;
int code, timediff;
struct timeval now;
- if (ret <= 1) /* ACK packet has 1 byte --> ignore */
+ /* ACK packet has 1 byte --> ignore */
+ if (ret < DEVICE_HEADERLEN)
return -ENODATA;
dprintk(DRIVER_NAME ": Got %d bytes. Header: %02x %02x %02x\n",
ret, ir->buf_in[0], ir->buf_in[1], ir->buf_in[2]);
- if (ir->buf_in[2] != 0) {
- printk(DRIVER_NAME "[%d]: Device buffer overrun.\n",
- ir->devnum);
- /* start at earliest byte */
- i = DEVICE_HEADERLEN + ir->buf_in[2];
- /* where are we now? space, gap or pulse? */
- }
-
do_gettimeofday(&now);
timediff = now.tv_sec - ir->last_time.tv_sec;
if (timediff + 1 > PULSE_MASK / 1000000)
@@ -325,18 +344,20 @@ static int usb_remote_poll(void *data, struct lirc_buffer *buf)
lirc_buffer_write(buf, (unsigned char *)&code);
ir->in_space = 1; /* next comes a pulse */
- /* MODE2: pulse/space (PULSE_BIT) in 1us units */
-
- while (i < ret) {
- /* 1 Igor-tick = 85.333333 us */
- code = (unsigned int)ir->buf_in[i] * 85
- + (unsigned int)ir->buf_in[i] / 3;
- if (ir->in_space)
- code |= PULSE_BIT;
- lirc_buffer_write(buf, (unsigned char *)&code);
- /* 1 chunk = CODE_LENGTH bytes */
- ir->in_space ^= 1;
- ++i;
+ if (ir->buf_in[2] == 0)
+ send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
+ else {
+ printk(KERN_WARNING DRIVER_NAME
+ "[%d]: Device buffer overrun.\n", ir->devnum);
+ /* HHHNNNNNNNNNNNOOOOOOOO H = header
+ <---[2]---> N = newer
+ <---------ret--------> O = older */
+ ir->buf_in[2] %= ret - DEVICE_HEADERLEN; /* sanitize */
+ /* keep even-ness to not desync pulse/pause */
+ send_fragment(ir, buf, DEVICE_HEADERLEN +
+ ir->buf_in[2] - (ir->buf_in[2] & 1), ret);
+ send_fragment(ir, buf, DEVICE_HEADERLEN,
+ DEVICE_HEADERLEN + ir->buf_in[2]);
}
ret = usb_control_msg(
@@ -358,12 +379,12 @@ static int usb_remote_poll(void *data, struct lirc_buffer *buf)
-static int usb_remote_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int igorplugusb_remote_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *dev = NULL;
struct usb_host_interface *idesc = NULL;
- struct usb_host_endpoint *ep_ctl2;
+ struct usb_endpoint_descriptor *ep;
struct igorplug *ir = NULL;
struct lirc_driver *driver = NULL;
int devnum, pipe, maxp;
@@ -380,20 +401,21 @@ static int usb_remote_probe(struct usb_interface *intf,
if (idesc->desc.bNumEndpoints != 1)
return -ENODEV;
- ep_ctl2 = idesc->endpoint;
- if (((ep_ctl2->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+
+ ep = &idesc->endpoint->desc;
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
!= USB_DIR_IN)
- || (ep_ctl2->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ || (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
!= USB_ENDPOINT_XFER_CONTROL)
return -ENODEV;
- pipe = usb_rcvctrlpipe(dev, ep_ctl2->desc.bEndpointAddress);
+
+ pipe = usb_rcvctrlpipe(dev, ep->bEndpointAddress);
devnum = dev->devnum;
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
- dprintk(DRIVER_NAME "[%d]: bytes_in_key=%lu maxp=%d\n",
+ dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n",
devnum, CODE_LENGTH, maxp);
-
mem_failure = 0;
ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL);
if (!ir) {
@@ -406,9 +428,8 @@ static int usb_remote_probe(struct usb_interface *intf,
goto mem_failure_switch;
}
- ir->buf_in = usb_alloc_coherent(dev,
- DEVICE_BUFLEN+DEVICE_HEADERLEN,
- GFP_ATOMIC, &ir->dma_in);
+ ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
+ GFP_ATOMIC, &ir->dma_in);
if (!ir->buf_in) {
mem_failure = 3;
goto mem_failure_switch;
@@ -424,12 +445,10 @@ static int usb_remote_probe(struct usb_interface *intf,
driver->set_use_inc = &set_use_inc;
driver->set_use_dec = &set_use_dec;
driver->sample_rate = sample_rate; /* per second */
- driver->add_to_buf = &usb_remote_poll;
+ driver->add_to_buf = &igorplugusb_remote_poll;
driver->dev = &intf->dev;
driver->owner = THIS_MODULE;
- init_waitqueue_head(&ir->wait_out);
-
minor = lirc_register_driver(driver);
if (minor < 0)
mem_failure = 9;
@@ -438,7 +457,7 @@ mem_failure_switch:
switch (mem_failure) {
case 9:
- usb_free_coherent(dev, DEVICE_BUFLEN+DEVICE_HEADERLEN,
+ usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
ir->buf_in, ir->dma_in);
case 3:
kfree(driver);
@@ -454,7 +473,7 @@ mem_failure_switch:
ir->d = driver;
ir->devnum = devnum;
ir->usbdev = dev;
- ir->len_in = DEVICE_BUFLEN+DEVICE_HEADERLEN;
+ ir->len_in = DEVICE_BUFLEN + DEVICE_HEADERLEN;
ir->in_space = 1; /* First mode2 event is a space. */
do_gettimeofday(&ir->last_time);
@@ -484,63 +503,64 @@ mem_failure_switch:
}
-static void usb_remote_disconnect(struct usb_interface *intf)
+static void igorplugusb_remote_disconnect(struct usb_interface *intf)
{
- struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_device *usbdev = interface_to_usbdev(intf);
struct igorplug *ir = usb_get_intfdata(intf);
+ struct device *dev = &intf->dev;
+ int devnum;
+
usb_set_intfdata(intf, NULL);
if (!ir || !ir->d)
return;
ir->usbdev = NULL;
- wake_up_all(&ir->wait_out);
- usb_free_coherent(dev, ir->len_in, ir->buf_in, ir->dma_in);
+ usb_free_coherent(usbdev, ir->len_in, ir->buf_in, ir->dma_in);
+
+ devnum = unregister_from_lirc(ir);
- unregister_from_lirc(ir);
+ dev_info(dev, DRIVER_NAME "[%d]: %s done\n", devnum, __func__);
}
-static struct usb_device_id usb_remote_id_table[] = {
+static struct usb_device_id igorplugusb_remote_id_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
+ /* Fit PC2 Infrared Adapter */
+ { USB_DEVICE(0x03eb, 0x21fe) },
/* Terminating entry */
{ }
};
-static struct usb_driver usb_remote_driver = {
+static struct usb_driver igorplugusb_remote_driver = {
.name = DRIVER_NAME,
- .probe = usb_remote_probe,
- .disconnect = usb_remote_disconnect,
- .id_table = usb_remote_id_table
+ .probe = igorplugusb_remote_probe,
+ .disconnect = igorplugusb_remote_disconnect,
+ .id_table = igorplugusb_remote_id_table
};
-static int __init usb_remote_init(void)
+static int __init igorplugusb_remote_init(void)
{
- int i;
+ int ret = 0;
- printk(KERN_INFO "\n"
- DRIVER_NAME ": " DRIVER_DESC " v" DRIVER_VERSION "\n");
- printk(DRIVER_NAME ": " DRIVER_AUTHOR "\n");
- dprintk(DRIVER_NAME ": debug mode enabled\n");
+ dprintk(DRIVER_NAME ": loaded, debug mode enabled\n");
- i = usb_register(&usb_remote_driver);
- if (i < 0) {
- printk(DRIVER_NAME ": usb register failed, result = %d\n", i);
- return -ENODEV;
- }
+ ret = usb_register(&igorplugusb_remote_driver);
+ if (ret)
+ printk(KERN_ERR DRIVER_NAME ": usb register failed!\n");
- return 0;
+ return ret;
}
-static void __exit usb_remote_exit(void)
+static void __exit igorplugusb_remote_exit(void)
{
- usb_deregister(&usb_remote_driver);
+ usb_deregister(&igorplugusb_remote_driver);
}
-module_init(usb_remote_init);
-module_exit(usb_remote_exit);
+module_init(igorplugusb_remote_init);
+module_exit(igorplugusb_remote_exit);
#include <linux/vermagic.h>
MODULE_INFO(vermagic, VERMAGIC_STRING);
@@ -548,8 +568,10 @@ MODULE_INFO(vermagic, VERMAGIC_STRING);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(usb, usb_remote_id_table);
+MODULE_DEVICE_TABLE(usb, igorplugusb_remote_id_table);
module_param(sample_rate, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)");
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 543c5c3bf907..929ae5795467 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -239,8 +239,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
int retval = 0;
- unsigned long value = 0;
- unsigned int ivalue;
+ __u32 value = 0;
unsigned long hw_flags;
if (cmd == LIRC_GET_FEATURES)
@@ -256,24 +255,24 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case LIRC_GET_FEATURES:
case LIRC_GET_SEND_MODE:
case LIRC_GET_REC_MODE:
- retval = put_user(value, (unsigned long *) arg);
+ retval = put_user(value, (__u32 *) arg);
break;
case LIRC_SET_SEND_MODE:
case LIRC_SET_REC_MODE:
- retval = get_user(value, (unsigned long *) arg);
+ retval = get_user(value, (__u32 *) arg);
break;
case LIRC_SET_SEND_CARRIER:
- retval = get_user(ivalue, (unsigned int *) arg);
+ retval = get_user(value, (__u32 *) arg);
if (retval)
return retval;
- ivalue /= 1000;
- if (ivalue > IT87_CIR_FREQ_MAX ||
- ivalue < IT87_CIR_FREQ_MIN)
+ value /= 1000;
+ if (value > IT87_CIR_FREQ_MAX ||
+ value < IT87_CIR_FREQ_MIN)
return -EINVAL;
- it87_freq = ivalue;
+ it87_freq = value;
spin_lock_irqsave(&hardware_lock, hw_flags);
outb(((inb(io + IT87_CIR_TCR2) & IT87_CIR_TCR2_TXMPW) |
@@ -340,6 +339,9 @@ static const struct file_operations lirc_fops = {
.write = lirc_write,
.poll = lirc_poll,
.unlocked_ioctl = lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_ioctl,
+#endif
.open = lirc_open,
.release = lirc_close,
.llseek = noop_llseek,
@@ -964,10 +966,11 @@ static void __exit lirc_it87_exit(void)
printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
}
-/* SECTION: PNP for ITE8704/18 */
+/* SECTION: PNP for ITE8704/13/18 */
static const struct pnp_device_id pnp_dev_table[] = {
{"ITE8704", 0},
+ {"ITE8713", 0},
{}
};
diff --git a/drivers/staging/lirc/lirc_ite8709.c b/drivers/staging/lirc/lirc_ite8709.c
index 9352f45bbece..cb20cfdcfadd 100644
--- a/drivers/staging/lirc/lirc_ite8709.c
+++ b/drivers/staging/lirc/lirc_ite8709.c
@@ -102,8 +102,8 @@ struct ite8709_device {
int io;
int irq;
spinlock_t hardware_lock;
- unsigned long long acc_pulse;
- unsigned long long acc_space;
+ __u64 acc_pulse;
+ __u64 acc_space;
char lastbit;
struct timeval last_tv;
struct lirc_driver driver;
@@ -220,7 +220,7 @@ static void ite8709_set_use_dec(void *data)
}
static void ite8709_add_read_queue(struct ite8709_device *dev, int flag,
- unsigned long long val)
+ __u64 val)
{
int value;
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index 6da4a8c6ebc3..884904c782d1 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -24,10 +24,6 @@
/*** Includes ***/
-#ifdef CONFIG_SMP
-#error "--- Sorry, this driver is not SMP safe. ---"
-#endif
-
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
@@ -301,9 +297,9 @@ static void irq_handler(void *blah)
if (signal != 0) {
/* adjust value to usecs */
- unsigned long long helper;
+ __u64 helper;
- helper = ((unsigned long long) signal)*1000000;
+ helper = ((__u64) signal)*1000000;
do_div(helper, timer);
signal = (long) helper;
@@ -404,9 +400,9 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
/* adjust values from usecs */
for (i = 0; i < count; i++) {
- unsigned long long helper;
+ __u64 helper;
- helper = ((unsigned long long) wbuf[i])*timer;
+ helper = ((__u64) wbuf[i])*timer;
do_div(helper, 1000000);
wbuf[i] = (int) helper;
}
@@ -464,48 +460,48 @@ static unsigned int lirc_poll(struct file *file, poll_table *wait)
static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
int result;
- unsigned long features = LIRC_CAN_SET_TRANSMITTER_MASK |
- LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
- unsigned long mode;
- unsigned int ivalue;
+ __u32 features = LIRC_CAN_SET_TRANSMITTER_MASK |
+ LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2;
+ __u32 mode;
+ __u32 value;
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(features, (unsigned long *) arg);
+ result = put_user(features, (__u32 *) arg);
if (result)
return result;
break;
case LIRC_GET_SEND_MODE:
- result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
+ result = put_user(LIRC_MODE_PULSE, (__u32 *) arg);
if (result)
return result;
break;
case LIRC_GET_REC_MODE:
- result = put_user(LIRC_MODE_MODE2, (unsigned long *) arg);
+ result = put_user(LIRC_MODE_MODE2, (__u32 *) arg);
if (result)
return result;
break;
case LIRC_SET_SEND_MODE:
- result = get_user(mode, (unsigned long *) arg);
+ result = get_user(mode, (__u32 *) arg);
if (result)
return result;
if (mode != LIRC_MODE_PULSE)
return -EINVAL;
break;
case LIRC_SET_REC_MODE:
- result = get_user(mode, (unsigned long *) arg);
+ result = get_user(mode, (__u32 *) arg);
if (result)
return result;
if (mode != LIRC_MODE_MODE2)
return -ENOSYS;
break;
case LIRC_SET_TRANSMITTER_MASK:
- result = get_user(ivalue, (unsigned int *) arg);
+ result = get_user(value, (__u32 *) arg);
if (result)
return result;
- if ((ivalue & LIRC_PARALLEL_TRANSMITTER_MASK) != ivalue)
+ if ((value & LIRC_PARALLEL_TRANSMITTER_MASK) != value)
return LIRC_PARALLEL_MAX_TRANSMITTERS;
- tx_mask = ivalue;
+ tx_mask = value;
break;
default:
return -ENOIOCTLCMD;
@@ -546,6 +542,9 @@ static const struct file_operations lirc_fops = {
.write = lirc_write,
.poll = lirc_poll,
.unlocked_ioctl = lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_ioctl,
+#endif
.open = lirc_open,
.release = lirc_close
};
@@ -576,28 +575,6 @@ static struct lirc_driver driver = {
static int pf(void *handle);
static void kf(void *handle);
-static struct timer_list poll_timer;
-static void poll_state(unsigned long ignored);
-
-static void poll_state(unsigned long ignored)
-{
- printk(KERN_NOTICE "%s: time\n",
- LIRC_DRIVER_NAME);
- del_timer(&poll_timer);
- if (is_claimed)
- return;
- kf(NULL);
- if (!is_claimed) {
- printk(KERN_NOTICE "%s: could not claim port, giving up\n",
- LIRC_DRIVER_NAME);
- init_timer(&poll_timer);
- poll_timer.expires = jiffies + HZ;
- poll_timer.data = (unsigned long)current;
- poll_timer.function = poll_state;
- add_timer(&poll_timer);
- }
-}
-
static int pf(void *handle)
{
parport_disable_irq(pport);
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 8da382492612..971844bbee28 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -372,7 +372,7 @@ static unsigned long conv_us_to_clocks;
static int init_timing_params(unsigned int new_duty_cycle,
unsigned int new_freq)
{
- unsigned long long loops_per_sec, work;
+ __u64 loops_per_sec, work;
duty_cycle = new_duty_cycle;
freq = new_freq;
@@ -987,8 +987,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
int result;
- unsigned long value;
- unsigned int ivalue;
+ __u32 value;
switch (cmd) {
case LIRC_GET_SEND_MODE:
@@ -997,7 +996,7 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
result = put_user(LIRC_SEND2MODE
(hardware[type].features&LIRC_CAN_SEND_MASK),
- (unsigned long *) arg);
+ (__u32 *) arg);
if (result)
return result;
break;
@@ -1006,7 +1005,7 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
return -ENOIOCTLCMD;
- result = get_user(value, (unsigned long *) arg);
+ result = get_user(value, (__u32 *) arg);
if (result)
return result;
/* only LIRC_MODE_PULSE supported */
@@ -1023,12 +1022,12 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
return -ENOIOCTLCMD;
- result = get_user(ivalue, (unsigned int *) arg);
+ result = get_user(value, (__u32 *) arg);
if (result)
return result;
- if (ivalue <= 0 || ivalue > 100)
+ if (value <= 0 || value > 100)
return -EINVAL;
- return init_timing_params(ivalue, freq);
+ return init_timing_params(value, freq);
break;
case LIRC_SET_SEND_CARRIER:
@@ -1036,12 +1035,12 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
return -ENOIOCTLCMD;
- result = get_user(ivalue, (unsigned int *) arg);
+ result = get_user(value, (__u32 *) arg);
if (result)
return result;
- if (ivalue > 500000 || ivalue < 20000)
+ if (value > 500000 || value < 20000)
return -EINVAL;
- return init_timing_params(duty_cycle, ivalue);
+ return init_timing_params(duty_cycle, value);
break;
default:
@@ -1054,6 +1053,9 @@ static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = lirc_write,
.unlocked_ioctl = lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_ioctl,
+#endif
.read = lirc_dev_fop_read,
.poll = lirc_dev_fop_poll,
.open = lirc_dev_fop_open,
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index 2478871bd95e..c553ab626238 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -336,9 +336,8 @@ static ssize_t lirc_write(struct file *file, const char *buf, size_t n,
static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
int retval = 0;
- unsigned long value = 0;
+ __u32 value = 0;
#ifdef LIRC_ON_SA1100
- unsigned int ivalue;
if (cmd == LIRC_GET_FEATURES)
value = LIRC_CAN_SEND_PULSE |
@@ -362,22 +361,22 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case LIRC_GET_FEATURES:
case LIRC_GET_SEND_MODE:
case LIRC_GET_REC_MODE:
- retval = put_user(value, (unsigned long *) arg);
+ retval = put_user(value, (__u32 *) arg);
break;
case LIRC_SET_SEND_MODE:
case LIRC_SET_REC_MODE:
- retval = get_user(value, (unsigned long *) arg);
+ retval = get_user(value, (__u32 *) arg);
break;
#ifdef LIRC_ON_SA1100
case LIRC_SET_SEND_DUTY_CYCLE:
- retval = get_user(ivalue, (unsigned int *) arg);
+ retval = get_user(value, (__u32 *) arg);
if (retval)
return retval;
- if (ivalue <= 0 || ivalue > 100)
+ if (value <= 0 || value > 100)
return -EINVAL;
- /* (ivalue/100)*(1000000/freq) */
- duty_cycle = ivalue;
+ /* (value/100)*(1000000/freq) */
+ duty_cycle = value;
pulse_width = (unsigned long) duty_cycle*10000/freq;
space_width = (unsigned long) 1000000L/freq-pulse_width;
if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY)
@@ -386,12 +385,12 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
space_width -= LIRC_ON_SA1100_TRANSMITTER_LATENCY;
break;
case LIRC_SET_SEND_CARRIER:
- retval = get_user(ivalue, (unsigned int *) arg);
+ retval = get_user(value, (__u32 *) arg);
if (retval)
return retval;
- if (ivalue > 500000 || ivalue < 20000)
+ if (value > 500000 || value < 20000)
return -EINVAL;
- freq = ivalue;
+ freq = value;
pulse_width = (unsigned long) duty_cycle*10000/freq;
space_width = (unsigned long) 1000000L/freq-pulse_width;
if (pulse_width >= LIRC_ON_SA1100_TRANSMITTER_LATENCY)
@@ -457,6 +456,9 @@ static const struct file_operations lirc_fops = {
.write = lirc_write,
.poll = lirc_poll,
.unlocked_ioctl = lirc_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lirc_ioctl,
+#endif
.open = lirc_dev_fop_open,
.release = lirc_dev_fop_close,
.llseek = no_llseek,
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index 100caab10451..d92064498523 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -1139,6 +1139,9 @@ static const struct file_operations lirc_fops = {
.write = write,
.poll = poll,
.unlocked_ioctl = ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ioctl,
+#endif
.open = open,
.release = close
};
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 97dae297ca3c..c62d30017c07 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -882,12 +882,8 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
static int pohmelfs_fsync(struct file *file, int datasync)
{
struct inode *inode = file->f_mapping->host;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0, /* sys_fsync did this */
- };
- return sync_inode(inode, &wbc);
+ return sync_inode_metadata(inode, 1);
}
ssize_t pohmelfs_write(struct file *file, const char __user *buf,
diff --git a/drivers/staging/stradis/Kconfig b/drivers/staging/stradis/Kconfig
new file mode 100644
index 000000000000..92e891141896
--- /dev/null
+++ b/drivers/staging/stradis/Kconfig
@@ -0,0 +1,7 @@
+config VIDEO_STRADIS
+ tristate "Stradis 4:2:2 MPEG-2 video driver (DEPRECATED)"
+ depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS
+ help
+ Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
+ driver for PCI. There is a product page at
+ <http://www.stradis.com/>.
diff --git a/drivers/staging/stradis/Makefile b/drivers/staging/stradis/Makefile
new file mode 100644
index 000000000000..0f1feab59e39
--- /dev/null
+++ b/drivers/staging/stradis/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
+
+EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/staging/stradis/TODO b/drivers/staging/stradis/TODO
new file mode 100644
index 000000000000..f48150fe2fa9
--- /dev/null
+++ b/drivers/staging/stradis/TODO
@@ -0,0 +1,6 @@
+This is an obsolete driver for ancient Stradis hardware.
+We could not find anyone with this hardware to port it to V4L2.
+
+If nobody takes care of it, the driver will be removed in 2.6.38.
+
+Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/media/video/stradis.c b/drivers/staging/stradis/stradis.c
index a057824e7ebc..a057824e7ebc 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/staging/stradis/stradis.c
diff --git a/drivers/staging/tm6000/TODO b/drivers/staging/tm6000/TODO
new file mode 100644
index 000000000000..34780fc17b16
--- /dev/null
+++ b/drivers/staging/tm6000/TODO
@@ -0,0 +1,6 @@
+There are a few things to do before putting this driver in production:
+ - CodingStyle;
+ - Fix audio;
+ - Fix some panic/OOPS conditions.
+
+Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
index 087137d9164d..e379e3ec4442 100644
--- a/drivers/staging/tm6000/tm6000-alsa.c
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -160,15 +160,15 @@ static struct snd_pcm_hardware snd_tm6000_digital_hw = {
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_48000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
- .period_bytes_min = 62720,
- .period_bytes_max = 62720,
+ .period_bytes_min = 64,
+ .period_bytes_max = 12544,
.periods_min = 1,
- .periods_max = 1024,
+ .periods_max = 98,
.buffer_bytes_max = 62720 * 8,
};
@@ -201,6 +201,14 @@ _error:
*/
static int snd_tm6000_close(struct snd_pcm_substream *substream)
{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct tm6000_core *core = chip->core;
+
+ if (atomic_read(&core->stream_started) > 0) {
+ atomic_set(&core->stream_started, 0);
+ schedule_work(&core->wq_trigger);
+ }
+
return 0;
}
@@ -211,38 +219,67 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
struct snd_pcm_runtime *runtime;
int period_elapsed = 0;
unsigned int stride, buf_pos;
+ int length;
+
+ if (atomic_read(&core->stream_started) == 0)
+ return 0;
- if (!size || !substream)
+ if (!size || !substream) {
+ dprintk(1, "substream was NULL\n");
return -EINVAL;
+ }
runtime = substream->runtime;
- if (!runtime || !runtime->dma_area)
+ if (!runtime || !runtime->dma_area) {
+ dprintk(1, "runtime was NULL\n");
return -EINVAL;
+ }
buf_pos = chip->buf_pos;
stride = runtime->frame_bits >> 3;
+ if (stride == 0) {
+ dprintk(1, "stride is zero\n");
+ return -EINVAL;
+ }
+
+ length = size / stride;
+ if (length == 0) {
+ dprintk(1, "%s: length was zero\n", __func__);
+ return -EINVAL;
+ }
+
dprintk(1, "Copying %d bytes at %p[%d] - buf size=%d x %d\n", size,
runtime->dma_area, buf_pos,
(unsigned int)runtime->buffer_size, stride);
- if (buf_pos + size >= runtime->buffer_size * stride) {
- unsigned int cnt = runtime->buffer_size * stride - buf_pos;
- memcpy(runtime->dma_area + buf_pos, buf, cnt);
- memcpy(runtime->dma_area, buf + cnt, size - cnt);
+ if (buf_pos + length >= runtime->buffer_size) {
+ unsigned int cnt = runtime->buffer_size - buf_pos;
+ memcpy(runtime->dma_area + buf_pos * stride, buf, cnt * stride);
+ memcpy(runtime->dma_area, buf + cnt * stride,
+ length * stride - cnt * stride);
} else
- memcpy(runtime->dma_area + buf_pos, buf, size);
+ memcpy(runtime->dma_area + buf_pos * stride, buf,
+ length * stride);
- chip->buf_pos += size;
- if (chip->buf_pos >= runtime->buffer_size * stride)
- chip->buf_pos -= runtime->buffer_size * stride;
+#ifndef NO_PCM_LOCK
+ snd_pcm_stream_lock(substream);
+#endif
- chip->period_pos += size;
+ chip->buf_pos += length;
+ if (chip->buf_pos >= runtime->buffer_size)
+ chip->buf_pos -= runtime->buffer_size;
+
+ chip->period_pos += length;
if (chip->period_pos >= runtime->period_size) {
chip->period_pos -= runtime->period_size;
period_elapsed = 1;
}
+#ifndef NO_PCM_LOCK
+ snd_pcm_stream_unlock(substream);
+#endif
+
if (period_elapsed)
snd_pcm_period_elapsed(substream);
@@ -272,8 +309,12 @@ static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
{
struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct tm6000_core *core = chip->core;
- _tm6000_stop_audio_dma(chip);
+ if (atomic_read(&core->stream_started) > 0) {
+ atomic_set(&core->stream_started, 0);
+ schedule_work(&core->wq_trigger);
+ }
return 0;
}
@@ -295,30 +336,42 @@ static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
/*
* trigger callback
*/
+static void audio_trigger(struct work_struct *work)
+{
+ struct tm6000_core *core = container_of(work, struct tm6000_core,
+ wq_trigger);
+ struct snd_tm6000_card *chip = core->adev;
+
+ if (atomic_read(&core->stream_started)) {
+ dprintk(1, "starting capture");
+ _tm6000_start_audio_dma(chip);
+ } else {
+ dprintk(1, "stopping capture");
+ _tm6000_stop_audio_dma(chip);
+ }
+}
+
static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
- int err;
-
- spin_lock(&chip->reg_lock);
+ struct tm6000_core *core = chip->core;
+ int err = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err = _tm6000_start_audio_dma(chip);
+ atomic_set(&core->stream_started, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
- err = _tm6000_stop_audio_dma(chip);
+ atomic_set(&core->stream_started, 0);
break;
default:
err = -EINVAL;
break;
}
-
- spin_unlock(&chip->reg_lock);
+ schedule_work(&core->wq_trigger);
return err;
}
-
/*
* pointer callback
*/
@@ -411,6 +464,7 @@ int tm6000_audio_init(struct tm6000_core *dev)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+ INIT_WORK(&dev->wq_trigger, audio_trigger);
rc = snd_card_register(card);
if (rc < 0)
goto error;
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
index 9d091c34991b..664e6038090d 100644
--- a/drivers/staging/tm6000/tm6000-cards.c
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -1,20 +1,20 @@
/*
- tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -349,7 +349,7 @@ int tm6000_xc5000_callback(void *ptr, int component, int command, int arg)
dev->gpio.tuner_reset, 0x01);
break;
}
- return (rc);
+ return rc;
}
EXPORT_SYMBOL_GPL(tm6000_xc5000_callback);
@@ -545,7 +545,7 @@ static void tm6000_config_tuner(struct tm6000_core *dev)
/* Load tuner module */
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tuner", "tuner", dev->tuner_addr, NULL);
+ NULL, "tuner", dev->tuner_addr, NULL);
memset(&tun_setup, 0, sizeof(tun_setup));
tun_setup.type = dev->tuner_type;
@@ -683,7 +683,7 @@ static int tm6000_init_dev(struct tm6000_core *dev)
if (dev->caps.has_tda9874)
v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
- "tvaudio", "tvaudio", I2C_ADDR_TDA9874, NULL);
+ NULL, "tvaudio", I2C_ADDR_TDA9874, NULL);
/* register and initialize V4L2 */
rc = tm6000_v4l2_register(dev);
@@ -909,8 +909,6 @@ static void tm6000_usb_disconnect(struct usb_interface *interface)
printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name);
- mutex_lock(&dev->lock);
-
tm6000_ir_fini(dev);
if (dev->gpio.power_led) {
@@ -945,7 +943,6 @@ static void tm6000_usb_disconnect(struct usb_interface *interface)
tm6000_close_extension(dev);
tm6000_remove_from_devlist(dev);
- mutex_unlock(&dev->lock);
kfree(dev);
}
diff --git a/drivers/staging/tm6000/tm6000-core.c b/drivers/staging/tm6000/tm6000-core.c
index cded411d8bba..df3f187959b9 100644
--- a/drivers/staging/tm6000/tm6000-core.c
+++ b/drivers/staging/tm6000/tm6000-core.c
@@ -1,23 +1,23 @@
/*
- tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
- - DVB-T support
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ * - DVB-T support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -30,14 +30,13 @@
#include <media/v4l2-common.h>
#include <media/tuner.h>
-#define USB_TIMEOUT 5*HZ /* ms */
+#define USB_TIMEOUT (5 * HZ) /* ms */
int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
u16 value, u16 index, u8 *buf, u16 len)
{
int ret, i;
unsigned int pipe;
- static int ini = 0, last = 0, n = 0;
u8 *data = NULL;
if (len)
@@ -52,19 +51,12 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
}
if (tm6000_debug & V4L2_DEBUG_I2C) {
- if (!ini)
- last = ini = jiffies;
-
- printk("%06i (dev %p, pipe %08x): ", n, dev->udev, pipe);
+ printk("(dev %p, pipe %08x): ", dev->udev, pipe);
- printk("%s: %06u ms %06u ms %02x %02x %02x %02x %02x %02x %02x %02x ",
+ printk("%s: %02x %02x %02x %02x %02x %02x %02x %02x ",
(req_type & USB_DIR_IN) ? " IN" : "OUT",
- jiffies_to_msecs(jiffies-last),
- jiffies_to_msecs(jiffies-ini),
req_type, req, value&0xff, value>>8, index&0xff,
index>>8, len&0xff, len>>8);
- last = jiffies;
- n++;
if (!(req_type & USB_DIR_IN)) {
printk(">>> ");
@@ -186,21 +178,17 @@ void tm6000_set_fourcc_format(struct tm6000_core *dev)
}
}
-int tm6000_init_analog_mode(struct tm6000_core *dev)
+static void tm6000_set_vbi(struct tm6000_core *dev)
{
- if (dev->dev_type == TM6010) {
- int val;
+ /*
+ * FIXME:
+ * The VBI line count and start/end positions differ between 60 Hz
+ * and 50 Hz standards, so the configuration very likely needs to
+ * take that into account and do something different
+ * if (dev->norm & V4L2_STD_525_60)
+ */
- /* Enable video */
- val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
- val |= 0x60;
- tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
- val = tm6000_get_reg(dev,
- TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
- val &= ~0x40;
- tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
-
- /* Init teletext */
+ if (dev->dev_type == TM6010) {
tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
tm6000_set_reg(dev, TM6010_REQ07_R41_TELETEXT_VBI_CODE1, 0x27);
tm6000_set_reg(dev, TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55);
@@ -249,44 +237,26 @@ int tm6000_init_analog_mode(struct tm6000_core *dev)
tm6000_set_reg(dev, TM6010_REQ07_R5B_VBI_TELETEXT_DTO0, 0x4c);
tm6000_set_reg(dev, TM6010_REQ07_R40_TELETEXT_VBI_CODE0, 0x01);
tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00);
+ }
+}
+int tm6000_init_analog_mode(struct tm6000_core *dev)
+{
+ struct v4l2_frequency f;
+
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ /* Enable video */
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
+ val |= 0x60;
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ val = tm6000_get_reg(dev,
+ TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
+ val &= ~0x40;
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
- /* Init audio */
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
- tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
- tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x05);
- tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x06);
- tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
- tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
- tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
- tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
- tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
- tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
- tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
- tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
- tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
- tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
- tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
- tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
- tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
- tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
- tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
- tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
- tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
- tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
- tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
- tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
- tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
- tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc);
} else {
/* Enables soft reset */
@@ -325,15 +295,22 @@ int tm6000_init_analog_mode(struct tm6000_core *dev)
/* Tuner firmware can now be loaded */
- /*FIXME: Hack!!! */
- struct v4l2_frequency f;
- mutex_lock(&dev->lock);
+ /*
+ * FIXME: This is a hack! The xc3028 "sleeps" when no channel is
+ * detected for more than a few seconds. It is not clear why, as this
+ * behavior does not happen on other devices with xc3028, so it is
+ * likely yet another bug in the tm6000. Once it starts sleeping,
+ * decoding does not resume automatically; it takes a few I2C
+ * commands to wake it up. As we want an image right from the start,
+ * we need this hack. It would be better to find a way to wake the
+ * tm6000 up without it.
+ */
f.frequency = dev->freq;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
- mutex_unlock(&dev->lock);
msleep(100);
tm6000_set_standard(dev, &dev->norm);
+ tm6000_set_vbi(dev);
tm6000_set_audio_bitrate(dev, 48000);
/* switch dvb led off */
@@ -361,7 +338,6 @@ int tm6000_init_digital_mode(struct tm6000_core *dev)
tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0x28);
tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xfc);
tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0xff);
- tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe);
tm6000_read_write_usb(dev, 0xc0, 0x0e, 0x00c2, 0x0008, buf, 2);
printk(KERN_INFO"buf %#x %#x\n", buf[0], buf[1]);
} else {
@@ -660,7 +636,6 @@ void tm6000_add_into_devlist(struct tm6000_core *dev)
*/
static LIST_HEAD(tm6000_extension_devlist);
-static DEFINE_MUTEX(tm6000_extension_devlist_lock);
int tm6000_call_fillbuf(struct tm6000_core *dev, enum tm6000_ops_type type,
char *buf, int size)
@@ -684,14 +659,12 @@ int tm6000_register_extension(struct tm6000_ops *ops)
struct tm6000_core *dev = NULL;
mutex_lock(&tm6000_devlist_mutex);
- mutex_lock(&tm6000_extension_devlist_lock);
list_add_tail(&ops->next, &tm6000_extension_devlist);
list_for_each_entry(dev, &tm6000_devlist, devlist) {
ops->init(dev);
printk(KERN_INFO "%s: Initialized (%s) extension\n",
dev->name, ops->name);
}
- mutex_unlock(&tm6000_extension_devlist_lock);
mutex_unlock(&tm6000_devlist_mutex);
return 0;
}
@@ -707,10 +680,8 @@ void tm6000_unregister_extension(struct tm6000_ops *ops)
ops->fini(dev);
}
- mutex_lock(&tm6000_extension_devlist_lock);
printk(KERN_INFO "tm6000: Remove (%s) extension\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&tm6000_extension_devlist_lock);
mutex_unlock(&tm6000_devlist_mutex);
}
EXPORT_SYMBOL(tm6000_unregister_extension);
@@ -719,26 +690,26 @@ void tm6000_init_extension(struct tm6000_core *dev)
{
struct tm6000_ops *ops = NULL;
- mutex_lock(&tm6000_extension_devlist_lock);
+ mutex_lock(&tm6000_devlist_mutex);
if (!list_empty(&tm6000_extension_devlist)) {
list_for_each_entry(ops, &tm6000_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&tm6000_extension_devlist_lock);
+ mutex_unlock(&tm6000_devlist_mutex);
}
void tm6000_close_extension(struct tm6000_core *dev)
{
struct tm6000_ops *ops = NULL;
- mutex_lock(&tm6000_extension_devlist_lock);
+ mutex_lock(&tm6000_devlist_mutex);
if (!list_empty(&tm6000_extension_devlist)) {
list_for_each_entry(ops, &tm6000_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&tm6000_extension_devlist_lock);
+ mutex_unlock(&tm6000_devlist_mutex);
}
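The tm6000-core.c hunks above drop the separate tm6000_extension_devlist_lock and serialize all extension-list walks with the existing tm6000_devlist_mutex. A minimal sketch of the resulting pattern (illustration only, not part of the patch; names taken from the hunks above):

/*
 * Illustration: every walker of tm6000_extension_devlist now takes the
 * same tm6000_devlist_mutex that already guards the device list, and
 * releases it again before returning.
 */
void tm6000_init_extension(struct tm6000_core *dev)
{
	struct tm6000_ops *ops;

	mutex_lock(&tm6000_devlist_mutex);
	list_for_each_entry(ops, &tm6000_extension_devlist, next) {
		if (ops->init)
			ops->init(dev);
	}
	mutex_unlock(&tm6000_devlist_mutex);	/* unlock, not lock, on exit */
}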
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/staging/tm6000/tm6000-dvb.c
index f501edccf9c4..ff04c89e45a3 100644
--- a/drivers/staging/tm6000/tm6000-dvb.c
+++ b/drivers/staging/tm6000/tm6000-dvb.c
@@ -1,20 +1,20 @@
/*
- tm6000-dvb.c - dvb-t support for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-dvb.c - dvb-t support for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
index 79bc67f0311f..3e46866dd279 100644
--- a/drivers/staging/tm6000/tm6000-i2c.c
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -1,23 +1,23 @@
/*
- tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
- - Fix SMBus Read Byte command
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ * - Fix SMBus Read Byte command
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -32,11 +32,9 @@
#include "tuner-xc2028.h"
-/*FIXME: Hack to avoid needing to patch i2c-id.h */
-#define I2C_HW_B_TM6000 I2C_HW_B_EM28XX
/* ----------------------------------------------------------- */
-static unsigned int i2c_debug = 0;
+static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
@@ -324,7 +322,6 @@ static struct i2c_adapter tm6000_adap_template = {
.owner = THIS_MODULE,
.class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
.name = "tm6000",
- .id = I2C_HW_B_TM6000,
.algo = &tm6000_algo,
};
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c
index 54f7667cc706..6022caaa739b 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/staging/tm6000/tm6000-input.c
@@ -1,20 +1,20 @@
/*
- tm6000-input.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2010 Stefan Ringel <stefan.ringel@arcor.de>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-input.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2010 Stefan Ringel <stefan.ringel@arcor.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(ir_debug, "enable debug message [IR]");
static unsigned int enable_ir = 1;
module_param(enable_ir, int, 0644);
-MODULE_PARM_DESC(enable_ir, "enable ir (default is enable");
+MODULE_PARM_DESC(enable_ir, "enable ir (default is enable)");
#undef dprintk
diff --git a/drivers/staging/tm6000/tm6000-regs.h b/drivers/staging/tm6000/tm6000-regs.h
index 1c5289c971fa..1f0ced8fa20f 100644
--- a/drivers/staging/tm6000/tm6000-regs.h
+++ b/drivers/staging/tm6000/tm6000-regs.h
@@ -1,20 +1,20 @@
/*
- tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/staging/tm6000/tm6000-stds.c b/drivers/staging/tm6000/tm6000-stds.c
index 6bf4a73b320d..cc7b8664fc20 100644
--- a/drivers/staging/tm6000/tm6000-stds.c
+++ b/drivers/staging/tm6000/tm6000-stds.c
@@ -1,20 +1,20 @@
/*
- tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -28,21 +28,37 @@ struct tm6000_reg_settings {
unsigned char value;
};
+enum tm6000_audio_std {
+ BG_NICAM,
+ BTSC,
+ BG_A2,
+ DK_NICAM,
+ EIAJ,
+ FM_RADIO,
+ I_NICAM,
+ KOREA_A2,
+ L_NICAM,
+};
+
struct tm6000_std_tv_settings {
v4l2_std_id id;
+ enum tm6000_audio_std audio_default_std;
+
struct tm6000_reg_settings sif[12];
struct tm6000_reg_settings nosif[12];
- struct tm6000_reg_settings common[25];
+ struct tm6000_reg_settings common[26];
};
struct tm6000_std_settings {
v4l2_std_id id;
+ enum tm6000_audio_std audio_default_std;
struct tm6000_reg_settings common[37];
};
static struct tm6000_std_tv_settings tv_stds[] = {
{
.id = V4L2_STD_PAL_M,
+ .audio_default_std = BTSC,
.sif = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
@@ -96,11 +112,14 @@ static struct tm6000_std_tv_settings tv_stds[] = {
{TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
{TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
{TM6010_REQ07_R3F_RESET, 0x00},
+
{0, 0, 0},
},
}, {
.id = V4L2_STD_PAL_Nc,
+ .audio_default_std = BTSC,
.sif = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
@@ -154,11 +173,14 @@ static struct tm6000_std_tv_settings tv_stds[] = {
{TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
{TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
{TM6010_REQ07_R3F_RESET, 0x00},
+
{0, 0, 0},
},
}, {
.id = V4L2_STD_PAL,
+ .audio_default_std = BG_A2,
.sif = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
@@ -212,11 +234,73 @@ static struct tm6000_std_tv_settings tv_stds[] = {
{TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
{TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
{TM6010_REQ07_R3F_RESET, 0x00},
+
{0, 0, 0},
},
}, {
- .id = V4L2_STD_SECAM,
+ .id = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
+ .audio_default_std = BG_NICAM,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM_DK,
+ .audio_default_std = DK_NICAM,
.sif = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
@@ -269,11 +353,13 @@ static struct tm6000_std_tv_settings tv_stds[] = {
{TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
{TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
{TM6010_REQ07_R3F_RESET, 0x00},
{0, 0, 0},
},
}, {
.id = V4L2_STD_NTSC,
+ .audio_default_std = BTSC,
.sif = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
@@ -327,7 +413,9 @@ static struct tm6000_std_tv_settings tv_stds[] = {
{TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
{TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+
{TM6010_REQ07_R3F_RESET, 0x00},
+
{0, 0, 0},
},
},
@@ -336,6 +424,7 @@ static struct tm6000_std_tv_settings tv_stds[] = {
static struct tm6000_std_settings composite_stds[] = {
{
.id = V4L2_STD_PAL_M,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
@@ -378,6 +467,7 @@ static struct tm6000_std_settings composite_stds[] = {
},
}, {
.id = V4L2_STD_PAL_Nc,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
@@ -420,6 +510,7 @@ static struct tm6000_std_settings composite_stds[] = {
},
}, {
.id = V4L2_STD_PAL,
+ .audio_default_std = BG_A2,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
@@ -462,6 +553,49 @@ static struct tm6000_std_settings composite_stds[] = {
},
}, {
.id = V4L2_STD_SECAM,
+ .audio_default_std = BG_NICAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM_DK,
+ .audio_default_std = DK_NICAM,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
@@ -503,6 +637,7 @@ static struct tm6000_std_settings composite_stds[] = {
},
}, {
.id = V4L2_STD_NTSC,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
@@ -549,6 +684,7 @@ static struct tm6000_std_settings composite_stds[] = {
static struct tm6000_std_settings svideo_stds[] = {
{
.id = V4L2_STD_PAL_M,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
@@ -591,6 +727,7 @@ static struct tm6000_std_settings svideo_stds[] = {
},
}, {
.id = V4L2_STD_PAL_Nc,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
@@ -633,6 +770,7 @@ static struct tm6000_std_settings svideo_stds[] = {
},
}, {
.id = V4L2_STD_PAL,
+ .audio_default_std = BG_A2,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
@@ -675,6 +813,49 @@ static struct tm6000_std_settings svideo_stds[] = {
},
}, {
.id = V4L2_STD_SECAM,
+ .audio_default_std = BG_NICAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM_DK,
+ .audio_default_std = DK_NICAM,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
@@ -716,6 +897,7 @@ static struct tm6000_std_settings svideo_stds[] = {
},
}, {
.id = V4L2_STD_NTSC,
+ .audio_default_std = BTSC,
.common = {
{TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
{TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
@@ -760,6 +942,133 @@ static struct tm6000_std_settings svideo_stds[] = {
},
};
+
+static int tm6000_set_audio_std(struct tm6000_core *dev,
+ enum tm6000_audio_std std)
+{
+ uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
+ uint8_t areg_05 = 0x09; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
+ uint8_t areg_06 = 0x02; /* Auto de-emphasis, manual channel mode */
+ uint8_t mono_flag = 0; /* No mono */
+ uint8_t nicam_flag = 0; /* No NICAM */
+
+ switch (std) {
+#if 0
+ case DK_MONO:
+ mono_flag = 1;
+ break;
+ case DK_A2_1:
+ break;
+ case DK_A2_3:
+ areg_05 = 0x0b;
+ break;
+ case BG_MONO:
+ mono_flag = 1;
+ areg_05 = 0x05;
+ break;
+#endif
+ case BG_NICAM:
+ areg_05 = 0x07;
+ nicam_flag = 1;
+ break;
+ case BTSC:
+ areg_05 = 0x02;
+ break;
+ case BG_A2:
+ areg_05 = 0x05;
+ break;
+ case DK_NICAM:
+ areg_05 = 0x06;
+ nicam_flag = 1;
+ break;
+ case EIAJ:
+ areg_05 = 0x02;
+ break;
+ case FM_RADIO:
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x0c);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x18);
+ tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
+ tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+ return 0;
+ case I_NICAM:
+ areg_05 = 0x08;
+ nicam_flag = 1;
+ break;
+ case KOREA_A2:
+ areg_05 = 0x04;
+ break;
+ case L_NICAM:
+ areg_02 = 0x02; /* GC1 Fixed gain +12dB */
+ areg_05 = 0x0a;
+ nicam_flag = 1;
+ break;
+ }
+
+#if 0
+ switch (tv_audio_mode) {
+ case TV_MONO:
+ areg_06 = (nicam_flag) ? 0x03 : 0x00;
+ break;
+ case TV_LANG_A:
+ areg_06 = 0x00;
+ break;
+ case TV_LANG_B:
+ areg_06 = 0x01;
+ break;
+ }
+#endif
+
+ if (mono_flag)
+ areg_06 = 0x00;
+
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, areg_02);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, areg_05);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, areg_06);
+ tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
+ tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
+ tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
+ tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
+ tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
+ tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ return 0;
+}
+
void tm6000_get_std_res(struct tm6000_core *dev)
{
/* Currently, these are the only supported resolutions */
@@ -820,6 +1129,8 @@ static int tm6000_set_tv(struct tm6000_core *dev, int pos)
rc = tm6000_load_std(dev, tv_stds[pos].common,
sizeof(tv_stds[pos].common));
+ tm6000_set_audio_std(dev, tv_stds[pos].audio_default_std);
+
return rc;
}
@@ -845,6 +1156,8 @@ int tm6000_set_standard(struct tm6000_core *dev, v4l2_std_id * norm)
rc = tm6000_load_std(dev, svideo_stds[i].common,
sizeof(svideo_stds[i].
common));
+ tm6000_set_audio_std(dev, svideo_stds[i].audio_default_std);
+
goto ret;
}
}
@@ -856,6 +1169,7 @@ int tm6000_set_standard(struct tm6000_core *dev, v4l2_std_id * norm)
composite_stds[i].common,
sizeof(composite_stds[i].
common));
+ tm6000_set_audio_std(dev, composite_stds[i].audio_default_std);
goto ret;
}
}
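For every standard except FM_RADIO, the new tm6000_set_audio_std() above reduces to choosing a value for the A_STANDARD_MOD register, a fixed-gain setting, and a NICAM flag before the common register write-out; FM_RADIO programs its own register set and returns early. The same mapping expressed as a lookup table, for reference (values copied from the switch above; the struct and table names are illustrative, not part of the patch):

struct tm6000_audio_cfg {
	u8   std_mod;	/* written to TM6010_REQ08_R05_A_STANDARD_MOD */
	u8   fix_gain;	/* written to TM6010_REQ08_R02_A_FIX_GAIN_CTRL */
	bool nicam;	/* standard carries NICAM sound */
};

static const struct tm6000_audio_cfg audio_cfg[] = {
	[BG_NICAM] = { 0x07, 0x04, true  },
	[BTSC]     = { 0x02, 0x04, false },
	[BG_A2]    = { 0x05, 0x04, false },
	[DK_NICAM] = { 0x06, 0x04, true  },
	[EIAJ]     = { 0x02, 0x04, false },
	[I_NICAM]  = { 0x08, 0x04, true  },
	[KOREA_A2] = { 0x04, 0x04, false },
	[L_NICAM]  = { 0x0a, 0x02, true  },	/* GC1 fixed gain +12 dB */
	/* FM_RADIO is special-cased and does not use this table */
};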
diff --git a/drivers/staging/tm6000/tm6000-usb-isoc.h b/drivers/staging/tm6000/tm6000-usb-isoc.h
index 138716a8f056..a9e61d95a9b2 100644
--- a/drivers/staging/tm6000/tm6000-usb-isoc.h
+++ b/drivers/staging/tm6000/tm6000-usb-isoc.h
@@ -1,20 +1,20 @@
/*
- tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/videodev2.h>
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index ce0a089a0771..9ec82796634e 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1,23 +1,23 @@
/*
- tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
- - Fixed module load/unload
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ * - Fixed module load/unload
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/delay.h>
@@ -118,8 +118,9 @@ static struct tm6000_fmt format[] = {
};
/* ------------------------------------------------------------------
- DMA and thread functions
- ------------------------------------------------------------------*/
+ * DMA and thread functions
+ * ------------------------------------------------------------------
+ */
#define norm_maxw(a) 720
#define norm_maxh(a) 576
@@ -189,17 +190,17 @@ static int copy_streams(u8 *data, unsigned long len,
struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
- struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
- u8 *ptr=data, *endp=data+len, c;
- unsigned long header=0;
- int rc=0;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ u8 *ptr = data, *endp = data+len, c;
+ unsigned long header = 0;
+ int rc = 0;
unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
struct tm6000_buffer *vbuf;
char *voutp = NULL;
unsigned int linewidth;
/* get video buffer */
- get_next_buf (dma_q, &vbuf);
+ get_next_buf(dma_q, &vbuf);
if (!vbuf)
return rc;
voutp = videobuf_to_vmalloc(&vbuf->vb);
@@ -213,7 +214,7 @@ static int copy_streams(u8 *data, unsigned long len,
/* from last urb or packet */
header = dev->isoc_ctl.tmp_buf;
if (4 - dev->isoc_ctl.tmp_buf_len > 0) {
- memcpy ((u8 *)&header +
+ memcpy((u8 *)&header +
dev->isoc_ctl.tmp_buf_len,
ptr,
4 - dev->isoc_ctl.tmp_buf_len);
@@ -224,7 +225,7 @@ static int copy_streams(u8 *data, unsigned long len,
if (ptr + 3 >= endp) {
/* have incomplete header */
dev->isoc_ctl.tmp_buf_len = endp - ptr;
- memcpy (&dev->isoc_ctl.tmp_buf, ptr,
+ memcpy(&dev->isoc_ctl.tmp_buf, ptr,
dev->isoc_ctl.tmp_buf_len);
return rc;
}
@@ -261,13 +262,13 @@ static int copy_streams(u8 *data, unsigned long len,
/* Announces that a new buffer
* were filled
*/
- buffer_filled (dev, dma_q, vbuf);
- dprintk (dev, V4L2_DEBUG_ISOC,
+ buffer_filled(dev, dma_q, vbuf);
+ dprintk(dev, V4L2_DEBUG_ISOC,
"new buffer filled\n");
- get_next_buf (dma_q, &vbuf);
+ get_next_buf(dma_q, &vbuf);
if (!vbuf)
return rc;
- voutp = videobuf_to_vmalloc (&vbuf->vb);
+ voutp = videobuf_to_vmalloc(&vbuf->vb);
if (!voutp)
return rc;
memset(voutp, 0, vbuf->vb.size);
@@ -301,9 +302,17 @@ static int copy_streams(u8 *data, unsigned long len,
case TM6000_URB_MSG_VIDEO:
/* Fills video buffer */
if (vbuf)
- memcpy (&voutp[pos], ptr, cpysize);
+ memcpy(&voutp[pos], ptr, cpysize);
break;
case TM6000_URB_MSG_AUDIO:
+ /* Hand the audio data off; in YUYV mode the 16-bit samples need their bytes swapped first */
+ if (dev->fourcc == V4L2_PIX_FMT_YUYV) {
+ /* Swap word bytes */
+ int i;
+
+ for (i = 0; i < cpysize; i += 2)
+ swab16s((u16 *)(ptr + i));
+ }
tm6000_call_fillbuf(dev, TM6000_AUDIO, ptr, cpysize);
break;
case TM6000_URB_MSG_VBI:
@@ -338,9 +347,9 @@ static int copy_multiplexed(u8 *ptr, unsigned long len,
struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
- struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
- unsigned int pos=dev->isoc_ctl.pos,cpysize;
- int rc=1;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ unsigned int pos = dev->isoc_ctl.pos, cpysize;
+ int rc = 1;
struct tm6000_buffer *buf;
char *outp = NULL;
@@ -351,19 +360,18 @@ static int copy_multiplexed(u8 *ptr, unsigned long len,
if (!outp)
return 0;
- while (len>0) {
- cpysize=min(len,buf->vb.size-pos);
- //printk("Copying %d bytes (max=%lu) from %p to %p[%u]\n",cpysize,(*buf)->vb.size,ptr,out_p,pos);
+ while (len > 0) {
+ cpysize = min(len, buf->vb.size-pos);
memcpy(&outp[pos], ptr, cpysize);
- pos+=cpysize;
- ptr+=cpysize;
- len-=cpysize;
+ pos += cpysize;
+ ptr += cpysize;
+ len -= cpysize;
if (pos >= buf->vb.size) {
- pos=0;
+ pos = 0;
/* Announces that a new buffer were filled */
- buffer_filled (dev, dma_q, buf);
+ buffer_filled(dev, dma_q, buf);
dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n");
- get_next_buf (dma_q, &buf);
+ get_next_buf(dma_q, &buf);
if (!buf)
break;
outp = videobuf_to_vmalloc(&(buf->vb));
@@ -373,16 +381,16 @@ static int copy_multiplexed(u8 *ptr, unsigned long len,
}
}
- dev->isoc_ctl.pos=pos;
+ dev->isoc_ctl.pos = pos;
return rc;
}
-static void inline print_err_status (struct tm6000_core *dev,
+static inline void print_err_status(struct tm6000_core *dev,
int packet, int status)
{
char *errmsg = "Unknown";
- switch(status) {
+ switch (status) {
case -ENOENT:
errmsg = "unlinked synchronuously";
break;
@@ -408,7 +416,7 @@ static void inline print_err_status (struct tm6000_core *dev,
errmsg = "Device does not respond";
break;
}
- if (packet<0) {
+ if (packet < 0) {
dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n",
status, errmsg);
} else {
@@ -424,20 +432,20 @@ static void inline print_err_status (struct tm6000_core *dev,
static inline int tm6000_isoc_copy(struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
- struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
- int i, len=0, rc=1, status;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ int i, len = 0, rc = 1, status;
char *p;
if (urb->status < 0) {
- print_err_status (dev, -1, urb->status);
+ print_err_status(dev, -1, urb->status);
return 0;
}
for (i = 0; i < urb->number_of_packets; i++) {
status = urb->iso_frame_desc[i].status;
- if (status<0) {
- print_err_status (dev,i,status);
+ if (status < 0) {
+ print_err_status(dev, i, status);
continue;
}
@@ -446,9 +454,9 @@ static inline int tm6000_isoc_copy(struct urb *urb)
if (len > 0) {
p = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
if (!urb->iso_frame_desc[i].status) {
- if ((dev->fourcc)==V4L2_PIX_FMT_TM6000) {
- rc=copy_multiplexed(p, len, urb);
- if (rc<=0)
+ if ((dev->fourcc) == V4L2_PIX_FMT_TM6000) {
+ rc = copy_multiplexed(p, len, urb);
+ if (rc <= 0)
return rc;
} else {
copy_streams(p, len, urb);
@@ -460,8 +468,9 @@ static inline int tm6000_isoc_copy(struct urb *urb)
}
/* ------------------------------------------------------------------
- URB control
- ------------------------------------------------------------------*/
+ * URB control
+ * ------------------------------------------------------------------
+ */
/*
* IRQ callback, called by URB callback
@@ -501,7 +510,7 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
dev->isoc_ctl.buf = NULL;
for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
- urb=dev->isoc_ctl.urb[i];
+ urb = dev->isoc_ctl.urb[i];
if (urb) {
usb_kill_urb(urb);
usb_unlink_urb(urb);
@@ -517,11 +526,11 @@ static void tm6000_uninit_isoc(struct tm6000_core *dev)
dev->isoc_ctl.transfer_buffer[i] = NULL;
}
- kfree (dev->isoc_ctl.urb);
- kfree (dev->isoc_ctl.transfer_buffer);
+ kfree(dev->isoc_ctl.urb);
+ kfree(dev->isoc_ctl.transfer_buffer);
- dev->isoc_ctl.urb=NULL;
- dev->isoc_ctl.transfer_buffer=NULL;
+ dev->isoc_ctl.urb = NULL;
+ dev->isoc_ctl.transfer_buffer = NULL;
dev->isoc_ctl.num_bufs = 0;
}
@@ -552,7 +561,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
dev->isoc_ctl.max_pkt_size = size;
- max_packets = ( framesize + size - 1) / size;
+ max_packets = (framesize + size - 1) / size;
if (max_packets > TM6000_MAX_ISO_PACKETS)
max_packets = TM6000_MAX_ISO_PACKETS;
@@ -594,10 +603,10 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
- tm6000_err ("unable to allocate %i bytes for transfer"
+ tm6000_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
sb_size, i,
- in_interrupt()?" while in int":"");
+ in_interrupt() ? " while in int" : "");
tm6000_uninit_isoc(dev);
return -ENOMEM;
}
@@ -619,13 +628,13 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
return 0;
}
-static int tm6000_start_thread( struct tm6000_core *dev)
+static int tm6000_start_thread(struct tm6000_core *dev)
{
struct tm6000_dmaqueue *dma_q = &dev->vidq;
int i;
- dma_q->frame=0;
- dma_q->ini_jiffies=jiffies;
+ dma_q->frame = 0;
+ dma_q->ini_jiffies = jiffies;
init_waitqueue_head(&dma_q->wq);
@@ -644,8 +653,9 @@ static int tm6000_start_thread( struct tm6000_core *dev)
}
/* ------------------------------------------------------------------
- Videobuf operations
- ------------------------------------------------------------------*/
+ * Videobuf operations
+ * ------------------------------------------------------------------
+ */
static int
buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
@@ -656,9 +666,8 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
if (0 == *count)
*count = TM6000_DEF_BUF;
- if (*count < TM6000_MIN_BUF) {
- *count=TM6000_MIN_BUF;
- }
+ if (*count < TM6000_MIN_BUF)
+ *count = TM6000_MIN_BUF;
while (*size * *count > vid_limit * 1024 * 1024)
(*count)--;
@@ -698,7 +707,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct tm6000_fh *fh = vq->priv_data;
- struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb);
struct tm6000_core *dev = fh->dev;
int rc = 0, urb_init = 0;
@@ -753,7 +762,7 @@ fail:
static void
buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb);
struct tm6000_fh *fh = vq->priv_data;
struct tm6000_core *dev = fh->dev;
struct tm6000_dmaqueue *vidq = &dev->vidq;
@@ -764,9 +773,9 @@ buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb);
- free_buffer(vq,buf);
+ free_buffer(vq, buf);
}
static struct videobuf_queue_ops tm6000_video_qops = {
@@ -777,49 +786,66 @@ static struct videobuf_queue_ops tm6000_video_qops = {
};
/* ------------------------------------------------------------------
- IOCTL handling
- ------------------------------------------------------------------*/
+ * IOCTL handling
+ * ------------------------------------------------------------------
+ */
-static int res_get(struct tm6000_core *dev, struct tm6000_fh *fh)
+static bool is_res_read(struct tm6000_core *dev, struct tm6000_fh *fh)
{
- /* is it free? */
- mutex_lock(&dev->lock);
- if (dev->resources) {
- /* no, someone else uses it */
- mutex_unlock(&dev->lock);
- return 0;
- }
- /* it's free, grab it */
- dev->resources =1;
- dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n");
- mutex_unlock(&dev->lock);
- return 1;
+ /* Is the current fh handling it? if so, that's OK */
+ if (dev->resources == fh && dev->is_res_read)
+ return true;
+
+ return false;
+}
+
+static bool is_res_streaming(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ /* Is the current fh handling it? if so, that's OK */
+ if (dev->resources == fh)
+ return true;
+
+ return false;
}
-static int res_locked(struct tm6000_core *dev)
+static bool res_get(struct tm6000_core *dev, struct tm6000_fh *fh,
+ bool is_res_read)
{
- return (dev->resources);
+ /* Is the current fh handling it? if so, that's OK */
+ if (dev->resources == fh && dev->is_res_read == is_res_read)
+ return true;
+
+ /* is it free? */
+ if (dev->resources)
+ return false;
+
+ /* grab it */
+ dev->resources = fh;
+ dev->is_res_read = is_res_read;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n");
+ return true;
}
static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh)
{
- mutex_lock(&dev->lock);
- dev->resources = 0;
+ /* Is the current fh handling it? if so, that's OK */
+ if (dev->resources != fh)
+ return;
+
+ dev->resources = NULL;
dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n");
- mutex_unlock(&dev->lock);
}
/* ------------------------------------------------------------------
- IOCTL vidioc handling
- ------------------------------------------------------------------*/
-static int vidioc_querycap (struct file *file, void *priv,
+ * IOCTL vidioc handling
+ * ------------------------------------------------------------------
+ */
+static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- // struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
- strlcpy(cap->card,"Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
- // strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info));
+ strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
cap->version = TM6000_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
@@ -828,21 +854,21 @@ static int vidioc_querycap (struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(format)))
return -EINVAL;
- strlcpy(f->description,format[f->index].name,sizeof(f->description));
+ strlcpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
}
-static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
f->fmt.pix.width = fh->width;
f->fmt.pix.height = fh->height;
@@ -853,10 +879,10 @@ static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
- return (0);
+ return 0;
}
-static struct tm6000_fmt* format_by_fourcc(unsigned int fourcc)
+static struct tm6000_fmt *format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
@@ -866,7 +892,7 @@ static struct tm6000_fmt* format_by_fourcc(unsigned int fourcc)
return NULL;
}
-static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
@@ -882,15 +908,14 @@ static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
field = f->fmt.pix.field;
- if (field == V4L2_FIELD_ANY) {
-// field=V4L2_FIELD_INTERLACED;
- field=V4L2_FIELD_SEQ_TB;
- } else if (V4L2_FIELD_INTERLACED != field) {
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_SEQ_TB;
+ else if (V4L2_FIELD_INTERLACED != field) {
dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
return -EINVAL;
}
- tm6000_get_std_res (dev);
+ tm6000_get_std_res(dev);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -908,14 +933,14 @@ static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
}
/*FIXME: This seems to be generic enough to be at videodev2 */
-static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- int ret = vidioc_try_fmt_vid_cap(file,fh,f);
+ int ret = vidioc_try_fmt_vid_cap(file, fh, f);
if (ret < 0)
- return (ret);
+ return ret;
fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
fh->width = f->fmt.pix.width;
@@ -927,52 +952,52 @@ static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
tm6000_set_fourcc_format(dev);
- return (0);
+ return 0;
}
-static int vidioc_reqbufs (struct file *file, void *priv,
+static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
- return (videobuf_reqbufs(&fh->vb_vidq, p));
+ return videobuf_reqbufs(&fh->vb_vidq, p);
}
-static int vidioc_querybuf (struct file *file, void *priv,
+static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
- return (videobuf_querybuf(&fh->vb_vidq, p));
+ return videobuf_querybuf(&fh->vb_vidq, p);
}
-static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
- return (videobuf_qbuf(&fh->vb_vidq, p));
+ return videobuf_qbuf(&fh->vb_vidq, p);
}
-static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
- return (videobuf_dqbuf(&fh->vb_vidq, p,
- file->f_flags & O_NONBLOCK));
+ return videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
-static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
+static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
- return videobuf_cgmbuf (&fh->vb_vidq, mbuf, 8);
+ return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8);
}
#endif
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -980,7 +1005,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- if (!res_get(dev,fh))
+ if (!res_get(dev, fh, false))
return -EBUSY;
return (videobuf_streamon(&fh->vb_vidq));
}
@@ -1007,7 +1032,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
struct tm6000_fh *fh=priv;
struct tm6000_core *dev = fh->dev;
- rc=tm6000_set_standard (dev, norm);
+ rc = tm6000_init_analog_mode(dev);
fh->width = dev->width;
fh->height = dev->height;
@@ -1020,21 +1045,21 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
return 0;
}
-static int vidioc_enum_input (struct file *file, void *priv,
+static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
switch (inp->index) {
case TM6000_INPUT_TV:
inp->type = V4L2_INPUT_TYPE_TUNER;
- strcpy(inp->name,"Television");
+ strcpy(inp->name, "Television");
break;
case TM6000_INPUT_COMPOSITE:
inp->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(inp->name,"Composite");
+ strcpy(inp->name, "Composite");
break;
case TM6000_INPUT_SVIDEO:
inp->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(inp->name,"S-Video");
+ strcpy(inp->name, "S-Video");
break;
default:
return -EINVAL;
@@ -1044,48 +1069,48 @@ static int vidioc_enum_input (struct file *file, void *priv,
return 0;
}
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- *i=dev->input;
+ *i = dev->input;
return 0;
}
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- int rc=0;
+ int rc = 0;
char buf[1];
switch (i) {
case TM6000_INPUT_TV:
- dev->input=i;
- *buf=0;
+ dev->input = i;
+ *buf = 0;
break;
case TM6000_INPUT_COMPOSITE:
case TM6000_INPUT_SVIDEO:
- dev->input=i;
- *buf=1;
+ dev->input = i;
+ *buf = 1;
break;
default:
return -EINVAL;
}
- rc=tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR,
REQ_03_SET_GET_MCU_PIN, 0x03, 1, buf, 1);
if (!rc) {
- dev->input=i;
- rc=vidioc_s_std (file, priv, &dev->vfd->current_norm);
+ dev->input = i;
+ rc = vidioc_s_std(file, priv, &dev->vfd->current_norm);
}
- return (rc);
+ return rc;
}
/* --- controls ---------------------------------------------- */
-static int vidioc_queryctrl (struct file *file, void *priv,
+static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
int i;
@@ -1094,16 +1119,16 @@ static int vidioc_queryctrl (struct file *file, void *priv,
if (qc->id && qc->id == tm6000_qctrl[i].id) {
memcpy(qc, &(tm6000_qctrl[i]),
sizeof(*qc));
- return (0);
+ return 0;
}
return -EINVAL;
}
-static int vidioc_g_ctrl (struct file *file, void *priv,
+static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
int val;
@@ -1125,41 +1150,41 @@ static int vidioc_g_ctrl (struct file *file, void *priv,
return -EINVAL;
}
- if (val<0)
+ if (val < 0)
return val;
- ctrl->value=val;
+ ctrl->value = val;
return 0;
}
-static int vidioc_s_ctrl (struct file *file, void *priv,
+static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct tm6000_fh *fh =priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- u8 val=ctrl->value;
+ u8 val = ctrl->value;
switch (ctrl->id) {
case V4L2_CID_CONTRAST:
- tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
+ tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
return 0;
case V4L2_CID_BRIGHTNESS:
- tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
+ tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
return 0;
case V4L2_CID_SATURATION:
- tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
+ tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
return 0;
case V4L2_CID_HUE:
- tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
+ tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
return 0;
}
return -EINVAL;
}
-static int vidioc_g_tuner (struct file *file, void *priv,
+static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct tm6000_fh *fh =priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (unlikely(UNSET == dev->tuner_type))
@@ -1176,10 +1201,10 @@ static int vidioc_g_tuner (struct file *file, void *priv,
return 0;
}
-static int vidioc_s_tuner (struct file *file, void *priv,
+static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct tm6000_fh *fh =priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (UNSET == dev->tuner_type)
@@ -1190,10 +1215,10 @@ static int vidioc_s_tuner (struct file *file, void *priv,
return 0;
}
-static int vidioc_g_frequency (struct file *file, void *priv,
+static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct tm6000_fh *fh =priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (unlikely(UNSET == dev->tuner_type))
@@ -1207,10 +1232,10 @@ static int vidioc_g_frequency (struct file *file, void *priv,
return 0;
}
-static int vidioc_s_frequency (struct file *file, void *priv,
+static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct tm6000_fh *fh =priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (unlikely(f->type != V4L2_TUNER_ANALOG_TV))
@@ -1221,10 +1246,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
if (unlikely(f->tuner != 0))
return -EINVAL;
-// mutex_lock(&dev->lock);
dev->freq = f->frequency;
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
-// mutex_unlock(&dev->lock);
return 0;
}
@@ -1239,7 +1262,7 @@ static int tm6000_open(struct file *file)
struct tm6000_core *dev = video_drvdata(file);
struct tm6000_fh *fh;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- int i,rc;
+ int i, rc;
printk(KERN_INFO "tm6000: open called (dev=%s)\n",
video_device_node_name(vdev));
@@ -1256,7 +1279,7 @@ static int tm6000_open(struct file *file)
dev->users);
/* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh),GFP_KERNEL);
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh) {
dev->users--;
return -ENOMEM;
@@ -1284,23 +1307,23 @@ static int tm6000_open(struct file *file)
"active=%d\n",list_empty(&dev->vidq.active));
/* initialize hardware on analog mode */
- if (dev->mode!=TM6000_MODE_ANALOG) {
- rc=tm6000_init_analog_mode (dev);
- if (rc<0)
- return rc;
+ rc = tm6000_init_analog_mode(dev);
+ if (rc < 0)
+ return rc;
+ if (dev->mode != TM6000_MODE_ANALOG) {
/* Put all controls at a sane state */
for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
- qctl_regs[i] =tm6000_qctrl[i].default_value;
+ qctl_regs[i] = tm6000_qctrl[i].default_value;
- dev->mode=TM6000_MODE_ANALOG;
+ dev->mode = TM6000_MODE_ANALOG;
}
videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
- sizeof(struct tm6000_buffer),fh);
+ sizeof(struct tm6000_buffer), fh, &dev->lock);
return 0;
}
@@ -1311,7 +1334,7 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
struct tm6000_fh *fh = file->private_data;
if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (res_locked(fh->dev))
+ if (!res_get(fh->dev, fh, true))
return -EBUSY;
return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0,
@@ -1329,7 +1352,10 @@ tm6000_poll(struct file *file, struct poll_table_struct *wait)
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
- if (res_get(fh->dev,fh)) {
+ if (!!is_res_streaming(fh->dev, fh))
+ return POLLERR;
+
+ if (!is_res_read(fh->dev, fh)) {
/* streaming capture */
if (list_empty(&fh->vb_vidq.stream))
return POLLERR;
@@ -1342,7 +1368,7 @@ tm6000_poll(struct file *file, struct poll_table_struct *wait)
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
+ return POLLIN | POLLRDNORM;
return 0;
}
@@ -1357,12 +1383,13 @@ static int tm6000_release(struct file *file)
dev->users--;
+ res_free(dev, fh);
if (!dev->users) {
tm6000_uninit_isoc(dev);
videobuf_mmap_free(&fh->vb_vidq);
}
- kfree (fh);
+ kfree(fh);
return 0;
}
@@ -1372,7 +1399,7 @@ static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
struct tm6000_fh *fh = file->private_data;
int ret;
- ret=videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
return ret;
}
@@ -1381,7 +1408,7 @@ static struct v4l2_file_operations tm6000_fops = {
.owner = THIS_MODULE,
.open = tm6000_open,
.release = tm6000_release,
- .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.read = tm6000_read,
.poll = tm6000_poll,
.mmap = tm6000_mmap,
@@ -1425,8 +1452,9 @@ static struct video_device tm6000_template = {
};
/* -----------------------------------------------------------------
- Initialization and module stuff
- ------------------------------------------------------------------*/
+ * Initialization and module stuff
+ * ------------------------------------------------------------------
+ */
int tm6000_v4l2_register(struct tm6000_core *dev)
{
@@ -1443,8 +1471,10 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
- memcpy (dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
- dev->vfd->debug=tm6000_debug;
+ memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
+ dev->vfd->debug = tm6000_debug;
+ dev->vfd->lock = &dev->lock;
+
vfd->v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(vfd, dev);
@@ -1466,11 +1496,11 @@ int tm6000_v4l2_exit(void)
}
module_param(video_nr, int, 0);
-MODULE_PARM_DESC(video_nr,"Allow changing video device number");
+MODULE_PARM_DESC(video_nr, "Allow changing video device number");
-module_param_named (debug, tm6000_debug, int, 0444);
-MODULE_PARM_DESC(debug,"activates debug info");
+module_param_named(debug, tm6000_debug, int, 0444);
+MODULE_PARM_DESC(debug, "activates debug info");
-module_param(vid_limit,int,0644);
-MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
+module_param(vid_limit, int, 0644);
+MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
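The tm6000-video.c changes above replace the old res_locked()/res_get(dev, fh) pair with an owner-aware scheme: res_get() gains a third argument saying whether the caller only wants read() access, poll() distinguishes is_res_streaming() from is_res_read(), and tm6000_release() now drops the claim through res_free(). The helper bodies are not part of the hunks shown here, so the sketch below is only a plausible reading of them, based on the resources/is_res_read fields added to struct tm6000_core in the tm6000.h diff that follows; names and logic are assumptions, not the driver's actual code.

static bool res_get(struct tm6000_core *dev, struct tm6000_fh *fh,
		    bool is_res_read)
{
	/* Sketch only -- the owner may re-request the mode it already holds */
	if (dev->resources == fh && dev->is_res_read == is_res_read)
		return true;

	/* Busy: another file handle owns the device */
	if (dev->resources)
		return false;

	/* First claimant wins; remember who owns it and for what */
	dev->resources = fh;
	dev->is_res_read = is_res_read;
	return true;
}

static bool is_res_streaming(struct tm6000_core *dev, struct tm6000_fh *fh)
{
	/* True only when this very file handle holds the streaming claim */
	return dev->resources == fh && !dev->is_res_read;
}

static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh)
{
	/* Only the owning file handle may release the device */
	if (dev->resources != fh)
		return;
	dev->resources = NULL;
}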
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/staging/tm6000/tm6000.h
index 1ec1bff9b294..46017b603190 100644
--- a/drivers/staging/tm6000/tm6000.h
+++ b/drivers/staging/tm6000/tm6000.h
@@ -1,23 +1,23 @@
/*
- tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
-
- Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
- - DVB-T support
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+ *
+ * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *
+ * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ * - DVB-T support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Use the tm6000-hack, instead of the proper initialization code */
@@ -54,8 +54,9 @@ enum tm6000_devtype {
};
/* ------------------------------------------------------------------
- Basic structures
- ------------------------------------------------------------------*/
+ * Basic structures
+ * ------------------------------------------------------------------
+ */
struct tm6000_fmt {
char *name;
@@ -189,7 +190,9 @@ struct tm6000_core {
int users;
/* various device info */
- unsigned int resources;
+ struct tm6000_fh *resources; /* Points to fh that is streaming */
+ bool is_res_read;
+
struct video_device *vfd;
struct tm6000_dmaqueue vidq;
struct v4l2_device v4l2_dev;
@@ -205,6 +208,9 @@ struct tm6000_core {
/* audio support */
struct snd_tm6000_card *adev;
+ struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
+ atomic_t stream_started; /* stream should be running if true */
+
struct tm6000_IR *ir;
@@ -251,9 +257,9 @@ struct tm6000_fh {
enum v4l2_buf_type type;
};
-#define TM6000_STD V4L2_STD_PAL|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc| \
+#define TM6000_STD (V4L2_STD_PAL|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc| \
V4L2_STD_PAL_M|V4L2_STD_PAL_60|V4L2_STD_NTSC_M| \
- V4L2_STD_NTSC_M_JP|V4L2_STD_SECAM
+ V4L2_STD_NTSC_M_JP|V4L2_STD_SECAM)
/* In tm6000-cards.c */
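Apart from the new fields, the one functional tm6000.h change is the parenthesisation of TM6000_STD. A reduced illustration of why that matters (the two-standard macros below are made up for the example, not taken from the driver): '&' binds tighter than '|', so masking with the unparenthesised macro applies the mask to the first standard only and ORs every other standard back in unconditionally.

#define OLD_STD  V4L2_STD_PAL | V4L2_STD_SECAM		/* old, unparenthesised */
#define NEW_STD (V4L2_STD_PAL | V4L2_STD_SECAM)		/* new, parenthesised   */

/* For some v4l2_std_id norm:
 *	norm & OLD_STD	expands to	(norm & V4L2_STD_PAL) | V4L2_STD_SECAM
 *	norm & NEW_STD	expands to	 norm & (V4L2_STD_PAL | V4L2_STD_SECAM)
 * so only the second form really masks the whole set of standards.
 */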
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 095fa5366690..e2f63c0ea09d 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -276,6 +276,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index e4f595055208..f276e9594f00 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -980,6 +980,7 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
if (likely(inode)) {
struct timespec current_time = CURRENT_TIME;
+ inode->i_ino = get_next_ino();
inode->i_mode = perms->mode;
inode->i_uid = perms->uid;
inode->i_gid = perms->gid;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index d1d72d946b04..ba145e7fbe03 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1991,6 +1991,7 @@ gadgetfs_make_inode (struct super_block *sb,
struct inode *inode = new_inode (sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = default_uid;
inode->i_gid = default_gid;
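The three one-line hunks above (usbfs, FunctionFS and gadgetfs) apply the same pattern: pseudo-filesystems with no backing store now pick an inode number explicitly with get_next_ino(), presumably because the common new_inode() path stopped assigning a default i_ino in this same merge window. A minimal sketch of the resulting idiom (the helper name is made up for illustration):

#include <linux/fs.h>

static struct inode *example_make_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		/* no on-disk inode numbers, so grab a unique in-memory one */
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	}
	return inode;
}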
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 596ef6b922bf..27c1fb4b1e0d 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -17,6 +17,8 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
+source "drivers/gpu/stub/Kconfig"
+
config VGASTATE
tristate
default n
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index e77e8e4280fb..4ea187d93768 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1079,7 +1079,7 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
* clock can only be obtained by dividing this value by an even integer.
* Fallback to a slower pixel clock if necessary. */
pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
- pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2));
+ pixclock = min3(pixclock, fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2);
if (AU1200_LCD_MAX_CLK % pixclock) {
int diff = AU1200_LCD_MAX_CLK % pixclock;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 42e303ff862a..0e6aa3d96a42 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -697,9 +697,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct inode *inode = file->f_path.dentry->d_inode;
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
- u32 *buffer, *dst;
- u32 __iomem *src;
- int c, i, cnt = 0, err = 0;
+ u8 *buffer, *dst;
+ u8 __iomem *src;
+ int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || ! info->screen_base)
@@ -730,7 +730,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!buffer)
return -ENOMEM;
- src = (u32 __iomem *) (info->screen_base + p);
+ src = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
@@ -738,17 +738,9 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
dst = buffer;
- for (i = c >> 2; i--; )
- *dst++ = fb_readl(src++);
- if (c & 3) {
- u8 *dst8 = (u8 *) dst;
- u8 __iomem *src8 = (u8 __iomem *) src;
-
- for (i = c & 3; i--;)
- *dst8++ = fb_readb(src8++);
-
- src = (u32 __iomem *) src8;
- }
+ fb_memcpy_fromfb(dst, src, c);
+ dst += c;
+ src += c;
if (copy_to_user(buf, buffer, c)) {
err = -EFAULT;
@@ -772,9 +764,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
struct inode *inode = file->f_path.dentry->d_inode;
int fbidx = iminor(inode);
struct fb_info *info = registered_fb[fbidx];
- u32 *buffer, *src;
- u32 __iomem *dst;
- int c, i, cnt = 0, err = 0;
+ u8 *buffer, *src;
+ u8 __iomem *dst;
+ int c, cnt = 0, err = 0;
unsigned long total_size;
if (!info || !info->screen_base)
@@ -811,7 +803,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!buffer)
return -ENOMEM;
- dst = (u32 __iomem *) (info->screen_base + p);
+ dst = (u8 __iomem *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
@@ -825,19 +817,9 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
break;
}
- for (i = c >> 2; i--; )
- fb_writel(*src++, dst++);
-
- if (c & 3) {
- u8 *src8 = (u8 *) src;
- u8 __iomem *dst8 = (u8 __iomem *) dst;
-
- for (i = c & 3; i--; )
- fb_writeb(*src8++, dst8++);
-
- dst = (u32 __iomem *) dst8;
- }
-
+ fb_memcpy_tofb(dst, src, c);
+ dst += c;
+ src += c;
*ppos += c;
buf += c;
cnt += c;
@@ -877,13 +859,13 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
if ((err = info->fbops->fb_pan_display(var, info)))
return err;
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
- return 0;
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
}
static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index ca3355e430bf..933899dca33a 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1143,8 +1143,10 @@ static int __devinit gbefb_probe(struct platform_device *p_dev)
return -ENOMEM;
#ifndef MODULE
- if (fb_get_options("gbefb", &options))
- return -ENODEV;
+ if (fb_get_options("gbefb", &options)) {
+ ret = -ENODEV;
+ goto out_release_framebuffer;
+ }
gbefb_setup(options);
#endif
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index f9fa0fd00292..1717623aabc0 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -869,12 +869,9 @@ static int MGAG100_preinit(struct matrox_fb_info *minfo)
minfo->capable.plnwt = minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100
? minfo->devflags.sgram : 1;
-#ifdef CONFIG_FB_MATROX_G
if (minfo->devflags.g450dac) {
minfo->outputs[0].output = &g450out;
- } else
-#endif
- {
+ } else {
minfo->outputs[0].output = &m1064;
}
minfo->outputs[0].src = minfo->outputs[0].default_src;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 1e3e8f19783e..31b8f67477b7 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -280,7 +280,7 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
return fxtal * (*feed) / (*in) * ctl->den;
}
-static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
+static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
unsigned int htotal, unsigned int vtotal,
unsigned int* in, unsigned int* feed, unsigned int* post,
unsigned int* htotal2) {
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index 2ffb34af4c59..87785c215a52 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -1590,7 +1590,7 @@ static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
blizzard.auto_update_window.width = fbdev->panel->x_res;
blizzard.auto_update_window.height = fbdev->panel->y_res;
blizzard.auto_update_window.out_x = 0;
- blizzard.auto_update_window.out_x = 0;
+ blizzard.auto_update_window.out_y = 0;
blizzard.auto_update_window.out_width = fbdev->panel->x_res;
blizzard.auto_update_window.out_height = fbdev->panel->y_res;
blizzard.auto_update_window.format = 0;
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index ca75cc2a87a5..d7c6c3e0afc6 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -25,8 +25,6 @@
#include <linux/gpio.h>
#include <linux/i2c/twl.h>
-#include <plat/mux.h>
-#include <plat/mux.h>
#include <asm/mach-types.h>
#include "omapfb.h"
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 881c9f77c75a..12327bbfdbbb 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -40,7 +40,7 @@ config PANEL_TPO_TD043MTEA1
config PANEL_ACX565AKM
tristate "ACX565AKM Panel"
- depends on OMAP2_DSS_SDI
+ depends on OMAP2_DSS_SDI && SPI
select BACKLIGHT_CLASS_DEVICE
help
This is the LCD panel used on Nokia N900
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 07fbb8a733bb..e77310653207 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -587,6 +587,9 @@ static int acx_panel_power_on(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "%s\n", __func__);
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
mutex_lock(&md->mutex);
r = omapdss_sdi_display_enable(dssdev);
@@ -644,6 +647,9 @@ static void acx_panel_power_off(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "%s\n", __func__);
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
mutex_lock(&md->mutex);
if (!md->enabled) {
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
index 300eff5de1b4..395a68de3990 100644
--- a/drivers/video/omap2/displays/panel-generic.c
+++ b/drivers/video/omap2/displays/panel-generic.c
@@ -39,6 +39,9 @@ static int generic_panel_power_on(struct omap_dss_device *dssdev)
{
int r;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
r = omapdss_dpi_display_enable(dssdev);
if (r)
goto err0;
@@ -58,6 +61,9 @@ err0:
static void generic_panel_power_off(struct omap_dss_device *dssdev)
{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
index 10267461991c..0c6896cea2d0 100644
--- a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
+++ b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
@@ -43,6 +43,9 @@ static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev)
{
int r;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
r = omapdss_dpi_display_enable(dssdev);
if (r)
goto err0;
@@ -65,6 +68,9 @@ err0:
static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev)
{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 7d9eb2b1f5af..9a138f650e05 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -135,6 +135,9 @@ static int sharp_ls_power_on(struct omap_dss_device *dssdev)
{
int r = 0;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
r = omapdss_dpi_display_enable(dssdev);
if (r)
goto err0;
@@ -157,6 +160,9 @@ err0:
static void sharp_ls_power_off(struct omap_dss_device *dssdev)
{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
index e320e67d06f3..526e906c8a6c 100644
--- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
+++ b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
@@ -46,6 +46,9 @@ static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev)
{
int r;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
r = omapdss_dpi_display_enable(dssdev);
if (r)
goto err0;
@@ -65,6 +68,9 @@ err0:
static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index e866e76b13d0..dbe9d43b4850 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -269,6 +269,9 @@ static int tpo_td043_power_on(struct omap_dss_device *dssdev)
int nreset_gpio = dssdev->reset_gpio;
int r;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
r = omapdss_dpi_display_enable(dssdev);
if (r)
goto err0;
@@ -308,6 +311,9 @@ static void tpo_td043_power_off(struct omap_dss_device *dssdev)
struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
int nreset_gpio = dssdev->reset_gpio;
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
+
tpo_td043_write(tpo_td043->spi, 3,
TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index d71b5d9d71b1..7db17b5e570c 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dss_features.o dispc.o display.o manager.o overlay.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index b3a498f22d36..8e89f6049280 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -37,6 +37,7 @@
#include <plat/clock.h>
#include "dss.h"
+#include "dss_features.h"
static struct {
struct platform_device *pdev;
@@ -502,6 +503,8 @@ static int omap_dss_probe(struct platform_device *pdev)
core.pdev = pdev;
+ dss_features_init();
+
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 5ecdc0004094..fa40fa59a9ac 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -39,6 +39,7 @@
#include <plat/display.h>
#include "dss.h"
+#include "dss_features.h"
/* DISPC */
#define DISPC_BASE 0x48050400
@@ -139,6 +140,22 @@ struct omap_dispc_isr_data {
u32 mask;
};
+struct dispc_h_coef {
+ s8 hc4;
+ s8 hc3;
+ u8 hc2;
+ s8 hc1;
+ s8 hc0;
+};
+
+struct dispc_v_coef {
+ s8 vc22;
+ s8 vc2;
+ u8 vc1;
+ s8 vc0;
+ s8 vc00;
+};
+
#define REG_GET(idx, start, end) \
FLD_GET(dispc_read_reg(idx), start, end)
@@ -564,106 +581,77 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
int vscaleup, int five_taps)
{
/* Coefficients for horizontal up-sampling */
- static const u32 coef_hup[8] = {
- 0x00800000,
- 0x0D7CF800,
- 0x1E70F5FF,
- 0x335FF5FE,
- 0xF74949F7,
- 0xF55F33FB,
- 0xF5701EFE,
- 0xF87C0DFF,
+ static const struct dispc_h_coef coef_hup[8] = {
+ { 0, 0, 128, 0, 0 },
+ { -1, 13, 124, -8, 0 },
+ { -2, 30, 112, -11, -1 },
+ { -5, 51, 95, -11, -2 },
+ { 0, -9, 73, 73, -9 },
+ { -2, -11, 95, 51, -5 },
+ { -1, -11, 112, 30, -2 },
+ { 0, -8, 124, 13, -1 },
};
- /* Coefficients for horizontal down-sampling */
- static const u32 coef_hdown[8] = {
- 0x24382400,
- 0x28371FFE,
- 0x2C361BFB,
- 0x303516F9,
- 0x11343311,
- 0x1635300C,
- 0x1B362C08,
- 0x1F372804,
+ /* Coefficients for vertical up-sampling */
+ static const struct dispc_v_coef coef_vup_3tap[8] = {
+ { 0, 0, 128, 0, 0 },
+ { 0, 3, 123, 2, 0 },
+ { 0, 12, 111, 5, 0 },
+ { 0, 32, 89, 7, 0 },
+ { 0, 0, 64, 64, 0 },
+ { 0, 7, 89, 32, 0 },
+ { 0, 5, 111, 12, 0 },
+ { 0, 2, 123, 3, 0 },
};
- /* Coefficients for horizontal and vertical up-sampling */
- static const u32 coef_hvup[2][8] = {
- {
- 0x00800000,
- 0x037B02FF,
- 0x0C6F05FE,
- 0x205907FB,
- 0x00404000,
- 0x075920FE,
- 0x056F0CFF,
- 0x027B0300,
- },
- {
- 0x00800000,
- 0x0D7CF8FF,
- 0x1E70F5FE,
- 0x335FF5FB,
- 0xF7404000,
- 0xF55F33FE,
- 0xF5701EFF,
- 0xF87C0D00,
- },
+ static const struct dispc_v_coef coef_vup_5tap[8] = {
+ { 0, 0, 128, 0, 0 },
+ { -1, 13, 124, -8, 0 },
+ { -2, 30, 112, -11, -1 },
+ { -5, 51, 95, -11, -2 },
+ { 0, -9, 73, 73, -9 },
+ { -2, -11, 95, 51, -5 },
+ { -1, -11, 112, 30, -2 },
+ { 0, -8, 124, 13, -1 },
};
- /* Coefficients for horizontal and vertical down-sampling */
- static const u32 coef_hvdown[2][8] = {
- {
- 0x24382400,
- 0x28391F04,
- 0x2D381B08,
- 0x3237170C,
- 0x123737F7,
- 0x173732F9,
- 0x1B382DFB,
- 0x1F3928FE,
- },
- {
- 0x24382400,
- 0x28371F04,
- 0x2C361B08,
- 0x3035160C,
- 0x113433F7,
- 0x163530F9,
- 0x1B362CFB,
- 0x1F3728FE,
- },
+ /* Coefficients for horizontal down-sampling */
+ static const struct dispc_h_coef coef_hdown[8] = {
+ { 0, 36, 56, 36, 0 },
+ { 4, 40, 55, 31, -2 },
+ { 8, 44, 54, 27, -5 },
+ { 12, 48, 53, 22, -7 },
+ { -9, 17, 52, 51, 17 },
+ { -7, 22, 53, 48, 12 },
+ { -5, 27, 54, 44, 8 },
+ { -2, 31, 55, 40, 4 },
};
- /* Coefficients for vertical up-sampling */
- static const u32 coef_vup[8] = {
- 0x00000000,
- 0x0000FF00,
- 0x0000FEFF,
- 0x0000FBFE,
- 0x000000F7,
- 0x0000FEFB,
- 0x0000FFFE,
- 0x000000FF,
+ /* Coefficients for vertical down-sampling */
+ static const struct dispc_v_coef coef_vdown_3tap[8] = {
+ { 0, 36, 56, 36, 0 },
+ { 0, 40, 57, 31, 0 },
+ { 0, 45, 56, 27, 0 },
+ { 0, 50, 55, 23, 0 },
+ { 0, 18, 55, 55, 0 },
+ { 0, 23, 55, 50, 0 },
+ { 0, 27, 56, 45, 0 },
+ { 0, 31, 57, 40, 0 },
};
-
- /* Coefficients for vertical down-sampling */
- static const u32 coef_vdown[8] = {
- 0x00000000,
- 0x000004FE,
- 0x000008FB,
- 0x00000CF9,
- 0x0000F711,
- 0x0000F90C,
- 0x0000FB08,
- 0x0000FE04,
+ static const struct dispc_v_coef coef_vdown_5tap[8] = {
+ { 0, 36, 56, 36, 0 },
+ { 4, 40, 55, 31, -2 },
+ { 8, 44, 54, 27, -5 },
+ { 12, 48, 53, 22, -7 },
+ { -9, 17, 52, 51, 17 },
+ { -7, 22, 53, 48, 12 },
+ { -5, 27, 54, 44, 8 },
+ { -2, 31, 55, 40, 4 },
};
- const u32 *h_coef;
- const u32 *hv_coef;
- const u32 *hv_coef_mod;
- const u32 *v_coef;
+ const struct dispc_h_coef *h_coef;
+ const struct dispc_v_coef *v_coef;
int i;
if (hscaleup)
@@ -671,47 +659,34 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
else
h_coef = coef_hdown;
- if (vscaleup) {
- hv_coef = coef_hvup[five_taps];
- v_coef = coef_vup;
-
- if (hscaleup)
- hv_coef_mod = NULL;
- else
- hv_coef_mod = coef_hvdown[five_taps];
- } else {
- hv_coef = coef_hvdown[five_taps];
- v_coef = coef_vdown;
-
- if (hscaleup)
- hv_coef_mod = coef_hvup[five_taps];
- else
- hv_coef_mod = NULL;
- }
+ if (vscaleup)
+ v_coef = five_taps ? coef_vup_5tap : coef_vup_3tap;
+ else
+ v_coef = five_taps ? coef_vdown_5tap : coef_vdown_3tap;
for (i = 0; i < 8; i++) {
u32 h, hv;
- h = h_coef[i];
-
- hv = hv_coef[i];
-
- if (hv_coef_mod) {
- hv &= 0xffffff00;
- hv |= (hv_coef_mod[i] & 0xff);
- }
+ h = FLD_VAL(h_coef[i].hc0, 7, 0)
+ | FLD_VAL(h_coef[i].hc1, 15, 8)
+ | FLD_VAL(h_coef[i].hc2, 23, 16)
+ | FLD_VAL(h_coef[i].hc3, 31, 24);
+ hv = FLD_VAL(h_coef[i].hc4, 7, 0)
+ | FLD_VAL(v_coef[i].vc0, 15, 8)
+ | FLD_VAL(v_coef[i].vc1, 23, 16)
+ | FLD_VAL(v_coef[i].vc2, 31, 24);
_dispc_write_firh_reg(plane, i, h);
_dispc_write_firhv_reg(plane, i, hv);
}
- if (!five_taps)
- return;
-
- for (i = 0; i < 8; i++) {
- u32 v;
- v = v_coef[i];
- _dispc_write_firv_reg(plane, i, v);
+ if (five_taps) {
+ for (i = 0; i < 8; i++) {
+ u32 v;
+ v = FLD_VAL(v_coef[i].vc00, 7, 0)
+ | FLD_VAL(v_coef[i].vc22, 15, 8);
+ _dispc_write_firv_reg(plane, i, v);
+ }
}
}
@@ -800,12 +775,12 @@ static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
{
-
- BUG_ON(plane == OMAP_DSS_VIDEO1);
-
- if (cpu_is_omap24xx())
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
+ BUG_ON(!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ plane == OMAP_DSS_VIDEO1);
+
if (plane == OMAP_DSS_GFX)
REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
else if (plane == OMAP_DSS_VIDEO2)
@@ -975,17 +950,14 @@ static void dispc_read_plane_fifo_sizes(void)
DISPC_VID_FIFO_SIZE_STATUS(1) };
u32 size;
int plane;
+ u8 start, end;
enable_clocks(1);
- for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
- if (cpu_is_omap24xx())
- size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0);
- else if (cpu_is_omap34xx())
- size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0);
- else
- BUG();
+ dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
+ for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), start, end);
dispc.fifo_size[plane] = size;
}
@@ -1002,6 +974,8 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
DISPC_VID_FIFO_THRESHOLD(0),
DISPC_VID_FIFO_THRESHOLD(1) };
+ u8 hi_start, hi_end, lo_start, lo_end;
+
enable_clocks(1);
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
@@ -1010,12 +984,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
REG_GET(ftrs_reg[plane], 27, 16),
low, high);
- if (cpu_is_omap24xx())
- dispc_write_reg(ftrs_reg[plane],
- FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
- else
- dispc_write_reg(ftrs_reg[plane],
- FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
+ dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
+ dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
+
+ dispc_write_reg(ftrs_reg[plane],
+ FLD_VAL(high, hi_start, hi_end) |
+ FLD_VAL(low, lo_start, lo_end));
enable_clocks(0);
}
@@ -1035,13 +1009,16 @@ static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc)
u32 val;
const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
DISPC_VID_FIR(1) };
+ u8 hinc_start, hinc_end, vinc_start, vinc_end;
BUG_ON(plane == OMAP_DSS_GFX);
- if (cpu_is_omap24xx())
- val = FLD_VAL(vinc, 27, 16) | FLD_VAL(hinc, 11, 0);
- else
- val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
+ dss_feat_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end);
+ dss_feat_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end);
+
+ val = FLD_VAL(vinc, vinc_start, vinc_end) |
+ FLD_VAL(hinc, hinc_start, hinc_end);
+
dispc_write_reg(fir_reg[plane-1], val);
}
@@ -1567,6 +1544,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
case OMAP_DSS_COLOR_ARGB16:
case OMAP_DSS_COLOR_ARGB32:
case OMAP_DSS_COLOR_RGBA32:
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ return -EINVAL;
case OMAP_DSS_COLOR_RGBX32:
if (cpu_is_omap24xx())
return -EINVAL;
@@ -1607,9 +1586,10 @@ static int _dispc_setup_plane(enum omap_plane plane,
case OMAP_DSS_COLOR_ARGB16:
case OMAP_DSS_COLOR_ARGB32:
case OMAP_DSS_COLOR_RGBA32:
- if (cpu_is_omap24xx())
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return -EINVAL;
- if (plane == OMAP_DSS_VIDEO1)
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ plane == OMAP_DSS_VIDEO1)
return -EINVAL;
break;
@@ -2002,7 +1982,7 @@ void dispc_enable_trans_key(enum omap_channel ch, bool enable)
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
{
- if (cpu_is_omap24xx())
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
enable_clocks(1);
@@ -2016,7 +1996,7 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
{
bool enabled;
- if (cpu_is_omap24xx())
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return false;
enable_clocks(1);
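The dispc.c rework above stores each FIR tap as a small signed struct field and packs the DISPC register values at run time with FLD_VAL() (defined earlier in dispc.c; it shifts a value to the field's low bit and masks it to the field width), instead of keeping hand-encoded u32 tables. A worked check, done here for illustration and not part of the patch, shows the new tables encode exactly the same register values as the removed arrays:

/* Second horizontal up-sampling phase:
 * coef_hup[1] = { .hc4 = -1, .hc3 = 13, .hc2 = 124, .hc1 = -8, .hc0 = 0 } */
u32 h = FLD_VAL(0,   7,  0)	/* hc0 -> 0x00000000 */
      | FLD_VAL(-8,  15, 8)	/* hc1 -> 0x0000f800 */
      | FLD_VAL(124, 23, 16)	/* hc2 -> 0x007c0000 */
      | FLD_VAL(13,  31, 24);	/* hc3 -> 0x0d000000 */
/* h == 0x0d7cf800, identical to the removed coef_hup[1] entry; hc4 (-1, i.e.
 * 0xff) goes into the low byte of the FIR_HV value together with the vertical
 * taps, matching the 0x0D7CF8FF entry of the removed coef_hvup table. */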
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index b3fa3a7db911..aa4f7a5fae29 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -3274,7 +3274,6 @@ int dsi_init(struct platform_device *pdev)
dsi.vdds_dsi_reg = dss_get_vdds_dsi();
if (IS_ERR(dsi.vdds_dsi_reg)) {
- iounmap(dsi.base);
DSSERR("can't get VDDS_DSI regulator\n");
r = PTR_ERR(dsi.vdds_dsi_reg);
goto err2;
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
new file mode 100644
index 000000000000..867f68de125f
--- /dev/null
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -0,0 +1,191 @@
+/*
+ * linux/drivers/video/omap2/dss/dss_features.c
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Archit Taneja <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss_features.h"
+
+/* Defines a generic omap register field */
+struct dss_reg_field {
+ enum dss_feat_reg_field id;
+ u8 start, end;
+};
+
+struct omap_dss_features {
+ const struct dss_reg_field *reg_fields;
+ const int num_reg_fields;
+
+ const u32 has_feature;
+
+ const int num_mgrs;
+ const int num_ovls;
+ const enum omap_display_type *supported_displays;
+ const enum omap_color_mode *supported_color_modes;
+};
+
+/* This struct is assigned to one of the below during initialization */
+static struct omap_dss_features *omap_current_dss_features;
+
+static const struct dss_reg_field omap2_dss_reg_fields[] = {
+ { FEAT_REG_FIRHINC, 11, 0 },
+ { FEAT_REG_FIRVINC, 27, 16 },
+ { FEAT_REG_FIFOLOWTHRESHOLD, 8, 0 },
+ { FEAT_REG_FIFOHIGHTHRESHOLD, 24, 16 },
+ { FEAT_REG_FIFOSIZE, 8, 0 },
+};
+
+static const struct dss_reg_field omap3_dss_reg_fields[] = {
+ { FEAT_REG_FIRHINC, 12, 0 },
+ { FEAT_REG_FIRVINC, 28, 16 },
+ { FEAT_REG_FIFOLOWTHRESHOLD, 11, 0 },
+ { FEAT_REG_FIFOHIGHTHRESHOLD, 27, 16 },
+ { FEAT_REG_FIFOSIZE, 10, 0 },
+};
+
+static const enum omap_display_type omap2_dss_supported_displays[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+ OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DISPLAY_TYPE_VENC,
+};
+
+static const enum omap_display_type omap3_dss_supported_displays[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+ OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DISPLAY_TYPE_VENC,
+};
+
+static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY,
+};
+
+static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+};
+
+/* OMAP2 DSS Features */
+static struct omap_dss_features omap2_dss_features = {
+ .reg_fields = omap2_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
+
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .supported_displays = omap2_dss_supported_displays,
+ .supported_color_modes = omap2_dss_supported_color_modes,
+};
+
+/* OMAP3 DSS Features */
+static struct omap_dss_features omap3_dss_features = {
+ .reg_fields = omap3_dss_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+ .has_feature = FEAT_GLOBAL_ALPHA,
+
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .supported_displays = omap3_dss_supported_displays,
+ .supported_color_modes = omap3_dss_supported_color_modes,
+};
+
+/* Functions returning values related to a DSS feature */
+int dss_feat_get_num_mgrs(void)
+{
+ return omap_current_dss_features->num_mgrs;
+}
+
+int dss_feat_get_num_ovls(void)
+{
+ return omap_current_dss_features->num_ovls;
+}
+
+enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
+{
+ return omap_current_dss_features->supported_displays[channel];
+}
+
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
+{
+ return omap_current_dss_features->supported_color_modes[plane];
+}
+
+/* DSS has_feature check */
+bool dss_has_feature(enum dss_feat_id id)
+{
+ return omap_current_dss_features->has_feature & id;
+}
+
+void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
+{
+ if (id >= omap_current_dss_features->num_reg_fields)
+ BUG();
+
+ *start = omap_current_dss_features->reg_fields[id].start;
+ *end = omap_current_dss_features->reg_fields[id].end;
+}
+
+void dss_features_init(void)
+{
+ if (cpu_is_omap24xx())
+ omap_current_dss_features = &omap2_dss_features;
+ else
+ omap_current_dss_features = &omap3_dss_features;
+}
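dss_has_feature() is a plain bitmask test against the per-SoC has_feature word, and the OMAP2 table simply leaves that word at zero, so every feature check fails on OMAP2; that is what lets the dispc.c hunks above drop their cpu_is_omap24xx() special cases. The sketch below mirrors how such a caller looks (the function name is invented for illustration; it adds nothing beyond what dispc.c already does):

#include "dss_features.h"

static void example_configure_alpha(enum omap_plane plane, u8 alpha)
{
	if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
		return;		/* OMAP2: no global alpha registers at all */

	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
	    plane == OMAP_DSS_VIDEO1)
		return;		/* this DSS has no alpha on the VID1 plane */

	/* ... program DISPC_GLOBAL_ALPHA with 'alpha' for the plane here ... */
}

Because the test is a bitwise AND, any future dss_feat_id must stay a power-of-two flag.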
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
new file mode 100644
index 000000000000..cb231eaa9b31
--- /dev/null
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -0,0 +1,50 @@
+/*
+ * linux/drivers/video/omap2/dss/dss_features.h
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Archit Taneja <archit@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DSS_FEATURES_H
+#define __OMAP2_DSS_FEATURES_H
+
+#define MAX_DSS_MANAGERS 2
+#define MAX_DSS_OVERLAYS 3
+
+/* DSS has feature id */
+enum dss_feat_id {
+ FEAT_GLOBAL_ALPHA = 1 << 0,
+ FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
+};
+
+/* DSS register field id */
+enum dss_feat_reg_field {
+ FEAT_REG_FIRHINC,
+ FEAT_REG_FIRVINC,
+ FEAT_REG_FIFOHIGHTHRESHOLD,
+ FEAT_REG_FIFOLOWTHRESHOLD,
+ FEAT_REG_FIFOSIZE,
+};
+
+/* DSS Feature Functions */
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+bool dss_has_feature(enum dss_feat_id id);
+void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
+void dss_features_init(void);
+#endif
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 6a649ab5539e..545e9b9a4d92 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -33,6 +33,7 @@
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
static int num_managers;
static struct list_head manager_list;
@@ -448,8 +449,8 @@ struct manager_cache_data {
static struct {
spinlock_t lock;
- struct overlay_cache_data overlay_cache[3];
- struct manager_cache_data manager_cache[2];
+ struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
+ struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
bool irq_enabled;
} dss_cache;
@@ -882,12 +883,12 @@ static int configure_dispc(void)
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
- const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
- const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ const int num_ovls = dss_feat_get_num_ovls();
+ const int num_mgrs = dss_feat_get_num_mgrs();
int i;
int r;
- bool mgr_busy[2];
- bool mgr_go[2];
+ bool mgr_busy[MAX_DSS_MANAGERS];
+ bool mgr_go[MAX_DSS_MANAGERS];
bool busy;
r = 0;
@@ -989,7 +990,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
- const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_ovls = dss_feat_get_num_ovls();
struct omap_overlay_manager *mgr;
int i;
u16 x, y, w, h;
@@ -1121,8 +1122,8 @@ void dss_start_update(struct omap_dss_device *dssdev)
{
struct manager_cache_data *mc;
struct overlay_cache_data *oc;
- const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
- const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ const int num_ovls = dss_feat_get_num_ovls();
+ const int num_mgrs = dss_feat_get_num_mgrs();
struct omap_overlay_manager *mgr;
int i;
@@ -1151,10 +1152,10 @@ static void dss_apply_irq_handler(void *data, u32 mask)
{
struct manager_cache_data *mc;
struct overlay_cache_data *oc;
- const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
- const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ const int num_ovls = dss_feat_get_num_ovls();
+ const int num_mgrs = dss_feat_get_num_mgrs();
int i, r;
- bool mgr_busy[2];
+ bool mgr_busy[MAX_DSS_MANAGERS];
mgr_busy[0] = dispc_go_busy(0);
mgr_busy[1] = dispc_go_busy(1);
@@ -1461,7 +1462,7 @@ int dss_init_overlay_managers(struct platform_device *pdev)
num_managers = 0;
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < dss_feat_get_num_mgrs(); ++i) {
struct omap_overlay_manager *mgr;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
@@ -1471,14 +1472,10 @@ int dss_init_overlay_managers(struct platform_device *pdev)
case 0:
mgr->name = "lcd";
mgr->id = OMAP_DSS_CHANNEL_LCD;
- mgr->supported_displays =
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI;
break;
case 1:
mgr->name = "tv";
mgr->id = OMAP_DSS_CHANNEL_DIGIT;
- mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC;
break;
}
@@ -1494,6 +1491,8 @@ int dss_init_overlay_managers(struct platform_device *pdev)
mgr->disable = &dss_mgr_disable;
mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
+ mgr->supported_displays =
+ dss_feat_get_supported_displays(mgr->id);
dss_overlay_setup_dispc_manager(mgr);
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 244dca81a399..75642c22cac7 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -35,6 +35,7 @@
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
static int num_overlays;
static struct list_head overlay_list;
@@ -237,7 +238,8 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
/* Video1 plane does not support global alpha,
* so always keep it at 255 (completely opaque)
*/
- if (ovl->id == OMAP_DSS_VIDEO1)
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+ ovl->id == OMAP_DSS_VIDEO1)
info.global_alpha = 255;
else
info.global_alpha = simple_strtoul(buf, NULL, 10);
@@ -510,11 +512,11 @@ static void omap_dss_add_overlay(struct omap_overlay *overlay)
list_add_tail(&overlay->list, &overlay_list);
}
-static struct omap_overlay *dispc_overlays[3];
+static struct omap_overlay *dispc_overlays[MAX_DSS_OVERLAYS];
void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
{
- mgr->num_overlays = 3;
+ mgr->num_overlays = dss_feat_get_num_ovls();
mgr->overlays = dispc_overlays;
}
@@ -535,7 +537,7 @@ void dss_init_overlays(struct platform_device *pdev)
num_overlays = 0;
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
struct omap_overlay *ovl;
ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
@@ -545,18 +547,12 @@ void dss_init_overlays(struct platform_device *pdev)
case 0:
ovl->name = "gfx";
ovl->id = OMAP_DSS_GFX;
- ovl->supported_modes = cpu_is_omap34xx() ?
- OMAP_DSS_COLOR_GFX_OMAP3 :
- OMAP_DSS_COLOR_GFX_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
break;
case 1:
ovl->name = "vid1";
ovl->id = OMAP_DSS_VIDEO1;
- ovl->supported_modes = cpu_is_omap34xx() ?
- OMAP_DSS_COLOR_VID1_OMAP3 :
- OMAP_DSS_COLOR_VID_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
@@ -564,9 +560,6 @@ void dss_init_overlays(struct platform_device *pdev)
case 2:
ovl->name = "vid2";
ovl->id = OMAP_DSS_VIDEO2;
- ovl->supported_modes = cpu_is_omap34xx() ?
- OMAP_DSS_COLOR_VID2_OMAP3 :
- OMAP_DSS_COLOR_VID_OMAP2;
ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
OMAP_DSS_OVL_CAP_DISPC;
ovl->info.global_alpha = 255;
@@ -579,6 +572,9 @@ void dss_init_overlays(struct platform_device *pdev)
ovl->get_overlay_info = &dss_ovl_get_overlay_info;
ovl->wait_for_go = &dss_ovl_wait_for_go;
+ ovl->supported_modes =
+ dss_feat_get_supported_color_modes(ovl->id);
+
omap_dss_add_overlay(ovl);
r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
@@ -651,7 +647,7 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
if (mgr) {
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < dss_feat_get_num_ovls(); i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
if (!ovl->manager || force) {
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 43496d6c377f..65149b22cf37 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -3,7 +3,7 @@ menuconfig FB_OMAP2
depends on FB && OMAP2_DSS
select OMAP2_VRAM
- select OMAP2_VRFB
+ select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 04034d410d6d..6a704f176c22 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -714,10 +714,10 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
var->pixclock = timings.pixel_clock != 0 ?
KHZ2PICOS(timings.pixel_clock) :
0;
- var->left_margin = timings.hfp;
- var->right_margin = timings.hbp;
- var->upper_margin = timings.vfp;
- var->lower_margin = timings.vbp;
+ var->left_margin = timings.hbp;
+ var->right_margin = timings.hfp;
+ var->upper_margin = timings.vbp;
+ var->lower_margin = timings.vfp;
var->hsync_len = timings.hsw;
var->vsync_len = timings.vsw;
} else {
@@ -2059,10 +2059,10 @@ static int omapfb_mode_to_timings(const char *mode_str,
if (r != 0) {
timings->pixel_clock = PICOS2KHZ(var.pixclock);
- timings->hfp = var.left_margin;
- timings->hbp = var.right_margin;
- timings->vfp = var.upper_margin;
- timings->vbp = var.lower_margin;
+ timings->hbp = var.left_margin;
+ timings->hfp = var.right_margin;
+ timings->vbp = var.upper_margin;
+ timings->vfp = var.lower_margin;
timings->hsw = var.hsync_len;
timings->vsw = var.vsync_len;
timings->x_res = var.xres;
@@ -2198,6 +2198,16 @@ static int omapfb_probe(struct platform_device *pdev)
goto err0;
}
+ /* TODO : Replace cpu check with omap_has_vrfb once HAS_FEATURE
+ * is available for OMAP2 and OMAP3
+ */
+ if (def_vrfb && !cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+ def_vrfb = 0;
+ dev_warn(&pdev->dev, "VRFB is not supported on this hardware, "
+ "ignoring the module parameter vrfb=y\n");
+ }
+
+
mutex_init(&fbdev->mtx);
fbdev->dev = &pdev->dev;
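The two omapfb-main.c hunks above are a straight swap of front and back porches; in the fbdev API the left/upper margins are the back porches and the right/lower margins the front porches, which the old code had inverted. For reference, the standard fbdev convention (per Documentation/fb/framebuffer.txt, not something this patch introduces):

/*
 *	var->left_margin   = horizontal back porch  (hbp)
 *	var->right_margin  = horizontal front porch (hfp)
 *	var->upper_margin  = vertical back porch    (vbp)
 *	var->lower_margin  = vertical front porch   (vfp)
 */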
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index ed371c868b3a..b16e6138fdd4 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -181,6 +181,15 @@ void savagefb_create_i2c_busses(struct fb_info *info)
par->chan.algo.getscl = prosavage_gpio_getscl;
break;
case FB_ACCEL_SAVAGE4:
+ par->chan.reg = CR_SERIAL1;
+ if (par->pcidev->revision > 1 && !(VGArCR(0xa6, par) & 0x40))
+ par->chan.reg = CR_SERIAL2;
+ par->chan.ioaddr = par->mmio.vbase;
+ par->chan.algo.setsda = prosavage_gpio_setsda;
+ par->chan.algo.setscl = prosavage_gpio_setscl;
+ par->chan.algo.getsda = prosavage_gpio_getsda;
+ par->chan.algo.getscl = prosavage_gpio_getscl;
+ break;
case FB_ACCEL_SAVAGE2000:
par->chan.reg = 0xff20;
par->chan.ioaddr = par->mmio.vbase;
diff --git a/drivers/video/via/Makefile b/drivers/video/via/Makefile
index d496adb0f832..96f01ee2a412 100644
--- a/drivers/video/via/Makefile
+++ b/drivers/video/via/Makefile
@@ -5,5 +5,5 @@
obj-$(CONFIG_FB_VIA) += viafb.o
viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
- via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o \
+ via_utility.o vt1636.o global.o tblDPASetting.o viamode.o \
via-core.o via-gpio.o via_modesetting.o
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index e44893ea590d..4b67b8e6030a 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -283,11 +283,12 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x1C);
}
- if (op != VIA_BITBLT_COLOR)
+ if (op == VIA_BITBLT_FILL) {
+ writel(fg_color, engine + 0x58);
+ } else if (op == VIA_BITBLT_MONO) {
writel(fg_color, engine + 0x4C);
-
- if (op == VIA_BITBLT_MONO)
writel(bg_color, engine + 0x50);
+ }
if (op == VIA_BITBLT_FILL)
ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
@@ -314,13 +315,11 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
return 0;
}
-int viafb_init_engine(struct fb_info *info)
+int viafb_setup_engine(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
void __iomem *engine;
- int highest_reg, i;
- u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
- vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
+ u32 chip_name = viapar->shared->chip_info.gfx_chip_name;
engine = viapar->shared->vdev->engine_mmio;
if (!engine) {
@@ -329,18 +328,6 @@ int viafb_init_engine(struct fb_info *info)
return -ENOMEM;
}
- /* Initialize registers to reset the 2D engine */
- switch (viapar->shared->chip_info.twod_engine) {
- case VIA_2D_ENG_M1:
- highest_reg = 0x5c;
- break;
- default:
- highest_reg = 0x40;
- break;
- }
- for (i = 0; i <= highest_reg; i += 4)
- writel(0x0, engine + i);
-
switch (chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
@@ -356,6 +343,7 @@ int viafb_init_engine(struct fb_info *info)
break;
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
viapar->shared->hw_bitblt = hw_bitblt_2;
break;
default:
@@ -370,7 +358,7 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
-#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
/*
* Set aside a chunk of framebuffer memory for the camera
* driver. Someday this driver probably needs a proper allocator
@@ -386,12 +374,36 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vdev->camera_fbmem_offset = viapar->fbmem_free;
#endif
+ viafb_reset_engine(viapar);
+ return 0;
+}
+
+void viafb_reset_engine(struct viafb_par *viapar)
+{
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
+ int highest_reg, i;
+ u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
+ vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
+
+ /* Initialize registers to reset the 2D engine */
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_M1:
+ highest_reg = 0x5c;
+ break;
+ default:
+ highest_reg = 0x40;
+ break;
+ }
+ for (i = 0; i <= highest_reg; i += 4)
+ writel(0x0, engine + i);
+
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
writel(0x00100000, engine + VIA_REG_CR_TRANSET);
writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
@@ -428,6 +440,7 @@ int viafb_init_engine(struct fb_info *info)
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
vq_start_low |= 0x20000000;
vq_end_low |= 0x20000000;
vq_high |= 0x20000000;
@@ -473,7 +486,7 @@ int viafb_init_engine(struct fb_info *info)
writel(0x0, engine + VIA_REG_CURSOR_ORG);
writel(0x0, engine + VIA_REG_CURSOR_BG);
writel(0x0, engine + VIA_REG_CURSOR_FG);
- return 0;
+ return;
}
void viafb_show_hw_cursor(struct fb_info *info, int Status)
diff --git a/drivers/video/via/accel.h b/drivers/video/via/accel.h
index 2c122d292365..79d5e10cc835 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/via/accel.h
@@ -203,7 +203,8 @@
#define VIA_BITBLT_MONO 2
#define VIA_BITBLT_FILL 3
-int viafb_init_engine(struct fb_info *info);
+int viafb_setup_engine(struct fb_info *info);
+void viafb_reset_engine(struct viafb_par *viapar);
void viafb_show_hw_cursor(struct fb_info *info, int Status);
void viafb_wait_engine_idle(struct fb_info *info);
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index ef1f3de2e052..48f1342897bd 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -71,6 +71,9 @@
#define UNICHROME_VX855 12
#define UNICHROME_VX855_DID 0x5122
+#define UNICHROME_VX900 13
+#define UNICHROME_VX900_DID 0x7122
+
/**************************************************/
/* Definition TMDS Trasmitter Information */
/**************************************************/
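UNICHROME_VX900 (chip index 13) is identified by the function-3 PCI device ID 0x7122 defined above. A small sketch of mapping such an ID to the chip index follows; chip_from_did and the two-entry table are illustrative assumptions, since only the VX855 and VX900 IDs are visible in this hunk.

#include <stddef.h>
#include <stdio.h>

#define UNICHROME_VX855		12
#define UNICHROME_VX855_DID	0x5122
#define UNICHROME_VX900		13
#define UNICHROME_VX900_DID	0x7122

struct chip_id_map { unsigned short did; int chip; };

/* Abbreviated table: the driver knows many more chips than these two. */
static const struct chip_id_map ids[] = {
	{ UNICHROME_VX855_DID, UNICHROME_VX855 },
	{ UNICHROME_VX900_DID, UNICHROME_VX900 },
};

static int chip_from_did(unsigned short did)
{
	size_t i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		if (ids[i].did == did)
			return ids[i].chip;
	return -1;
}

int main(void)
{
	printf("device ID 0x7122 -> chip index %d\n", chip_from_did(0x7122));
	return 0;
}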
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index 39b040bb3817..84e21b39dd0b 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -25,10 +25,12 @@
static void tmds_register_write(int index, u8 data);
static int tmds_register_read(int index);
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len);
-static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
- *tmds_chip, struct tmds_setting_information *tmds_setting);
-static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
- *tmds_chip, struct tmds_setting_information *tmds_setting);
+static void __devinit dvi_get_panel_size_from_DDCv1(
+ struct tmds_chip_information *tmds_chip,
+ struct tmds_setting_information *tmds_setting);
+static void __devinit dvi_get_panel_size_from_DDCv2(
+ struct tmds_chip_information *tmds_chip,
+ struct tmds_setting_information *tmds_setting);
static int viafb_dvi_query_EDID(void);
static int check_tmds_chip(int device_id_subaddr, int device_id)
@@ -39,7 +41,7 @@ static int check_tmds_chip(int device_id_subaddr, int device_id)
return FAIL;
}
-void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting)
{
DEBUG_MSG(KERN_INFO "viafb_init_dvi_size()\n");
@@ -60,7 +62,7 @@ void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
return;
}
-int viafb_tmds_trasmitter_identify(void)
+int __devinit viafb_tmds_trasmitter_identify(void)
{
unsigned char sr2a = 0, sr1e = 0, sr3e = 0;
@@ -208,8 +210,6 @@ void viafb_dvi_set_mode(struct VideoModeTable *mode, int mode_bpp,
}
}
viafb_fill_crtc_timing(pDviTiming, mode, mode_bpp / 8, set_iga);
- viafb_set_output_path(DEVICE_DVI, set_iga,
- viaparinfo->chip_info->tmds_chip_info.output_interface);
}
/* Sense DVI Connector */
@@ -313,8 +313,9 @@ static int viafb_dvi_query_EDID(void)
}
/* Get Panel Size Using EDID1 Table */
-static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
- *tmds_chip, struct tmds_setting_information *tmds_setting)
+static void __devinit dvi_get_panel_size_from_DDCv1(
+ struct tmds_chip_information *tmds_chip,
+ struct tmds_setting_information *tmds_setting)
{
int i, max_h = 0, tmp, restore;
unsigned char rData;
@@ -418,8 +419,9 @@ static void dvi_get_panel_size_from_DDCv1(struct tmds_chip_information
}
/* Get Panel Size Using EDID2 Table */
-static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
- *tmds_chip, struct tmds_setting_information *tmds_setting)
+static void __devinit dvi_get_panel_size_from_DDCv2(
+ struct tmds_chip_information *tmds_chip,
+ struct tmds_setting_information *tmds_setting)
{
int restore;
unsigned char R_Buffer[2];
@@ -468,64 +470,107 @@ static void dvi_get_panel_size_from_DDCv2(struct tmds_chip_information
void viafb_dvi_disable(void)
{
if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DVP0)
- viafb_write_reg(SR1E, VIASR,
- viafb_read_reg(VIASR, SR1E) & (~0xC0));
-
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DVP1)
- viafb_write_reg(SR1E, VIASR,
- viafb_read_reg(VIASR, SR1E) & (~0x30));
-
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DFP_HIGH)
- viafb_write_reg(SR2A, VIASR,
- viafb_read_reg(VIASR, SR2A) & (~0x0C));
-
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DFP_LOW)
- viafb_write_reg(SR2A, VIASR,
- viafb_read_reg(VIASR, SR2A) & (~0x03));
-
- if (viaparinfo->chip_info->
tmds_chip_info.output_interface == INTERFACE_TMDS)
/* Turn off TMDS power. */
viafb_write_reg(CRD2, VIACR,
viafb_read_reg(VIACR, CRD2) | 0x08);
}
+static void dvi_patch_skew_dvp0(void)
+{
+ /* Reset data driving first: */
+ viafb_write_reg_mask(SR1B, VIASR, 0, BIT1);
+ viafb_write_reg_mask(SR2A, VIASR, 0, BIT4);
+
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_P4M890:
+ {
+ if ((viaparinfo->tmds_setting_info->h_active == 1600) &&
+ (viaparinfo->tmds_setting_info->v_active ==
+ 1200))
+ viafb_write_reg_mask(CR96, VIACR, 0x03,
+ BIT0 + BIT1 + BIT2);
+ else
+ viafb_write_reg_mask(CR96, VIACR, 0x07,
+ BIT0 + BIT1 + BIT2);
+ break;
+ }
+
+ case UNICHROME_P4M900:
+ {
+ viafb_write_reg_mask(CR96, VIACR, 0x07,
+ BIT0 + BIT1 + BIT2 + BIT3);
+ viafb_write_reg_mask(SR1B, VIASR, 0x02, BIT1);
+ viafb_write_reg_mask(SR2A, VIASR, 0x10, BIT4);
+ break;
+ }
+
+ default:
+ {
+ break;
+ }
+ }
+}
+
+static void dvi_patch_skew_dvp_low(void)
+{
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_K8M890:
+ {
+ viafb_write_reg_mask(CR99, VIACR, 0x03, BIT0 + BIT1);
+ break;
+ }
+
+ case UNICHROME_P4M900:
+ {
+ viafb_write_reg_mask(CR99, VIACR, 0x08,
+ BIT0 + BIT1 + BIT2 + BIT3);
+ break;
+ }
+
+ case UNICHROME_P4M890:
+ {
+ viafb_write_reg_mask(CR99, VIACR, 0x0F,
+ BIT0 + BIT1 + BIT2 + BIT3);
+ break;
+ }
+
+ default:
+ {
+ break;
+ }
+ }
+}
+
/* If Enable DVI, turn off pad */
void viafb_dvi_enable(void)
{
u8 data;
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DVP0) {
- viafb_write_reg(SR1E, VIASR,
- viafb_read_reg(VIASR, SR1E) | 0xC0);
+ switch (viaparinfo->chip_info->tmds_chip_info.output_interface) {
+ case INTERFACE_DVP0:
+ viafb_write_reg_mask(CR6B, VIACR, 0x01, BIT0);
+ viafb_write_reg_mask(CR6C, VIACR, 0x21, BIT0 + BIT5);
+ dvi_patch_skew_dvp0();
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
tmds_register_write(0x88, 0x3b);
else
 			/*clear CR91[5] to direct on display period
			in the secondary display path */
- viafb_write_reg(CR91, VIACR,
- viafb_read_reg(VIACR, CR91) & 0xDF);
- }
+ via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+ break;
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DVP1) {
- viafb_write_reg(SR1E, VIASR,
- viafb_read_reg(VIASR, SR1E) | 0x30);
+ case INTERFACE_DVP1:
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+ viafb_write_reg_mask(CR93, VIACR, 0x21, BIT0 + BIT5);
 		/*fix: DVI can't be enabled with MB VT5718C4 - Al Zhang */
- if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
tmds_register_write(0x88, 0x3b);
- } else {
+ else
 			/*clear CR91[5] to direct on display period
			in the secondary display path */
- viafb_write_reg(CR91, VIACR,
- viafb_read_reg(VIACR, CR91) & 0xDF);
- }
+ via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
/*fix DVI cannot enable on EPIA-M board */
if (viafb_platform_epia_dvi == 1) {
@@ -537,36 +582,40 @@ void viafb_dvi_enable(void)
else
data = 0x37;
viafb_i2c_writebyte(viaparinfo->chip_info->
- tmds_chip_info.i2c_port,
- viaparinfo->chip_info->
- tmds_chip_info.tmds_chip_slave_addr,
- 0x08, data);
+ tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->
+ tmds_chip_info.tmds_chip_slave_addr,
+ 0x08, data);
}
}
- }
+ break;
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DFP_HIGH) {
- viafb_write_reg(SR2A, VIASR,
- viafb_read_reg(VIASR, SR2A) | 0x0C);
- viafb_write_reg(CR91, VIACR,
- viafb_read_reg(VIACR, CR91) & 0xDF);
- }
+ case INTERFACE_DFP_HIGH:
+ if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266)
+ via_write_reg_mask(VIACR, CR97, 0x03, 0x03);
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_DFP_LOW) {
- viafb_write_reg(SR2A, VIASR,
- viafb_read_reg(VIASR, SR2A) | 0x03);
- viafb_write_reg(CR91, VIACR,
- viafb_read_reg(VIACR, CR91) & 0xDF);
- }
- if (viaparinfo->chip_info->
- tmds_chip_info.output_interface == INTERFACE_TMDS) {
+ via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+ break;
+
+ case INTERFACE_DFP_LOW:
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+ break;
+
+ dvi_patch_skew_dvp_low();
+ via_write_reg_mask(VIACR, 0x91, 0x00, 0x20);
+ break;
+
+ case INTERFACE_TMDS:
/* Turn on Display period in the panel path. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT7);
/* Turn on TMDS power. */
viafb_write_reg_mask(CRD2, VIACR, 0, BIT3);
+ break;
}
-}
+ if (viaparinfo->tmds_setting_info->iga_path == IGA2) {
+ /* Disable LCD Scaling */
+ viafb_write_reg_mask(CR79, VIACR, 0x00, BIT0);
+ }
+}
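The rewritten dvi.c above leans on masked register writes such as via_write_reg_mask(VIACR, 0x91, 0x00, 0x20) instead of explicit read/AND/write sequences. The sketch below models the read-modify-write semantics assumed here: bits selected by the mask are taken from the new value, all other bits are preserved. The real helper is provided elsewhere in the driver and is not shown in this patch.

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[256];	/* stands in for an indexed register file such as VIACR */

/* Assumed semantics of a masked write: (old & ~mask) | (value & mask). */
static void write_reg_mask(uint8_t index, uint8_t value, uint8_t mask)
{
	regs[index] = (regs[index] & ~mask) | (value & mask);
}

int main(void)
{
	regs[0x91] = 0xff;
	write_reg_mask(0x91, 0x00, 0x20);	/* clear CR91[5], keep the other bits */
	printf("CR91 = 0x%02x\n", regs[0x91]);	/* prints 0xdf */
	return 0;
}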
diff --git a/drivers/video/via/dvi.h b/drivers/video/via/dvi.h
index 0dffcfd395f3..2c525c0c1adb 100644
--- a/drivers/video/via/dvi.h
+++ b/drivers/video/via/dvi.h
@@ -56,8 +56,8 @@
int viafb_dvi_sense(void);
void viafb_dvi_disable(void);
void viafb_dvi_enable(void);
-int viafb_tmds_trasmitter_identify(void);
-void viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
+int __devinit viafb_tmds_trasmitter_identify(void);
+void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
struct tmds_setting_information *tmds_setting);
void viafb_dvi_set_mode(struct VideoModeTable *videoMode, int mode_bpp,
int set_iga);
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 28221a062dda..38ef5ac66953 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -48,7 +48,6 @@
#include "via_utility.h"
#include "vt1636.h"
#include "tblDPASetting.h"
-#include "tbl1636.h"
/* External struct*/
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 7dcb4d5bb9c3..36d73f940d8b 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -718,16 +718,20 @@ static struct rgbLUT palLUT_table[] = {
0x00}
};
-static void set_crt_output_path(int set_iga);
-static void dvi_patch_skew_dvp0(void);
-static void dvi_patch_skew_dvp1(void);
-static void dvi_patch_skew_dvp_low(void);
-static void set_dvi_output_path(int set_iga, int output_interface);
-static void set_lcd_output_path(int set_iga, int output_interface);
+static struct via_device_mapping device_mapping[] = {
+ {VIA_LDVP0, "LDVP0"},
+ {VIA_LDVP1, "LDVP1"},
+ {VIA_DVP0, "DVP0"},
+ {VIA_CRT, "CRT"},
+ {VIA_DVP1, "DVP1"},
+ {VIA_LVDS1, "LVDS1"},
+ {VIA_LVDS2, "LVDS2"}
+};
+
static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(int chip_type);
-static void init_tmds_chip_info(void);
-static void init_lvds_chip_info(void);
+static void __devinit init_gfx_chip_info(int chip_type);
+static void __devinit init_tmds_chip_info(void);
+static void __devinit init_lvds_chip_info(void);
static void device_screen_off(void);
static void device_screen_on(void);
static void set_display_channel(void);
@@ -755,6 +759,66 @@ void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
outb(b, LUT_DATA);
}
+static u32 get_dvi_devices(int output_interface)
+{
+ switch (output_interface) {
+ case INTERFACE_DVP0:
+ return VIA_DVP0 | VIA_LDVP0;
+
+ case INTERFACE_DVP1:
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+ return VIA_LDVP1;
+ else
+ return VIA_DVP1;
+
+ case INTERFACE_DFP_HIGH:
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+ return 0;
+ else
+ return VIA_LVDS2 | VIA_DVP0;
+
+ case INTERFACE_DFP_LOW:
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
+ return 0;
+ else
+ return VIA_DVP1 | VIA_LVDS1;
+
+ case INTERFACE_TMDS:
+ return VIA_LVDS1;
+ }
+
+ return 0;
+}
+
+static u32 get_lcd_devices(int output_interface)
+{
+ switch (output_interface) {
+ case INTERFACE_DVP0:
+ return VIA_DVP0;
+
+ case INTERFACE_DVP1:
+ return VIA_DVP1;
+
+ case INTERFACE_DFP_HIGH:
+ return VIA_LVDS2 | VIA_DVP0;
+
+ case INTERFACE_DFP_LOW:
+ return VIA_LVDS1 | VIA_DVP1;
+
+ case INTERFACE_DFP:
+ return VIA_LVDS1 | VIA_LVDS2;
+
+ case INTERFACE_LVDS0:
+ case INTERFACE_LVDS0LVDS1:
+ return VIA_LVDS1;
+
+ case INTERFACE_LVDS1:
+ return VIA_LVDS2;
+ }
+
+ return 0;
+}
+
/*Set IGA path for each device*/
void viafb_set_iga_path(void)
{
@@ -821,6 +885,48 @@ void viafb_set_iga_path(void)
viaparinfo->tmds_setting_info->iga_path = IGA1;
}
}
+
+ viaparinfo->shared->iga1_devices = 0;
+ viaparinfo->shared->iga2_devices = 0;
+ if (viafb_CRT_ON) {
+ if (viaparinfo->crt_setting_info->iga_path == IGA1)
+ viaparinfo->shared->iga1_devices |= VIA_CRT;
+ else
+ viaparinfo->shared->iga2_devices |= VIA_CRT;
+ }
+
+ if (viafb_DVI_ON) {
+ if (viaparinfo->tmds_setting_info->iga_path == IGA1)
+ viaparinfo->shared->iga1_devices |= get_dvi_devices(
+ viaparinfo->chip_info->
+ tmds_chip_info.output_interface);
+ else
+ viaparinfo->shared->iga2_devices |= get_dvi_devices(
+ viaparinfo->chip_info->
+ tmds_chip_info.output_interface);
+ }
+
+ if (viafb_LCD_ON) {
+ if (viaparinfo->lvds_setting_info->iga_path == IGA1)
+ viaparinfo->shared->iga1_devices |= get_lcd_devices(
+ viaparinfo->chip_info->
+ lvds_chip_info.output_interface);
+ else
+ viaparinfo->shared->iga2_devices |= get_lcd_devices(
+ viaparinfo->chip_info->
+ lvds_chip_info.output_interface);
+ }
+
+ if (viafb_LCD2_ON) {
+ if (viaparinfo->lvds_setting_info2->iga_path == IGA1)
+ viaparinfo->shared->iga1_devices |= get_lcd_devices(
+ viaparinfo->chip_info->
+ lvds_chip_info2.output_interface);
+ else
+ viaparinfo->shared->iga2_devices |= get_lcd_devices(
+ viaparinfo->chip_info->
+ lvds_chip_info2.output_interface);
+ }
}
static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
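viafb_set_iga_path() now finishes by collecting every enabled output into the iga1_devices/iga2_devices bitmasks, which via_set_source() later walks to route each device to its IGA. Below is a minimal sketch of that flow, using a few of the VIA_* bits defined in the hw.h hunk further down; set_source() here only prints instead of writing registers.

#include <stdint.h>
#include <stdio.h>

/* A subset of the device bits from the hw.h hunk in this patch. */
#define VIA_DVP0	0x00000004
#define VIA_CRT		0x00000010
#define VIA_DVP1	0x00000020
#define VIA_LVDS1	0x00000040

enum { IGA1 = 1, IGA2 = 2 };

/* Sketch of via_set_source(): test each bit and route the device. */
static void set_source(uint32_t devices, int iga)
{
	if (devices & VIA_CRT)
		printf("CRT   -> IGA%d\n", iga);
	if (devices & VIA_DVP1)
		printf("DVP1  -> IGA%d\n", iga);
	if (devices & VIA_LVDS1)
		printf("LVDS1 -> IGA%d\n", iga);
}

int main(void)
{
	/* e.g. CRT on the primary pipe, an LVDS panel fed by DVP1 on the secondary */
	uint32_t iga1_devices = VIA_CRT;
	uint32_t iga2_devices = VIA_DVP1 | VIA_LVDS1;

	set_source(iga1_devices, IGA1);
	set_source(iga2_devices, IGA2);
	return 0;
}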
@@ -844,295 +950,266 @@ void viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue)
set_color_register(index, red, green, blue);
}
-void viafb_set_output_path(int device, int set_iga, int output_interface)
+static void set_source_common(u8 index, u8 offset, u8 iga)
{
- switch (device) {
- case DEVICE_CRT:
- set_crt_output_path(set_iga);
- break;
- case DEVICE_DVI:
- set_dvi_output_path(set_iga, output_interface);
+ u8 value, mask = 1 << offset;
+
+ switch (iga) {
+ case IGA1:
+ value = 0x00;
break;
- case DEVICE_LCD:
- set_lcd_output_path(set_iga, output_interface);
+ case IGA2:
+ value = mask;
break;
+ default:
+ printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga);
+ return;
}
+
+ via_write_reg_mask(VIACR, index, value, mask);
}
-static void set_crt_output_path(int set_iga)
+static void set_crt_source(u8 iga)
{
- viafb_write_reg_mask(CR36, VIACR, 0x00, BIT4 + BIT5);
+ u8 value;
- switch (set_iga) {
+ switch (iga) {
case IGA1:
- viafb_write_reg_mask(SR16, VIASR, 0x00, BIT6);
+ value = 0x00;
break;
case IGA2:
- viafb_write_reg_mask(CR6A, VIACR, 0xC0, BIT6 + BIT7);
- viafb_write_reg_mask(SR16, VIASR, 0x40, BIT6);
+ value = 0x40;
break;
+ default:
+ printk(KERN_WARNING "viafb: Unsupported source: %d\n", iga);
+ return;
}
+
+ via_write_reg_mask(VIASR, 0x16, value, 0x40);
}
-static void dvi_patch_skew_dvp0(void)
+static inline void set_ldvp0_source(u8 iga)
{
- /* Reset data driving first: */
- viafb_write_reg_mask(SR1B, VIASR, 0, BIT1);
- viafb_write_reg_mask(SR2A, VIASR, 0, BIT4);
-
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_P4M890:
- {
- if ((viaparinfo->tmds_setting_info->h_active == 1600) &&
- (viaparinfo->tmds_setting_info->v_active ==
- 1200))
- viafb_write_reg_mask(CR96, VIACR, 0x03,
- BIT0 + BIT1 + BIT2);
- else
- viafb_write_reg_mask(CR96, VIACR, 0x07,
- BIT0 + BIT1 + BIT2);
- break;
- }
+ set_source_common(0x6C, 7, iga);
+}
- case UNICHROME_P4M900:
- {
- viafb_write_reg_mask(CR96, VIACR, 0x07,
- BIT0 + BIT1 + BIT2 + BIT3);
- viafb_write_reg_mask(SR1B, VIASR, 0x02, BIT1);
- viafb_write_reg_mask(SR2A, VIASR, 0x10, BIT4);
- break;
- }
+static inline void set_ldvp1_source(u8 iga)
+{
+ set_source_common(0x93, 7, iga);
+}
- default:
- {
- break;
- }
- }
+static inline void set_dvp0_source(u8 iga)
+{
+ set_source_common(0x96, 4, iga);
}
-static void dvi_patch_skew_dvp1(void)
+static inline void set_dvp1_source(u8 iga)
{
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_CX700:
- {
- break;
- }
+ set_source_common(0x9B, 4, iga);
+}
- default:
- {
- break;
- }
- }
+static inline void set_lvds1_source(u8 iga)
+{
+ set_source_common(0x99, 4, iga);
}
-static void dvi_patch_skew_dvp_low(void)
+static inline void set_lvds2_source(u8 iga)
{
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_K8M890:
- {
- viafb_write_reg_mask(CR99, VIACR, 0x03, BIT0 + BIT1);
- break;
- }
+ set_source_common(0x97, 4, iga);
+}
- case UNICHROME_P4M900:
- {
- viafb_write_reg_mask(CR99, VIACR, 0x08,
- BIT0 + BIT1 + BIT2 + BIT3);
- break;
- }
+void via_set_source(u32 devices, u8 iga)
+{
+ if (devices & VIA_LDVP0)
+ set_ldvp0_source(iga);
+ if (devices & VIA_LDVP1)
+ set_ldvp1_source(iga);
+ if (devices & VIA_DVP0)
+ set_dvp0_source(iga);
+ if (devices & VIA_CRT)
+ set_crt_source(iga);
+ if (devices & VIA_DVP1)
+ set_dvp1_source(iga);
+ if (devices & VIA_LVDS1)
+ set_lvds1_source(iga);
+ if (devices & VIA_LVDS2)
+ set_lvds2_source(iga);
+}
- case UNICHROME_P4M890:
- {
- viafb_write_reg_mask(CR99, VIACR, 0x0F,
- BIT0 + BIT1 + BIT2 + BIT3);
- break;
- }
+static void set_crt_state(u8 state)
+{
+ u8 value;
+ switch (state) {
+ case VIA_STATE_ON:
+ value = 0x00;
+ break;
+ case VIA_STATE_STANDBY:
+ value = 0x10;
+ break;
+ case VIA_STATE_SUSPEND:
+ value = 0x20;
+ break;
+ case VIA_STATE_OFF:
+ value = 0x30;
+ break;
default:
- {
- break;
- }
+ return;
}
+
+ via_write_reg_mask(VIACR, 0x36, value, 0x30);
}
-static void set_dvi_output_path(int set_iga, int output_interface)
+static void set_dvp0_state(u8 state)
{
- switch (output_interface) {
- case INTERFACE_DVP0:
- viafb_write_reg_mask(CR6B, VIACR, 0x01, BIT0);
-
- if (set_iga == IGA1) {
- viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
- viafb_write_reg_mask(CR6C, VIACR, 0x21, BIT0 +
- BIT5 + BIT7);
- } else {
- viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR6C, VIACR, 0xA1, BIT0 +
- BIT5 + BIT7);
- }
-
- viafb_write_reg_mask(SR1E, VIASR, 0xC0, BIT7 + BIT6);
+ u8 value;
- dvi_patch_skew_dvp0();
+ switch (state) {
+ case VIA_STATE_ON:
+ value = 0xC0;
break;
+ case VIA_STATE_OFF:
+ value = 0x00;
+ break;
+ default:
+ return;
+ }
- case INTERFACE_DVP1:
- if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR93, VIACR, 0x21,
- BIT0 + BIT5 + BIT7);
- else
- viafb_write_reg_mask(CR93, VIACR, 0xA1,
- BIT0 + BIT5 + BIT7);
- } else {
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
- else
- viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
- }
+ via_write_reg_mask(VIASR, 0x1E, value, 0xC0);
+}
+
+static void set_dvp1_state(u8 state)
+{
+ u8 value;
- viafb_write_reg_mask(SR1E, VIASR, 0x30, BIT4 + BIT5);
- dvi_patch_skew_dvp1();
+ switch (state) {
+ case VIA_STATE_ON:
+ value = 0x30;
break;
- case INTERFACE_DFP_HIGH:
- if (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266) {
- if (set_iga == IGA1) {
- viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
- viafb_write_reg_mask(CR97, VIACR, 0x03,
- BIT0 + BIT1 + BIT4);
- } else {
- viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR97, VIACR, 0x13,
- BIT0 + BIT1 + BIT4);
- }
- }
- viafb_write_reg_mask(SR2A, VIASR, 0x0C, BIT2 + BIT3);
+ case VIA_STATE_OFF:
+ value = 0x00;
break;
+ default:
+ return;
+ }
- case INTERFACE_DFP_LOW:
- if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)
- break;
+ via_write_reg_mask(VIASR, 0x1E, value, 0x30);
+}
- if (set_iga == IGA1) {
- viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
- viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
- } else {
- viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
- }
+static void set_lvds1_state(u8 state)
+{
+ u8 value;
- viafb_write_reg_mask(SR2A, VIASR, 0x03, BIT0 + BIT1);
- dvi_patch_skew_dvp_low();
+ switch (state) {
+ case VIA_STATE_ON:
+ value = 0x03;
break;
-
- case INTERFACE_TMDS:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
- else
- viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+ case VIA_STATE_OFF:
+ value = 0x00;
break;
+ default:
+ return;
}
- if (set_iga == IGA2) {
- enable_second_display_channel();
- /* Disable LCD Scaling */
- viafb_write_reg_mask(CR79, VIACR, 0x00, BIT0);
- }
+ via_write_reg_mask(VIASR, 0x2A, value, 0x03);
}
-static void set_lcd_output_path(int set_iga, int output_interface)
+static void set_lvds2_state(u8 state)
{
- DEBUG_MSG(KERN_INFO
- "set_lcd_output_path, iga:%d,out_interface:%d\n",
- set_iga, output_interface);
- switch (set_iga) {
- case IGA1:
- viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
- viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+ u8 value;
- disable_second_display_channel();
+ switch (state) {
+ case VIA_STATE_ON:
+ value = 0x0C;
break;
-
- case IGA2:
- viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
- viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
-
- enable_second_display_channel();
+ case VIA_STATE_OFF:
+ value = 0x00;
break;
+ default:
+ return;
}
- switch (output_interface) {
- case INTERFACE_DVP0:
- if (set_iga == IGA1) {
- viafb_write_reg_mask(CR96, VIACR, 0x00, BIT4);
- } else {
- viafb_write_reg(CR91, VIACR, 0x00);
- viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
- }
- break;
-
- case INTERFACE_DVP1:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR9B, VIACR, 0x00, BIT4);
- else {
- viafb_write_reg(CR91, VIACR, 0x00);
- viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
- }
- break;
+ via_write_reg_mask(VIASR, 0x2A, value, 0x0C);
+}
- case INTERFACE_DFP_HIGH:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
- else {
- viafb_write_reg(CR91, VIACR, 0x00);
- viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR96, VIACR, 0x10, BIT4);
- }
- break;
+void via_set_state(u32 devices, u8 state)
+{
+ /*
+ TODO: Can we enable/disable these devices? How?
+ if (devices & VIA_LDVP0)
+ if (devices & VIA_LDVP1)
+ */
+ if (devices & VIA_DVP0)
+ set_dvp0_state(state);
+ if (devices & VIA_CRT)
+ set_crt_state(state);
+ if (devices & VIA_DVP1)
+ set_dvp1_state(state);
+ if (devices & VIA_LVDS1)
+ set_lvds1_state(state);
+ if (devices & VIA_LVDS2)
+ set_lvds2_state(state);
+}
- case INTERFACE_DFP_LOW:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
- else {
- viafb_write_reg(CR91, VIACR, 0x00);
- viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR9B, VIACR, 0x10, BIT4);
- }
+void via_set_sync_polarity(u32 devices, u8 polarity)
+{
+ if (polarity & ~(VIA_HSYNC_NEGATIVE | VIA_VSYNC_NEGATIVE)) {
+ printk(KERN_WARNING "viafb: Unsupported polarity: %d\n",
+ polarity);
+ return;
+ }
- break;
+ if (devices & VIA_CRT)
+ via_write_misc_reg_mask(polarity << 6, 0xC0);
+ if (devices & VIA_DVP1)
+ via_write_reg_mask(VIACR, 0x9B, polarity << 5, 0x60);
+ if (devices & VIA_LVDS1)
+ via_write_reg_mask(VIACR, 0x99, polarity << 5, 0x60);
+ if (devices & VIA_LVDS2)
+ via_write_reg_mask(VIACR, 0x97, polarity << 5, 0x60);
+}
- case INTERFACE_DFP:
- if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)
- || (UNICHROME_P4M890 ==
- viaparinfo->chip_info->gfx_chip_name))
- viafb_write_reg_mask(CR97, VIACR, 0x84,
- BIT7 + BIT2 + BIT1 + BIT0);
- if (set_iga == IGA1) {
- viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
- viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
- } else {
- viafb_write_reg(CR91, VIACR, 0x00);
- viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
- viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+u32 via_parse_odev(char *input, char **end)
+{
+ char *ptr = input;
+ u32 odev = 0;
+ bool next = true;
+ int i, len;
+
+ while (next) {
+ next = false;
+ for (i = 0; i < ARRAY_SIZE(device_mapping); i++) {
+ len = strlen(device_mapping[i].name);
+ if (!strncmp(ptr, device_mapping[i].name, len)) {
+ odev |= device_mapping[i].device;
+ ptr += len;
+ if (*ptr == ',') {
+ ptr++;
+ next = true;
+ }
+ }
}
- break;
+ }
- case INTERFACE_LVDS0:
- case INTERFACE_LVDS0LVDS1:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR99, VIACR, 0x00, BIT4);
- else
- viafb_write_reg_mask(CR99, VIACR, 0x10, BIT4);
+ *end = ptr;
+ return odev;
+}
- break;
+void via_odev_to_seq(struct seq_file *m, u32 odev)
+{
+ int i, count = 0;
- case INTERFACE_LVDS1:
- if (set_iga == IGA1)
- viafb_write_reg_mask(CR97, VIACR, 0x00, BIT4);
- else
- viafb_write_reg_mask(CR97, VIACR, 0x10, BIT4);
- break;
+ for (i = 0; i < ARRAY_SIZE(device_mapping); i++) {
+ if (odev & device_mapping[i].device) {
+ if (count > 0)
+ seq_putc(m, ',');
+
+ seq_puts(m, device_mapping[i].name);
+ count++;
+ }
}
+
+ seq_putc(m, '\n');
}
static void load_fix_bit_crtc_reg(void)
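via_parse_odev() above turns a comma-separated list of output device names ("CRT,DVP1,...") into a device bitmask, and via_odev_to_seq() prints a bitmask back in the same form. The sketch below is a userspace model of the same parsing loop; the three-entry table is abbreviated and the bit values are taken from the hw.h hunk later in this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct device_mapping { uint32_t device; const char *name; };

/* Abbreviated version of the device_mapping[] table added above. */
static const struct device_mapping map[] = {
	{ 0x10, "CRT" }, { 0x20, "DVP1" }, { 0x40, "LVDS1" },
};

/* Same shape as via_parse_odev(): consume "NAME,NAME,..." and OR in the bits. */
static uint32_t parse_odev(const char *input, const char **end)
{
	const char *ptr = input;
	uint32_t odev = 0;
	bool next = true;
	size_t i, len;

	while (next) {
		next = false;
		for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
			len = strlen(map[i].name);
			if (!strncmp(ptr, map[i].name, len)) {
				odev |= map[i].device;
				ptr += len;
				if (*ptr == ',') {
					ptr++;
					next = true;
				}
			}
		}
	}
	*end = ptr;
	return odev;
}

int main(void)
{
	const char *end;
	uint32_t odev = parse_odev("CRT,LVDS1", &end);

	printf("parsed bitmask: 0x%02x\n", (unsigned)odev);
	return 0;
}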
@@ -1352,6 +1429,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
VX855_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
}
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) {
+ iga1_fifo_max_depth = VX900_IGA1_FIFO_MAX_DEPTH;
+ iga1_fifo_threshold = VX900_IGA1_FIFO_THRESHOLD;
+ iga1_fifo_high_threshold =
+ VX900_IGA1_FIFO_HIGH_THRESHOLD;
+ iga1_display_queue_expire_num =
+ VX900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
+ }
+
 	/* Set Display FIFO Depth Select */
reg_value = IGA1_FIFO_DEPTH_SELECT_FORMULA(iga1_fifo_max_depth);
viafb_load_reg_num =
@@ -1492,6 +1578,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
}
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX900) {
+ iga2_fifo_max_depth = VX900_IGA2_FIFO_MAX_DEPTH;
+ iga2_fifo_threshold = VX900_IGA2_FIFO_THRESHOLD;
+ iga2_fifo_high_threshold =
+ VX900_IGA2_FIFO_HIGH_THRESHOLD;
+ iga2_display_queue_expire_num =
+ VX900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
+ }
+
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) {
 		/* Set Display FIFO Depth Select */
reg_value =
@@ -1612,6 +1707,7 @@ u32 viafb_get_clk_value(int clk)
break;
case UNICHROME_VX855:
+ case UNICHROME_VX900:
value = vx855_encode_pll(pll_value[i].vx855_pll);
break;
}
@@ -1645,6 +1741,7 @@ void viafb_set_vclock(u32 clk, int set_iga)
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
via_write_reg(VIASR, SR44, (clk & 0x0000FF));
via_write_reg(VIASR, SR45, (clk & 0x00FF00) >> 8);
via_write_reg(VIASR, SR46, (clk & 0xFF0000) >> 16);
@@ -1671,6 +1768,7 @@ void viafb_set_vclock(u32 clk, int set_iga)
case UNICHROME_P4M900:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
via_write_reg(VIASR, SR4A, (clk & 0x0000FF));
via_write_reg(VIASR, SR4B, (clk & 0x00FF00) >> 8);
via_write_reg(VIASR, SR4C, (clk & 0xFF0000) >> 16);
@@ -1688,8 +1786,8 @@ void viafb_set_vclock(u32 clk, int set_iga)
}
if (set_iga == IGA2) {
- viafb_write_reg_mask(SR40, VIASR, 0x01, BIT0);
- viafb_write_reg_mask(SR40, VIASR, 0x00, BIT0);
+ viafb_write_reg_mask(SR40, VIASR, 0x04, BIT2);
+ viafb_write_reg_mask(SR40, VIASR, 0x00, BIT2);
}
/* Fire! */
@@ -1937,7 +2035,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int index = 0;
int h_addr, v_addr;
u32 pll_D_N;
- u8 polarity = 0;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -1964,14 +2061,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
h_addr = crt_reg.hor_addr;
v_addr = crt_reg.ver_addr;
-
- /* update polarity for CRT timing */
- if (crt_table[index].h_sync_polarity == NEGATIVE)
- polarity |= BIT6;
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- polarity |= BIT7;
- via_write_misc_reg_mask(polarity, BIT6 | BIT7);
-
if (set_iga == IGA1) {
viafb_unlock_crt();
viafb_write_reg(CR09, VIACR, 0x00); /*initial CR09=0 */
@@ -2004,7 +2093,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
}
-void viafb_init_chip_info(int chip_type)
+void __devinit viafb_init_chip_info(int chip_type)
{
init_gfx_chip_info(chip_type);
init_tmds_chip_info();
@@ -2071,7 +2160,7 @@ void viafb_update_device_setting(int hres, int vres,
}
}
-static void init_gfx_chip_info(int chip_type)
+static void __devinit init_gfx_chip_info(int chip_type)
{
u8 tmp;
@@ -2111,6 +2200,7 @@ static void init_gfx_chip_info(int chip_type)
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1;
break;
case UNICHROME_K8M890:
@@ -2123,7 +2213,7 @@ static void init_gfx_chip_info(int chip_type)
}
}
-static void init_tmds_chip_info(void)
+static void __devinit init_tmds_chip_info(void)
{
viafb_tmds_trasmitter_identify();
@@ -2168,7 +2258,7 @@ static void init_tmds_chip_info(void)
&viaparinfo->shared->tmds_setting_info);
}
-static void init_lvds_chip_info(void)
+static void __devinit init_lvds_chip_info(void)
{
viafb_lvds_trasmitter_identify();
viafb_init_lcd_size();
@@ -2202,7 +2292,7 @@ static void init_lvds_chip_info(void)
viaparinfo->chip_info->lvds_chip_info.output_interface);
}
-void viafb_init_dac(int set_iga)
+void __devinit viafb_init_dac(int set_iga)
{
int i;
u8 tmp;
@@ -2275,11 +2365,24 @@ static void set_display_channel(void)
}
}
+static u8 get_sync(struct fb_info *info)
+{
+ u8 polarity = 0;
+
+ if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
+ polarity |= VIA_HSYNC_NEGATIVE;
+ if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
+ polarity |= VIA_VSYNC_NEGATIVE;
+ return polarity;
+}
+
int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
struct VideoModeTable *vmode_tbl1, int video_bpp1)
{
int i, j;
int port;
+ u32 devices = viaparinfo->shared->iga1_devices
+ | viaparinfo->shared->iga2_devices;
u8 value, index, mask;
struct crt_mode_table *crt_timing;
struct crt_mode_table *crt_timing1 = NULL;
@@ -2322,11 +2425,13 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
break;
case UNICHROME_VX855:
+ case UNICHROME_VX900:
viafb_write_regx(VX855_ModeXregs, NUM_TOTAL_VX855_ModeXregs);
break;
}
device_off();
+ via_set_state(devices, VIA_STATE_OFF);
/* Fill VPIT Parameters */
/* Write Misc Register */
@@ -2337,7 +2442,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
via_write_reg(VIASR, i, VPIT.SR[i - 1]);
viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
- viafb_set_iga_path();
/* Write CRTC */
viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
@@ -2377,6 +2481,13 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
via_set_primary_color_depth(viaparinfo->depth);
via_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
: viaparinfo->depth);
+ via_set_source(viaparinfo->shared->iga1_devices, IGA1);
+ via_set_source(viaparinfo->shared->iga2_devices, IGA2);
+ if (viaparinfo->shared->iga2_devices)
+ enable_second_display_channel();
+ else
+ disable_second_display_channel();
+
/* Update Refresh Rate Setting */
/* Clear On Screen */
@@ -2394,8 +2505,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
viaparinfo->crt_setting_info->iga_path);
}
- set_crt_output_path(viaparinfo->crt_setting_info->iga_path);
-
/* Patch if set_hres is not 8 alignment (1366) to viafb_setmode
 	to 8 alignment (1368), there are several pixels (2 pixels)
on right side of screen. */
@@ -2482,10 +2591,16 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
viafb_DeviceStatus = CRT_Device;
}
device_on();
+ if (!viafb_dual_fb)
+ via_set_sync_polarity(devices, get_sync(viafbinfo));
+ else {
+ via_set_sync_polarity(viaparinfo->shared->iga1_devices,
+ get_sync(viafbinfo));
+ via_set_sync_polarity(viaparinfo->shared->iga2_devices,
+ get_sync(viafbinfo1));
+ }
- if (viafb_SAMM_ON == 1)
- viafb_write_reg_mask(CR6A, VIACR, 0xC0, BIT6 + BIT7);
-
+ via_set_state(devices, VIA_STATE_ON);
device_screen_on();
return 1;
}
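Sync polarity is now derived from the fbdev mode description instead of the internal CRT tables: get_sync() maps the absence of FB_SYNC_HOR_HIGH_ACT/FB_SYNC_VERT_HIGH_ACT to VIA_HSYNC_NEGATIVE/VIA_VSYNC_NEGATIVE, and via_set_sync_polarity() shifts those two bits into the per-device registers (misc register bits 7:6 for the CRT, CR9B/CR99/CR97 bits 6:5 for the digital outputs). A standalone sketch of the mapping, with the FB_SYNC_* values written out as in <linux/fb.h>:

#include <stdint.h>
#include <stdio.h>

#define VIA_HSYNC_NEGATIVE	0x01
#define VIA_VSYNC_NEGATIVE	0x02

/* fbdev sync flags, same values as <linux/fb.h> */
#define FB_SYNC_HOR_HIGH_ACT	1
#define FB_SYNC_VERT_HIGH_ACT	2

/* Same logic as get_sync(): a missing "high active" flag means negative sync. */
static uint8_t get_sync(uint32_t var_sync)
{
	uint8_t polarity = 0;

	if (!(var_sync & FB_SYNC_HOR_HIGH_ACT))
		polarity |= VIA_HSYNC_NEGATIVE;
	if (!(var_sync & FB_SYNC_VERT_HIGH_ACT))
		polarity |= VIA_VSYNC_NEGATIVE;
	return polarity;
}

int main(void)
{
	/* A mode with positive hsync and negative vsync: */
	uint8_t pol = get_sync(FB_SYNC_HOR_HIGH_ACT);

	printf("polarity bits: 0x%02x, CRT misc field: 0x%02x\n",
	       pol, (unsigned)(pol << 6));
	return 0;
}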
@@ -2526,31 +2641,18 @@ int viafb_get_refresh(int hres, int vres, u32 long_refresh)
static void device_off(void)
{
- viafb_crt_disable();
viafb_dvi_disable();
viafb_lcd_disable();
}
static void device_on(void)
{
- if (viafb_CRT_ON == 1)
- viafb_crt_enable();
if (viafb_DVI_ON == 1)
viafb_dvi_enable();
if (viafb_LCD_ON == 1)
viafb_lcd_enable();
}
-void viafb_crt_disable(void)
-{
- viafb_write_reg_mask(CR36, VIACR, BIT5 + BIT4, BIT5 + BIT4);
-}
-
-void viafb_crt_enable(void)
-{
- viafb_write_reg_mask(CR36, VIACR, 0x0, BIT5 + BIT4);
-}
-
static void enable_second_display_channel(void)
{
/* to enable second display channel. */
@@ -2567,7 +2669,6 @@ static void disable_second_display_channel(void)
viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
}
-
void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
*p_gfx_dpa_setting)
{
@@ -2652,4 +2753,9 @@ void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
crt_reg.ver_total - (crt_reg.ver_sync_start + crt_reg.ver_sync_end);
var->lower_margin = crt_reg.ver_sync_start - crt_reg.ver_addr;
var->vsync_len = crt_reg.ver_sync_end;
+ var->sync = 0;
+ if (crt_timing[index].h_sync_polarity == POSITIVE)
+ var->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (crt_timing[index].v_sync_polarity == POSITIVE)
+ var->sync |= FB_SYNC_VERT_HIGH_ACT;
}
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index c44399895294..668d534542ef 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -22,6 +22,8 @@
#ifndef __HW_H__
#define __HW_H__
+#include <linux/seq_file.h>
+
#include "viamode.h"
#include "global.h"
#include "via_modesetting.h"
@@ -30,6 +32,25 @@
#define viafb_write_reg(i, p, d) via_write_reg(p, i, d)
#define viafb_write_reg_mask(i, p, d, m) via_write_reg_mask(p, i, d, m)
+/* VIA output devices */
+#define VIA_LDVP0 0x00000001
+#define VIA_LDVP1 0x00000002
+#define VIA_DVP0 0x00000004
+#define VIA_CRT 0x00000010
+#define VIA_DVP1 0x00000020
+#define VIA_LVDS1 0x00000040
+#define VIA_LVDS2 0x00000080
+
+/* VIA output device power states */
+#define VIA_STATE_ON 0
+#define VIA_STATE_STANDBY 1
+#define VIA_STATE_SUSPEND 2
+#define VIA_STATE_OFF 3
+
+/* VIA output device sync polarity */
+#define VIA_HSYNC_NEGATIVE 0x01
+#define VIA_VSYNC_NEGATIVE 0x02
+
/***************************************************
* Definition IGA1 Design Method of CRTC Registers *
****************************************************/
@@ -341,6 +362,17 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
#define VX855_IGA2_FIFO_HIGH_THRESHOLD 160
#define VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM 320
+/* For VT3410 */
+#define VX900_IGA1_FIFO_MAX_DEPTH 400
+#define VX900_IGA1_FIFO_THRESHOLD 320
+#define VX900_IGA1_FIFO_HIGH_THRESHOLD 320
+#define VX900_IGA1_DISPLAY_QUEUE_EXPIRE_NUM 160
+
+#define VX900_IGA2_FIFO_MAX_DEPTH 192
+#define VX900_IGA2_FIFO_THRESHOLD 160
+#define VX900_IGA2_FIFO_HIGH_THRESHOLD 160
+#define VX900_IGA2_DISPLAY_QUEUE_EXPIRE_NUM 320
+
#define IGA1_FIFO_DEPTH_SELECT_REG_NUM 1
#define IGA1_FIFO_THRESHOLD_REG_NUM 2
#define IGA1_FIFO_HIGH_THRESHOLD_REG_NUM 2
@@ -858,6 +890,8 @@ struct iga2_crtc_timing {
#define VX800_FUNCTION3 0x3353
/* VT3409 chipset*/
#define VX855_FUNCTION3 0x3409
+/* VT3410 chipset*/
+#define VX900_FUNCTION3 0x3410
#define NUM_TOTAL_PLL_TABLE ARRAY_SIZE(pll_value)
@@ -873,6 +907,11 @@ struct pci_device_id_info {
u32 chip_index;
};
+struct via_device_mapping {
+ u32 device;
+ const char *name;
+};
+
extern unsigned int viafb_second_virtual_xres;
extern int viafb_SAMM_ON;
extern int viafb_dual_fb;
@@ -881,9 +920,6 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-void viafb_set_output_path(int device, int set_iga,
- int output_interface);
-
void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
struct VideoModeTable *video_mode, int bpp_byte, int set_iga);
@@ -891,8 +927,11 @@ void viafb_set_vclock(u32 CLK, int set_iga);
void viafb_load_reg(int timing_value, int viafb_load_reg_num,
struct io_register *reg,
int io_type);
-void viafb_crt_disable(void);
-void viafb_crt_enable(void);
+void via_set_source(u32 devices, u8 iga);
+void via_set_state(u32 devices, u8 state);
+void via_set_sync_polarity(u32 devices, u8 polarity);
+u32 via_parse_odev(char *input, char **end);
+void via_odev_to_seq(struct seq_file *m, u32 odev);
void init_ad9389(void);
/* Access I/O Function */
void viafb_lock_crt(void);
@@ -908,8 +947,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
struct VideoModeTable *vmode_tbl1, int video_bpp1);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
struct VideoModeTable *vmode_tbl);
-void viafb_init_chip_info(int chip_type);
-void viafb_init_dac(int set_iga);
+void __devinit viafb_init_chip_info(int chip_type);
+void __devinit viafb_init_dac(int set_iga);
int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp,
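The new VIA_STATE_ON/STANDBY/SUSPEND/OFF values give via_set_state() a DPMS-like vocabulary for each output device. How those states get chosen is outside this hunk; the helper below is only a hypothetical illustration of how an fbdev blanking level could be translated into one of them (blank_to_via_state is not part of this patch).

#include <stdio.h>

/* Power states from the hw.h hunk above. */
#define VIA_STATE_ON		0
#define VIA_STATE_STANDBY	1
#define VIA_STATE_SUSPEND	2
#define VIA_STATE_OFF		3

/* fbdev blanking levels, same order and values as FB_BLANK_* in <linux/fb.h> */
enum { BLANK_UNBLANK = 0, BLANK_NORMAL, BLANK_VSYNC_SUSPEND,
       BLANK_HSYNC_SUSPEND, BLANK_POWERDOWN };

/* Hypothetical mapping, for illustration only. */
static int blank_to_via_state(int blank)
{
	switch (blank) {
	case BLANK_UNBLANK:		return VIA_STATE_ON;
	case BLANK_VSYNC_SUSPEND:	return VIA_STATE_STANDBY;
	case BLANK_HSYNC_SUSPEND:	return VIA_STATE_SUSPEND;
	default:			return VIA_STATE_OFF;
	}
}

int main(void)
{
	printf("powerdown -> VIA state %d\n", blank_to_via_state(BLANK_POWERDOWN));
	return 0;
}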
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index 4d553d0b8d7a..ea1c51428823 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -94,6 +94,7 @@ int viafb_ioctl_hotplug(int hres, int vres, int bpp)
viafb_CRT_ON = 0;
viafb_LCD_ON = 0;
viafb_DeviceStatus = DVI_Device;
+ viafb_set_iga_path();
return viafb_DeviceStatus;
}
status = 1;
@@ -107,6 +108,7 @@ int viafb_ioctl_hotplug(int hres, int vres, int bpp)
viafb_LCD_ON = 0;
viafb_DeviceStatus = CRT_Device;
+ viafb_set_iga_path();
return viafb_DeviceStatus;
}
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index fc25ae30c5f6..3425c3969806 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -21,10 +21,16 @@
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include "global.h"
-#include "lcdtbl.h"
#define viafb_compact_res(x, y) (((x)<<16)|(y))
+/* CLE266 Software Power Sequence */
+/* {Mask}, {Data}, {Delay} */
+int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06},
+ {0x19, 0x1FE, 0x01} };
+int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00},
+ {0xD2, 0x19, 0x01} };
+
static struct _lcd_scaling_factor lcd_scaling_factor = {
/* LCD Horizontal Scaling Factor Register */
{LCD_HOR_SCALING_FACTOR_REG_NUM,
@@ -42,7 +48,7 @@ static struct _lcd_scaling_factor lcd_scaling_factor_CLE = {
static int check_lvds_chip(int device_id_subaddr, int device_id);
static bool lvds_identify_integratedlvds(void);
-static void fp_id_to_vindex(int panel_id);
+static void __devinit fp_id_to_vindex(int panel_id);
static int lvds_register_read(int index);
static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
int panel_vres);
@@ -84,7 +90,7 @@ static int check_lvds_chip(int device_id_subaddr, int device_id)
return FAIL;
}
-void viafb_init_lcd_size(void)
+void __devinit viafb_init_lcd_size(void)
{
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
@@ -144,7 +150,7 @@ static bool lvds_identify_integratedlvds(void)
return true;
}
-int viafb_lvds_trasmitter_identify(void)
+int __devinit viafb_lvds_trasmitter_identify(void)
{
if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
@@ -185,7 +191,7 @@ int viafb_lvds_trasmitter_identify(void)
return FAIL;
}
-static void fp_id_to_vindex(int panel_id)
+static void __devinit fp_id_to_vindex(int panel_id)
{
DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n");
@@ -436,6 +442,7 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
case UNICHROME_CN750:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
reg_value =
K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres);
/* Horizontal scaling enabled */
@@ -479,6 +486,7 @@ static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres,
case UNICHROME_CN750:
case UNICHROME_VX800:
case UNICHROME_VX855:
+ case UNICHROME_VX900:
reg_value =
K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres);
/* Vertical scaling enabled */
@@ -655,9 +663,6 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
pll_D_N = viafb_get_clk_value(panel_crt_table[0].clk);
DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
-
- viafb_set_output_path(DEVICE_LCD, set_iga,
- plvds_chip_info->output_interface);
lcd_patch_skew(plvds_setting_info, plvds_chip_info);
/* If K8M800, enable LCD Prefetch Mode. */
@@ -700,9 +705,6 @@ static void integrated_lvds_disable(struct lvds_setting_information
viafb_write_reg_mask(CR91, VIACR, 0xC0, BIT6 + BIT7);
}
- /* Turn DFP High/Low Pad off. */
- viafb_write_reg_mask(SR2A, VIASR, 0, BIT0 + BIT1 + BIT2 + BIT3);
-
/* Power off LVDS channel. */
switch (plvds_chip_info->output_interface) {
case INTERFACE_LVDS0:
@@ -758,9 +760,6 @@ static void integrated_lvds_enable(struct lvds_setting_information
break;
}
- /* Turn DFP High/Low pad on. */
- viafb_write_reg_mask(SR2A, VIASR, 0x0F, BIT0 + BIT1 + BIT2 + BIT3);
-
/* Power on LVDS channel. */
switch (plvds_chip_info->output_interface) {
case INTERFACE_LVDS0:
@@ -809,29 +808,48 @@ void viafb_lcd_disable(void)
viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
} else {
- /* DFP-HL pad off */
- viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x0F);
/* Backlight off */
viafb_write_reg_mask(SR3D, VIASR, 0x00, 0x20);
 		/* 24 bit DI data path off */
viafb_write_reg_mask(CR91, VIACR, 0x80, 0x80);
- /* Simultaneout disabled */
- viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
}
/* Disable expansion bit */
viafb_write_reg_mask(CR79, VIACR, 0x00, 0x01);
- /* CRT path set to IGA1 */
- viafb_write_reg_mask(SR16, VIASR, 0x00, 0x40);
 	/* Simultaneous display disabled */
viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08);
- /* IGA2 path disabled */
- viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
+}
+static void set_lcd_output_path(int set_iga, int output_interface)
+{
+ switch (output_interface) {
+ case INTERFACE_DFP:
+ if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)
+ || (UNICHROME_P4M890 ==
+ viaparinfo->chip_info->gfx_chip_name))
+ viafb_write_reg_mask(CR97, VIACR, 0x84,
+ BIT7 + BIT2 + BIT1 + BIT0);
+ case INTERFACE_DVP0:
+ case INTERFACE_DVP1:
+ case INTERFACE_DFP_HIGH:
+ case INTERFACE_DFP_LOW:
+ if (set_iga == IGA2)
+ viafb_write_reg(CR91, VIACR, 0x00);
+ break;
+ }
}
void viafb_lcd_enable(void)
{
+ viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3);
+ viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+ set_lcd_output_path(viaparinfo->lvds_setting_info->iga_path,
+ viaparinfo->chip_info->lvds_chip_info.output_interface);
+ if (viafb_LCD2_ON)
+ set_lcd_output_path(viaparinfo->lvds_setting_info2->iga_path,
+ viaparinfo->chip_info->
+ lvds_chip_info2.output_interface);
+
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
/* DI1 pad on */
viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30);
@@ -855,39 +873,13 @@ void viafb_lcd_enable(void)
viafb_enable_lvds_vt1636(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info);
} else {
- /* DFP-HL pad on */
- viafb_write_reg_mask(SR2A, VIASR, 0x0F, 0x0F);
/* Backlight on */
viafb_write_reg_mask(SR3D, VIASR, 0x20, 0x20);
 		/* 24 bit DI data path on */
viafb_write_reg_mask(CR91, VIACR, 0x00, 0x80);
-
- /* Set data source selection bit by iga path */
- if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
- /* DFP-H set to IGA1 */
- viafb_write_reg_mask(CR97, VIACR, 0x00, 0x10);
- /* DFP-L set to IGA1 */
- viafb_write_reg_mask(CR99, VIACR, 0x00, 0x10);
- } else {
- /* DFP-H set to IGA2 */
- viafb_write_reg_mask(CR97, VIACR, 0x10, 0x10);
- /* DFP-L set to IGA2 */
- viafb_write_reg_mask(CR99, VIACR, 0x10, 0x10);
- }
/* LCD enabled */
viafb_write_reg_mask(CR6A, VIACR, 0x48, 0x48);
}
-
- if (viaparinfo->lvds_setting_info->iga_path == IGA1) {
- /* CRT path set to IGA2 */
- viafb_write_reg_mask(SR16, VIASR, 0x40, 0x40);
- /* IGA2 path disabled */
- viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x80);
- /* IGA2 path enabled */
- } else { /* IGA2 */
- viafb_write_reg_mask(CR6A, VIACR, 0x80, 0x80);
- }
-
}
static void lcd_powersequence_off(void)
@@ -993,7 +985,7 @@ static void check_diport_of_integrated_lvds(
plvds_chip_info->output_interface);
}
-void viafb_init_lvds_output_interface(struct lvds_chip_information
+void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info)
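With lcdtbl.h gone, the CLE266 software power sequence tables now live directly in lcd.c. Each table holds three rows, {Mask}, {Data}, {Delay}, with one column per step. The sketch below only walks such a table and prints what each step would do; the driver's real stepping code (the power sequence on/off paths) is not shown in this hunk, and the delay unit is left as whatever that code expects.

#include <stdio.h>

/* Same layout as PowerSequenceOn in lcd.c: row 0 masks, row 1 data, row 2 delays. */
static const int power_sequence_on[3][3] = {
	{0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01},
};

/* Sketch: apply step i as a masked write of data[i] under mask[i], then wait. */
static void run_sequence(const int seq[3][3])
{
	int step;

	for (step = 0; step < 3; step++)
		printf("step %d: mask 0x%03x data 0x%03x, wait %d\n",
		       step, seq[0][step], seq[1][step], seq[2][step]);
}

int main(void)
{
	run_sequence(power_sequence_on);
	return 0;
}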
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index b348efc360b8..c7909fe29550 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -71,15 +71,15 @@ void viafb_enable_lvds_vt1636(struct lvds_setting_information
struct lvds_chip_information *plvds_chip_info);
void viafb_lcd_disable(void);
void viafb_lcd_enable(void);
-void viafb_init_lcd_size(void);
-void viafb_init_lvds_output_interface(struct lvds_chip_information
+void __devinit viafb_init_lcd_size(void);
+void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
*plvds_setting_info);
void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info);
-int viafb_lvds_trasmitter_identify(void);
+int __devinit viafb_lvds_trasmitter_identify(void);
void viafb_init_lvds_output_interface(struct lvds_chip_information
*plvds_chip_info,
struct lvds_setting_information
diff --git a/drivers/video/via/lcdtbl.h b/drivers/video/via/lcdtbl.h
deleted file mode 100644
index 6f3dd800be59..000000000000
--- a/drivers/video/via/lcdtbl.h
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#ifndef __LCDTBL_H__
-#define __LCDTBL_H__
-
-#include "share.h"
-
-/* CLE266 Software Power Sequence */
-/* {Mask}, {Data}, {Delay} */
-int PowerSequenceOn[3][3] =
- { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01} };
-int PowerSequenceOff[3][3] =
- { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01} };
-
-/* ++++++ P880 ++++++ */
-/* Panel 1600x1200 */
-struct io_reg P880_LCD_RES_6X4_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x73}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x73}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5A}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x5E},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xD6}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR44, 0xFF, 0x7D}, {VIASR, SR45, 0xFF, 0x8C},
- {VIASR, SR46, 0xFF, 0x02}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_6X4_16X12 ARRAY_SIZE(P880_LCD_RES_6X4_16X12)
-
-struct io_reg P880_LCD_RES_7X4_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x67}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x67}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x74}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x78},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xF5}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR44, 0xFF, 0x78}, {VIASR, SR45, 0xFF, 0x8C},
- {VIASR, SR46, 0xFF, 0x01}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_7X4_16X12 ARRAY_SIZE(P880_LCD_RES_7X4_16X12)
-
-struct io_reg P880_LCD_RES_8X6_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xE1}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR44, 0xFF, 0x6D}, {VIASR, SR45, 0xFF, 0x88},
- {VIASR, SR46, 0xFF, 0x03}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_8X6_16X12 ARRAY_SIZE(P880_LCD_RES_8X6_16X12)
-
-struct io_reg P880_LCD_RES_10X7_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xAB}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xAF},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xF0}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR44, 0xFF, 0x92}, {VIASR, SR45, 0xFF, 0x88},
- {VIASR, SR46, 0xFF, 0x03}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_10X7_16X12 ARRAY_SIZE(P880_LCD_RES_10X7_16X12)
-
-struct io_reg P880_LCD_RES_12X10_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x7D}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x7D}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xD0}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xD4},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xFA}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR44, 0xFF, 0xF6}, {VIASR, SR45, 0xFF, 0x88},
- {VIASR, SR46, 0xFF, 0x05}
-
-};
-
-#define NUM_TOTAL_P880_LCD_RES_12X10_16X12 ARRAY_SIZE(P880_LCD_RES_12X10_16X12)
-
-/* Panel 1400x1050 */
-struct io_reg P880_LCD_RES_6X4_14X10[] = {
- /* 640x480 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x63},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xB4}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR44, 0xFF, 0xC6}, {VIASR, SR45, 0xFF, 0x8C},
- {VIASR, SR46, 0xFF, 0x05}
-};
-
-#define NUM_TOTAL_P880_LCD_RES_6X4_14X10 ARRAY_SIZE(P880_LCD_RES_6X4_14X10)
-
-struct io_reg P880_LCD_RES_8X6_14X10[] = {
- /* 800x600 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR44, 0xFF, 0x06}, {VIASR, SR45, 0xFF, 0x8D},
- {VIASR, SR46, 0xFF, 0x05}
-};
-
-#define NUM_TOTAL_P880_LCD_RES_8X6_14X10 ARRAY_SIZE(P880_LCD_RES_8X6_14X10)
-
-/* ++++++ K400 ++++++ */
-/* Panel 1600x1200 */
-struct io_reg K400_LCD_RES_6X4_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x73}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x73}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5A}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x5E},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xDA}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0xC4}, {VIASR, SR47, 0xFF, 0x7F}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_16X12 ARRAY_SIZE(K400_LCD_RES_6X4_16X12)
-
-struct io_reg K400_LCD_RES_7X4_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x67}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x67}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x74}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x78},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xF5}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x46}, {VIASR, SR47, 0xFF, 0x3D}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_16X12 ARRAY_SIZE(K400_LCD_RES_7X4_16X12)
-
-struct io_reg K400_LCD_RES_8X6_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xE1}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x85}, {VIASR, SR47, 0xFF, 0x6F}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_16X12 ARRAY_SIZE(K400_LCD_RES_8X6_16X12)
-
-struct io_reg K400_LCD_RES_10X7_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x65}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x65}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xAB}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xAF},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xF0}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x45}, {VIASR, SR47, 0xFF, 0x4A}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_16X12 ARRAY_SIZE(K400_LCD_RES_10X7_16X12)
-
-struct io_reg K400_LCD_RES_12X10_16X12[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x7D}, {VIACR, CR55, 0x0F, 0x08},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x7D}, {VIACR, CR54, 0x38, 0x00},
- {VIACR, CR5D, 0x40, 0x40},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xD0}, {VIACR, CR71, 0x08, 0x00},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xD4},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xFA}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x47}, {VIASR, SR47, 0xFF, 0x7C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_16X12 ARRAY_SIZE(K400_LCD_RES_12X10_16X12)
-
-/* Panel 1400x1050 */
-struct io_reg K400_LCD_RES_6X4_14X10[] = {
- /* 640x400 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x63},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xB4}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x19}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_14X10 ARRAY_SIZE(K400_LCD_RES_6X4_14X10)
-
-struct io_reg K400_LCD_RES_8X6_14X10[] = {
- /* 800x600 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x21}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_14X10 ARRAY_SIZE(K400_LCD_RES_8X6_14X10)
-
-struct io_reg K400_LCD_RES_10X7_14X10[] = {
- /* 1024x768 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xA7},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xC3}, {VIACR, CR67, 0x03, 0x04},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_14X10 ARRAY_SIZE(K400_LCD_RES_10X7_14X10)
-
-struct io_reg K400_LCD_RES_12X10_14X10[] = {
- /* 1280x768, 1280x960, 1280x1024 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x97}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x97}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xCE}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xD2},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xC9}, {VIACR, CR67, 0x03, 0x04},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0x79}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_14X10 ARRAY_SIZE(K400_LCD_RES_12X10_14X10)
-
-/* ++++++ K400 ++++++ */
-/* Panel 1366x768 */
-struct io_reg K400_LCD_RES_6X4_1366X7[] = {
- /* 640x400 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x47}, {VIACR, CR55, 0x0F, 0x35},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x47}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x60}, {VIACR, CR71, 0x08, 0x23},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x64},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0x4C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_1366X7 ARRAY_SIZE(K400_LCD_RES_6X4_1366X7)
-
-struct io_reg K400_LCD_RES_7X4_1366X7[] = {
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x3B}, {VIACR, CR55, 0x0F, 0x35},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x3B}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x71}, {VIACR, CR71, 0x08, 0x23},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x75},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x96}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x10}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_1366X7 ARRAY_SIZE(K400_LCD_RES_7X4_1366X7)
-
-struct io_reg K400_LCD_RES_8X6_1366X7[] = {
- /* 800x600 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x37}, {VIACR, CR55, 0x0F, 0x35},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x37}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7E}, {VIACR, CR71, 0x08, 0x23},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x82},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0xB9}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_1366X7 ARRAY_SIZE(K400_LCD_RES_8X6_1366X7)
-
-struct io_reg K400_LCD_RES_10X7_1366X7[] = {
- /* 1024x768 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xA7},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xC3}, {VIACR, CR67, 0x03, 0x04},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_1366X7 ARRAY_SIZE(K400_LCD_RES_10X7_1366X7)
-
-struct io_reg K400_LCD_RES_12X10_1366X7[] = {
- /* 1280x768, 1280x960, 1280x1024 */
- /* IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x97}, {VIACR, CR55, 0x0F, 0x56},
- /* IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x97}, {VIACR, CR54, 0x38, 0x75},
- {VIACR, CR5D, 0x40, 0x24},
- /* IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xCE}, {VIACR, CR71, 0x08, 0x44},
- /* IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xD2},
- /* IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xC9}, {VIACR, CR67, 0x03, 0x04},
- /* VCLK */
- {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0x79}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_12X10_1366X7\
- ARRAY_SIZE(K400_LCD_RES_12X10_1366X7)
-
-/* ++++++ K400 ++++++ */
-/* Panel 1280x1024 */
-struct io_reg K400_LCD_RES_6X4_12X10[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
- {VIACR, CR5D, 0x40, 0x1C},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x34},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x63},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xAA}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x19}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_12X10 ARRAY_SIZE(K400_LCD_RES_6X4_12X10)
-
-struct io_reg K400_LCD_RES_7X4_12X10[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
- {VIACR, CR5D, 0x40, 0x1C},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x68}, {VIACR, CR71, 0x08, 0x34},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x6C},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xA8}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0xED}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_12X10 ARRAY_SIZE(K400_LCD_RES_7X4_12X10)
-
-struct io_reg K400_LCD_RES_8X6_12X10[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
- {VIACR, CR5D, 0x40, 0x1C},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x34},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x07}, {VIASR, SR47, 0xFF, 0x21}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_12X10 ARRAY_SIZE(K400_LCD_RES_8X6_12X10)
-
-struct io_reg K400_LCD_RES_10X7_12X10[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x9D}, {VIACR, CR55, 0x0F, 0x46},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x9D}, {VIACR, CR54, 0x38, 0x74},
- {VIACR, CR5D, 0x40, 0x1C},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0xA3}, {VIACR, CR71, 0x08, 0x34},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0xA7},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0xBE}, {VIACR, CR67, 0x03, 0x04},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x1E}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_10X7_12X10 ARRAY_SIZE(K400_LCD_RES_10X7_12X10)
-
-/* ++++++ K400 ++++++ */
-/* Panel 1024x768 */
-struct io_reg K400_LCD_RES_6X4_10X7[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x47}, {VIACR, CR55, 0x0F, 0x35},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x47}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x60}, {VIACR, CR71, 0x08, 0x23},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x64},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x87}, {VIASR, SR47, 0xFF, 0x4C}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_10X7 ARRAY_SIZE(K400_LCD_RES_6X4_10X7)
-
-struct io_reg K400_LCD_RES_7X4_10X7[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x3B}, {VIACR, CR55, 0x0F, 0x35},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x3B}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x71}, {VIACR, CR71, 0x08, 0x23},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x75},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x96}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x05}, {VIASR, SR47, 0xFF, 0x10}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_10X7 ARRAY_SIZE(K400_LCD_RES_7X4_10X7)
-
-struct io_reg K400_LCD_RES_8X6_10X7[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x37}, {VIACR, CR55, 0x0F, 0x35},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x37}, {VIACR, CR54, 0x38, 0x2B},
- {VIACR, CR5D, 0x40, 0x13},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7E}, {VIACR, CR71, 0x08, 0x23},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x82},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x8C}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x84}, {VIASR, SR47, 0xFF, 0xB9}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_8X6_10X7 ARRAY_SIZE(K400_LCD_RES_8X6_10X7)
-
-/* ++++++ K400 ++++++ */
-/* Panel 800x600 */
-struct io_reg K400_LCD_RES_6X4_8X6[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x1A}, {VIACR, CR55, 0x0F, 0x34},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x1A}, {VIACR, CR54, 0x38, 0xE3},
- {VIACR, CR5D, 0x40, 0x12},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x5F}, {VIACR, CR71, 0x08, 0x22},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x63},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x6E}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0x86}, {VIASR, SR47, 0xFF, 0xB3}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_6X4_8X6 ARRAY_SIZE(K400_LCD_RES_6X4_8X6)
-
-struct io_reg K400_LCD_RES_7X4_8X6[] = {
- /*IGA2 Horizontal Total */
- {VIACR, CR50, 0xFF, 0x1F}, {VIACR, CR55, 0x0F, 0x34},
- /*IGA2 Horizontal Blank End */
- {VIACR, CR53, 0xFF, 0x1F}, {VIACR, CR54, 0x38, 0xE3},
- {VIACR, CR5D, 0x40, 0x12},
- /*IGA2 Horizontal Total Shadow */
- {VIACR, CR6D, 0xFF, 0x7F}, {VIACR, CR71, 0x08, 0x22},
- /*IGA2 Horizontal Blank End Shadow */
- {VIACR, CR6E, 0xFF, 0x83},
- /*IGA2 Offset */
- {VIACR, CR66, 0xFF, 0x78}, {VIACR, CR67, 0x03, 0x00},
- /*VCLK*/ {VIASR, SR46, 0xFF, 0xC4}, {VIASR, SR47, 0xFF, 0x59}
-};
-
-#define NUM_TOTAL_K400_LCD_RES_7X4_8X6 ARRAY_SIZE(K400_LCD_RES_7X4_8X6)
-
-#endif /* __LCDTBL_H__ */
diff --git a/drivers/video/via/tbl1636.c b/drivers/video/via/tbl1636.c
deleted file mode 100644
index 2d8453429d4a..000000000000
--- a/drivers/video/via/tbl1636.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include "global.h"
-struct IODATA COMMON_INIT_TBL_VT1636[] = {
-/* Index, Mask, Value */
- /* Set panel power sequence timing */
- {0x10, 0xC0, 0x00},
- /* T1: VDD on - Data on. Each increment is 1 ms. (50ms = 031h) */
- {0x0B, 0xFF, 0x40},
- /* T2: Data on - Backlight on. Each increment is 2 ms. (210ms = 068h) */
- {0x0C, 0xFF, 0x31},
- /* T3: Backlight off -Data off. Each increment is 2 ms. (210ms = 068h)*/
- {0x0D, 0xFF, 0x31},
- /* T4: Data off - VDD off. Each increment is 1 ms. (50ms = 031h) */
- {0x0E, 0xFF, 0x68},
- /* T5: VDD off - VDD on. Each increment is 100 ms. (500ms = 04h) */
- {0x0F, 0xFF, 0x68},
- /* LVDS output power up */
- {0x09, 0xA0, 0xA0},
- /* turn on back light */
- {0x10, 0x33, 0x13}
-};
-
-struct IODATA DUAL_CHANNEL_ENABLE_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x08, 0xF0, 0xE0} /* Input Data Mode Select */
-};
-
-struct IODATA SINGLE_CHANNEL_ENABLE_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x08, 0xF0, 0x00} /* Input Data Mode Select */
-};
-
-struct IODATA DITHERING_ENABLE_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x0A, 0x70, 0x50}
-};
-
-struct IODATA DITHERING_DISABLE_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x0A, 0x70, 0x00}
-};
-
-struct IODATA VDD_ON_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x10, 0x20, 0x20}
-};
-
-struct IODATA VDD_OFF_TBL_VT1636[] = {
-/* Index, Mask, Value */
- {0x10, 0x20, 0x00}
-};
diff --git a/drivers/video/via/tbl1636.h b/drivers/video/via/tbl1636.h
deleted file mode 100644
index d906055f1511..000000000000
--- a/drivers/video/via/tbl1636.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2, or (at your option) any later version.
-
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
- * the implied warranty of MERCHANTABILITY or FITNESS FOR
- * A PARTICULAR PURPOSE.See the GNU General Public License
- * for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef _TBL1636_H_
-#define _TBL1636_H_
-#include "hw.h"
-
-extern struct IODATA COMMON_INIT_TBL_VT1636[8];
-extern struct IODATA DUAL_CHANNEL_ENABLE_TBL_VT1636[1];
-extern struct IODATA SINGLE_CHANNEL_ENABLE_TBL_VT1636[1];
-extern struct IODATA DITHERING_ENABLE_TBL_VT1636[1];
-extern struct IODATA DITHERING_DISABLE_TBL_VT1636[1];
-extern struct IODATA VDD_ON_TBL_VT1636[1];
-extern struct IODATA VDD_OFF_TBL_VT1636[1];
-
-#endif /* _VIA_TBL1636_H_ */
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
index 66f403033111..a3aa91709503 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
@@ -20,7 +20,7 @@
* The default port config.
*/
static struct via_port_cfg adap_configs[] = {
- [VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_OFF, VIASR, 0x26 },
+ [VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x26 },
[VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
[VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
[VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
@@ -95,6 +95,13 @@ EXPORT_SYMBOL_GPL(viafb_irq_disable);
/* ---------------------------------------------------------------------- */
/*
+ * Currently, the camera driver is the only user of the DMA code, so we
+ * only compile it in if the camera driver is being built. Chances are,
+ * most viafb systems will not need to have this extra code for a while.
+ * As soon as another user comes along, the ifdef can be removed.
+ */
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+/*
* Access to the DMA engine. This currently provides what the camera
* driver needs (i.e. outgoing only) but is easily expandable if need
* be.
@@ -322,7 +329,7 @@ int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
-
+#endif /* CONFIG_VIDEO_VIA_CAMERA */
/* ---------------------------------------------------------------------- */
/*
@@ -333,7 +340,7 @@ EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
static u16 via_function3[] = {
CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
- P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
+ P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};
/* Get the BIOS-configured framebuffer size from PCI configuration space
@@ -370,6 +377,7 @@ static int viafb_get_fb_size_from_pci(int chip_type)
case P4M900_FUNCTION3:
case VX800_FUNCTION3:
case VX855_FUNCTION3:
+ case VX900_FUNCTION3:
/*case CN750_FUNCTION3: */
offset = 0xA0;
break;
@@ -474,7 +482,10 @@ static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
* Eventually we want to move away from mapping this
* entire region.
*/
- vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
+ if (vdev->chip_type == UNICHROME_VX900)
+ vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
+ else
+ vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
if (ret < 0)
goto out_unmap;
@@ -507,7 +518,12 @@ static struct viafb_subdev_info {
},
{
.name = "viafb-i2c",
- }
+ },
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+ {
+ .name = "viafb-camera",
+ },
+#endif
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
@@ -635,6 +651,8 @@ static struct pci_device_id via_pci_table[] __devinitdata = {
.driver_data = UNICHROME_VX800 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
.driver_data = UNICHROME_VX855 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
+ .driver_data = UNICHROME_VX900 },
{ }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
@@ -644,6 +662,10 @@ static struct pci_driver via_driver = {
.id_table = via_pci_table,
.probe = via_pci_probe,
.remove = __devexit_p(via_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = viafb_suspend,
+ .resume = viafb_resume,
+#endif
};
static int __init via_core_init(void)
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index da9e4ca94b17..3844b558b7bd 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -114,6 +114,7 @@ static void via_i2c_setsda(void *data, int state)
int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
{
+ int ret;
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
@@ -126,11 +127,18 @@ int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
{
+ int ret;
u8 msg[2] = { index, data };
struct i2c_msg msgs;
@@ -140,11 +148,18 @@ int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
+ if (ret == 1)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
{
+ int ret;
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
@@ -156,7 +171,13 @@ int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
}
/*
@@ -181,8 +202,8 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
algo->setscl = via_i2c_setscl;
algo->getsda = via_i2c_getsda;
algo->getscl = via_i2c_getscl;
- algo->udelay = 40;
- algo->timeout = 20;
+ algo->udelay = 10;
+ algo->timeout = 2;
algo->data = adap_cfg;
sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
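
The three viafb_i2c_* hunks above all normalise the i2c_transfer() return value the same way: the number of messages actually transferred becomes 0 on a complete transfer, any other non-negative count becomes -EIO, and negative error codes pass through untouched. A minimal stand-alone sketch of that mapping (map_transfer_result() and the sample values are illustrative, not part of the patch):

#include <stdio.h>
#include <errno.h>

/*
 * i2c_transfer() reports how many of the requested messages completed,
 * or a negative errno.  Mirror the mapping the viafb_i2c_* helpers now
 * apply: full transfer -> 0, short transfer -> -EIO, errors unchanged.
 */
static int map_transfer_result(int ret, int expected_msgs)
{
	if (ret == expected_msgs)
		return 0;
	if (ret >= 0)
		return -EIO;
	return ret;
}

int main(void)
{
	printf("%d\n", map_transfer_result(2, 2));	/* 0: both messages done */
	printf("%d\n", map_transfer_result(1, 2));	/* -EIO: short transfer */
	printf("%d\n", map_transfer_result(-121, 2));	/* error passed through */
	return 0;
}
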
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index bdd0e4130f4e..d298cfccd6fc 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -56,6 +56,32 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
static struct fb_ops viafb_ops;
+/* supported output devices on each IGP
+ * only CX700, VX800, VX855, VX900 were documented
+ * VIA_CRT should be everywhere
+ * VIA_6C can be only pre-CX700 (probably only on CLE266) as 6C is used for PLL
+ * source selection on CX700 and later
+ * K400 seems to support VIA_96, VIA_DVP1, VIA_LVDS{1,2} as in viamode.c
+ */
+static const u32 supported_odev_map[] = {
+ [UNICHROME_CLE266] = VIA_CRT | VIA_LDVP0 | VIA_LDVP1,
+ [UNICHROME_K400] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+ | VIA_LVDS2,
+ [UNICHROME_K800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+ | VIA_LVDS2,
+ [UNICHROME_PM800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+ | VIA_LVDS2,
+ [UNICHROME_CN700] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1
+ | VIA_LVDS2,
+ [UNICHROME_CX700] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_CN750] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_K8M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_P4M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_P4M900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_VX800] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_VX855] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+ [UNICHROME_VX900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2,
+};
static void viafb_fill_var_color_info(struct fb_var_screeninfo *var, u8 depth)
{
@@ -332,22 +358,22 @@ static int viafb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_UNBLANK:
/* Screen: On, HSync: On, VSync: On */
/* control CRT monitor power management */
- viafb_write_reg_mask(CR36, VIACR, 0x00, BIT4 + BIT5);
+ via_set_state(VIA_CRT, VIA_STATE_ON);
break;
case FB_BLANK_HSYNC_SUSPEND:
/* Screen: Off, HSync: Off, VSync: On */
/* control CRT monitor power management */
- viafb_write_reg_mask(CR36, VIACR, 0x10, BIT4 + BIT5);
+ via_set_state(VIA_CRT, VIA_STATE_STANDBY);
break;
case FB_BLANK_VSYNC_SUSPEND:
/* Screen: Off, HSync: On, VSync: Off */
/* control CRT monitor power management */
- viafb_write_reg_mask(CR36, VIACR, 0x20, BIT4 + BIT5);
+ via_set_state(VIA_CRT, VIA_STATE_SUSPEND);
break;
case FB_BLANK_POWERDOWN:
/* Screen: Off, HSync: Off, VSync: Off */
/* control CRT monitor power management */
- viafb_write_reg_mask(CR36, VIACR, 0x30, BIT4 + BIT5);
+ via_set_state(VIA_CRT, VIA_STATE_OFF);
break;
}
@@ -457,7 +483,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
return -EFAULT;
if (gpu32 & CRT_Device)
- viafb_crt_enable();
+ via_set_state(VIA_CRT, VIA_STATE_ON);
if (gpu32 & DVI_Device)
viafb_dvi_enable();
if (gpu32 & LCD_Device)
@@ -467,7 +493,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (copy_from_user(&gpu32, argp, sizeof(gpu32)))
return -EFAULT;
if (gpu32 & CRT_Device)
- viafb_crt_disable();
+ via_set_state(VIA_CRT, VIA_STATE_OFF);
if (gpu32 & DVI_Device)
viafb_dvi_disable();
if (gpu32 & LCD_Device)
@@ -787,7 +813,8 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
bg_color = cursor->image.bg_color;
if (chip_name == UNICHROME_CX700 ||
chip_name == UNICHROME_VX800 ||
- chip_name == UNICHROME_VX855) {
+ chip_name == UNICHROME_VX855 ||
+ chip_name == UNICHROME_VX900) {
fg_color =
((info->cmap.red[fg_color] & 0xFFC0) << 14) |
((info->cmap.green[fg_color] & 0xFFC0) << 4) |
@@ -961,7 +988,7 @@ static void retrieve_device_setting(struct viafb_ioctl_setting
setting_info->lcd_attributes.lcd_mode = viafb_lcd_mode;
}
-static int parse_active_dev(void)
+static int __init parse_active_dev(void)
{
viafb_CRT_ON = STATE_OFF;
viafb_DVI_ON = STATE_OFF;
@@ -1031,7 +1058,7 @@ static int parse_active_dev(void)
return 0;
}
-static int parse_port(char *opt_str, int *output_interface)
+static int __devinit parse_port(char *opt_str, int *output_interface)
{
if (!strncmp(opt_str, "DVP0", 4))
*output_interface = INTERFACE_DVP0;
@@ -1048,7 +1075,7 @@ static int parse_port(char *opt_str, int *output_interface)
return 0;
}
-static void parse_lcd_port(void)
+static void __devinit parse_lcd_port(void)
{
parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info.
output_interface);
@@ -1061,7 +1088,7 @@ static void parse_lcd_port(void)
output_interface);
}
-static void parse_dvi_port(void)
+static void __devinit parse_dvi_port(void)
{
parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info.
output_interface);
@@ -1431,38 +1458,196 @@ static const struct file_operations viafb_vt1636_proc_fops = {
.write = viafb_vt1636_proc_write,
};
-static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
+static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
{
- *viafb_entry = proc_mkdir("viafb", NULL);
- if (*viafb_entry) {
- proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
- proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
- proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
- proc_create("dfpl", 0, *viafb_entry, &viafb_dfpl_proc_fops);
- if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_name || VT1636_LVDS ==
- viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
- proc_create("vt1636", 0, *viafb_entry, &viafb_vt1636_proc_fops);
- }
+ via_odev_to_seq(m, supported_odev_map[
+ viaparinfo->shared->chip_info.gfx_chip_name]);
+ return 0;
+}
+
+static int viafb_sup_odev_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_sup_odev_proc_show, NULL);
+}
+
+static const struct file_operations viafb_sup_odev_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_sup_odev_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t odev_update(const char __user *buffer, size_t count, u32 *odev)
+{
+ char buf[64], *ptr = buf;
+ u32 devices;
+ bool add, sub;
+
+ if (count < 1 || count > 63)
+ return -EINVAL;
+ if (copy_from_user(&buf[0], buffer, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ add = buf[0] == '+';
+ sub = buf[0] == '-';
+ if (add || sub)
+ ptr++;
+ devices = via_parse_odev(ptr, &ptr);
+ if (*ptr == '\n')
+ ptr++;
+ if (*ptr != 0)
+ return -EINVAL;
+ if (add)
+ *odev |= devices;
+ else if (sub)
+ *odev &= ~devices;
+ else
+ *odev = devices;
+ return count;
+}
+
+static int viafb_iga1_odev_proc_show(struct seq_file *m, void *v)
+{
+ via_odev_to_seq(m, viaparinfo->shared->iga1_devices);
+ return 0;
+}
+
+static int viafb_iga1_odev_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_iga1_odev_proc_show, NULL);
+}
+
+static ssize_t viafb_iga1_odev_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ u32 dev_on, dev_off, dev_old, dev_new;
+ ssize_t res;
+
+ dev_old = dev_new = viaparinfo->shared->iga1_devices;
+ res = odev_update(buffer, count, &dev_new);
+ if (res != count)
+ return res;
+ dev_off = dev_old & ~dev_new;
+ dev_on = dev_new & ~dev_old;
+ viaparinfo->shared->iga1_devices = dev_new;
+ viaparinfo->shared->iga2_devices &= ~dev_new;
+ via_set_state(dev_off, VIA_STATE_OFF);
+ via_set_source(dev_new, IGA1);
+ via_set_state(dev_on, VIA_STATE_ON);
+ return res;
+}
+
+static const struct file_operations viafb_iga1_odev_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_iga1_odev_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_iga1_odev_proc_write,
+};
+
+static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
+{
+ via_odev_to_seq(m, viaparinfo->shared->iga2_devices);
+ return 0;
+}
+static int viafb_iga2_odev_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_iga2_odev_proc_show, NULL);
+}
+
+static ssize_t viafb_iga2_odev_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ u32 dev_on, dev_off, dev_old, dev_new;
+ ssize_t res;
+
+ dev_old = dev_new = viaparinfo->shared->iga2_devices;
+ res = odev_update(buffer, count, &dev_new);
+ if (res != count)
+ return res;
+ dev_off = dev_old & ~dev_new;
+ dev_on = dev_new & ~dev_old;
+ viaparinfo->shared->iga2_devices = dev_new;
+ viaparinfo->shared->iga1_devices &= ~dev_new;
+ via_set_state(dev_off, VIA_STATE_OFF);
+ via_set_source(dev_new, IGA2);
+ via_set_state(dev_on, VIA_STATE_ON);
+ return res;
+}
+
+static const struct file_operations viafb_iga2_odev_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_iga2_odev_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_iga2_odev_proc_write,
+};
+
+#define IS_VT1636(lvds_chip) ((lvds_chip).lvds_chip_name == VT1636_LVDS)
+static void viafb_init_proc(struct viafb_shared *shared)
+{
+ struct proc_dir_entry *iga1_entry, *iga2_entry,
+ *viafb_entry = proc_mkdir("viafb", NULL);
+
+ shared->proc_entry = viafb_entry;
+ if (viafb_entry) {
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+ proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
+ proc_create("dvp1", 0, viafb_entry, &viafb_dvp1_proc_fops);
+ proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
+ proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
+ if (IS_VT1636(shared->chip_info.lvds_chip_info)
+ || IS_VT1636(shared->chip_info.lvds_chip_info2))
+ proc_create("vt1636", 0, viafb_entry,
+ &viafb_vt1636_proc_fops);
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
+ proc_create("supported_output_devices", 0, viafb_entry,
+ &viafb_sup_odev_proc_fops);
+ iga1_entry = proc_mkdir("iga1", viafb_entry);
+ shared->iga1_proc_entry = iga1_entry;
+ proc_create("output_devices", 0, iga1_entry,
+ &viafb_iga1_odev_proc_fops);
+ iga2_entry = proc_mkdir("iga2", viafb_entry);
+ shared->iga2_proc_entry = iga2_entry;
+ proc_create("output_devices", 0, iga2_entry,
+ &viafb_iga2_odev_proc_fops);
}
}
-static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
+static void viafb_remove_proc(struct viafb_shared *shared)
{
- struct chip_information *chip_info = &viaparinfo->shared->chip_info;
+ struct proc_dir_entry *viafb_entry = shared->proc_entry,
+ *iga1_entry = shared->iga1_proc_entry,
+ *iga2_entry = shared->iga2_proc_entry;
+ if (!viafb_entry)
+ return;
+
+ remove_proc_entry("output_devices", iga2_entry);
+ remove_proc_entry("iga2", viafb_entry);
+ remove_proc_entry("output_devices", iga1_entry);
+ remove_proc_entry("iga1", viafb_entry);
+ remove_proc_entry("supported_output_devices", viafb_entry);
+
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
remove_proc_entry("dvp0", viafb_entry);/* parent dir */
remove_proc_entry("dvp1", viafb_entry);
remove_proc_entry("dfph", viafb_entry);
remove_proc_entry("dfpl", viafb_entry);
- if (chip_info->lvds_chip_info.lvds_chip_name == VT1636_LVDS
- || chip_info->lvds_chip_info2.lvds_chip_name == VT1636_LVDS)
+ if (IS_VT1636(shared->chip_info.lvds_chip_info)
+ || IS_VT1636(shared->chip_info.lvds_chip_info2))
remove_proc_entry("vt1636", viafb_entry);
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
remove_proc_entry("viafb", NULL);
}
-
-#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+#undef IS_VT1636
static int parse_mode(const char *str, u32 *xres, u32 *yres)
{
@@ -1486,6 +1671,47 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
}
+#ifdef CONFIG_PM
+int viafb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ if (state.event == PM_EVENT_SUSPEND) {
+ acquire_console_sem();
+ fb_set_suspend(viafbinfo, 1);
+
+ viafb_sync(viafbinfo);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ release_console_sem();
+ }
+
+ return 0;
+}
+
+int viafb_resume(struct pci_dev *pdev)
+{
+ acquire_console_sem();
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev))
+ goto fail;
+ pci_set_master(pdev);
+ if (viaparinfo->shared->vdev->engine_mmio)
+ viafb_reset_engine(viaparinfo);
+ viafb_set_par(viafbinfo);
+ if (viafb_dual_fb)
+ viafb_set_par(viafbinfo1);
+ fb_set_suspend(viafbinfo, 0);
+
+fail:
+ release_console_sem();
+ return 0;
+}
+
+#endif
+
+
int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
@@ -1544,7 +1770,7 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
viafbinfo->pseudo_palette = pseudo_pal;
- if (viafb_accel && !viafb_init_engine(viafbinfo)) {
+ if (viafb_accel && !viafb_setup_engine(viafbinfo)) {
viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
default_var.accel_flags = FB_ACCELF_TEXT;
@@ -1671,9 +1897,7 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
-#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
- viafb_init_proc(&viaparinfo->shared->proc_entry);
-#endif
+ viafb_init_proc(viaparinfo->shared);
viafb_init_dac(IGA2);
return 0;
@@ -1700,9 +1924,7 @@ void __devexit via_fb_pci_remove(struct pci_dev *pdev)
unregister_framebuffer(viafbinfo);
if (viafb_dual_fb)
unregister_framebuffer(viafbinfo1);
-#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
- viafb_remove_proc(viaparinfo->shared->proc_entry);
-#endif
+ viafb_remove_proc(viaparinfo->shared);
framebuffer_release(viafbinfo);
if (viafb_dual_fb)
framebuffer_release(viafbinfo1);
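
Writes to the new iga1/output_devices and iga2/output_devices proc files go through odev_update() above: a leading '+' adds devices to that IGA's mask, a leading '-' removes them, and a bare list replaces the mask outright. The write handlers then switch off exactly the devices that dropped out of the mask, route the requested set to the IGA, and switch on the newly added ones. A small stand-alone sketch of that delta computation, with hypothetical DEV_* bits standing in for the VIA_* output-device flags:

#include <stdio.h>

#define DEV_CRT   0x01	/* placeholders for VIA_CRT, VIA_DVP1, VIA_LVDS1 */
#define DEV_DVP1  0x02
#define DEV_LVDS1 0x04

/* Same bookkeeping as viafb_iga*_odev_proc_write(): compare old and new
 * masks and report which devices to power off and which to power on. */
static void compute_delta(unsigned int dev_old, unsigned int dev_new,
			  unsigned int *dev_on, unsigned int *dev_off)
{
	*dev_off = dev_old & ~dev_new;	/* active before, no longer requested */
	*dev_on = dev_new & ~dev_old;	/* newly requested */
}

int main(void)
{
	unsigned int on, off;
	unsigned int current_mask = DEV_CRT | DEV_LVDS1;
	unsigned int wanted_mask = DEV_CRT | DEV_DVP1;	/* e.g. after writing "CRT DVP1" */

	compute_delta(current_mask, wanted_mask, &on, &off);
	printf("switch off 0x%x, switch on 0x%x\n", off, on);
	return 0;
}
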
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index 52a35fabba91..4960e3da6645 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -40,7 +40,12 @@
#define VIAFB_NUM_I2C 5
struct viafb_shared {
+ u32 iga1_devices;
+ u32 iga2_devices;
+
struct proc_dir_entry *proc_entry; /*viafb proc entry */
+ struct proc_dir_entry *iga1_proc_entry;
+ struct proc_dir_entry *iga2_proc_entry;
struct viafb_dev *vdev; /* Global dev info */
/* All the information will be needed to set engine */
@@ -103,4 +108,6 @@ void via_fb_pci_remove(struct pci_dev *pdev);
/* Temporary */
int viafb_init(void);
void viafb_exit(void);
+int viafb_suspend(struct pci_dev *pdev, pm_message_t state);
+int viafb_resume(struct pci_dev *pdev);
#endif /* __VIAFBDEV_H__ */
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index d65bf1aee87c..60e4192c2b34 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -23,6 +23,34 @@
#include <linux/via_i2c.h>
#include "global.h"
+static const struct IODATA common_init_data[] = {
+/* Index, Mask, Value */
+ /* Set panel power sequence timing */
+ {0x10, 0xC0, 0x00},
+ /* T1: VDD on - Data on. Each increment is 1 ms. (50ms = 031h) */
+ {0x0B, 0xFF, 0x40},
+ /* T2: Data on - Backlight on. Each increment is 2 ms. (210ms = 068h) */
+ {0x0C, 0xFF, 0x31},
+ /* T3: Backlight off -Data off. Each increment is 2 ms. (210ms = 068h)*/
+ {0x0D, 0xFF, 0x31},
+ /* T4: Data off - VDD off. Each increment is 1 ms. (50ms = 031h) */
+ {0x0E, 0xFF, 0x68},
+ /* T5: VDD off - VDD on. Each increment is 100 ms. (500ms = 04h) */
+ {0x0F, 0xFF, 0x68},
+ /* LVDS output power up */
+ {0x09, 0xA0, 0xA0},
+ /* turn on back light */
+ {0x10, 0x33, 0x13}
+};
+
+/* Index, Mask, Value */
+static const struct IODATA dual_channel_enable_data = {0x08, 0xF0, 0xE0};
+static const struct IODATA single_channel_enable_data = {0x08, 0xF0, 0x00};
+static const struct IODATA dithering_enable_data = {0x0A, 0x70, 0x50};
+static const struct IODATA dithering_disable_data = {0x0A, 0x70, 0x00};
+static const struct IODATA vdd_on_data = {0x10, 0x20, 0x20};
+static const struct IODATA vdd_off_data = {0x10, 0x20, 0x00};
+
u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info,
u8 index)
@@ -55,108 +83,41 @@ void viafb_init_lvds_vt1636(struct lvds_setting_information
int reg_num, i;
/* Common settings: */
- reg_num = ARRAY_SIZE(COMMON_INIT_TBL_VT1636);
-
- for (i = 0; i < reg_num; i++) {
+ reg_num = ARRAY_SIZE(common_init_data);
+ for (i = 0; i < reg_num; i++)
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
- plvds_chip_info,
- COMMON_INIT_TBL_VT1636[i]);
- }
+ plvds_chip_info, common_init_data[i]);
/* Input Data Mode Select */
- if (plvds_setting_info->device_lcd_dualedge) {
+ if (plvds_setting_info->device_lcd_dualedge)
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
- plvds_chip_info,
- DUAL_CHANNEL_ENABLE_TBL_VT1636[0]);
- } else {
+ plvds_chip_info, dual_channel_enable_data);
+ else
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
- plvds_chip_info,
- SINGLE_CHANNEL_ENABLE_TBL_VT1636[0]);
- }
+ plvds_chip_info, single_channel_enable_data);
- if (plvds_setting_info->LCDDithering) {
+ if (plvds_setting_info->LCDDithering)
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
- plvds_chip_info,
- DITHERING_ENABLE_TBL_VT1636[0]);
- } else {
+ plvds_chip_info, dithering_enable_data);
+ else
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info,
- plvds_chip_info,
- DITHERING_DISABLE_TBL_VT1636[0]);
- }
+ plvds_chip_info, dithering_disable_data);
}
void viafb_enable_lvds_vt1636(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
-
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info,
- VDD_ON_TBL_VT1636[0]);
-
- /* Pad on: */
- switch (plvds_chip_info->output_interface) {
- case INTERFACE_DVP0:
- {
- viafb_write_reg_mask(SR1E, VIASR, 0xC0, 0xC0);
- break;
- }
-
- case INTERFACE_DVP1:
- {
- viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30);
- break;
- }
-
- case INTERFACE_DFP_LOW:
- {
- viafb_write_reg_mask(SR2A, VIASR, 0x03, 0x03);
- break;
- }
-
- case INTERFACE_DFP_HIGH:
- {
- viafb_write_reg_mask(SR2A, VIASR, 0x03, 0x0C);
- break;
- }
-
- }
+ vdd_on_data);
}
void viafb_disable_lvds_vt1636(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
-
viafb_gpio_i2c_write_mask_lvds(plvds_setting_info, plvds_chip_info,
- VDD_OFF_TBL_VT1636[0]);
-
- /* Pad off: */
- switch (plvds_chip_info->output_interface) {
- case INTERFACE_DVP0:
- {
- viafb_write_reg_mask(SR1E, VIASR, 0x00, 0xC0);
- break;
- }
-
- case INTERFACE_DVP1:
- {
- viafb_write_reg_mask(SR1E, VIASR, 0x00, 0x30);
- break;
- }
-
- case INTERFACE_DFP_LOW:
- {
- viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x03);
- break;
- }
-
- case INTERFACE_DFP_HIGH:
- {
- viafb_write_reg_mask(SR2A, VIASR, 0x00, 0x0C);
- break;
- }
-
- }
+ vdd_off_data);
}
bool viafb_lvds_identify_vt1636(u8 i2c_adapter)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 2839e281cd65..b7b5014ff714 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -517,10 +517,10 @@ static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO);
static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
-static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUGO);
-static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUGO);
+static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP);
+static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP);
static struct attribute *w1_master_default_attrs[] = {
&w1_master_attribute_name.attr,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7d24b0d94ed4..347f17edad77 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
}
#endif
- memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+ memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
@@ -377,7 +377,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_edge_irq, "event");
+ handle_fasteoi_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -435,6 +435,11 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = per_cpu(virq_to_irq, cpu)[virq];
if (irq == -1) {
+ irq = find_unbound_irq();
+
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
+
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
@@ -442,11 +447,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
BUG();
evtchn = bind_virq.port;
- irq = find_unbound_irq();
-
- set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
- handle_percpu_irq, "virq");
-
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -578,41 +578,75 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
+ unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
+ struct vcpu_info *v;
spin_lock_irqsave(&debug_lock, flags);
- printk("vcpu %d\n ", cpu);
+ printk("\nvcpu %d\n ", cpu);
for_each_online_cpu(i) {
- struct vcpu_info *v = per_cpu(xen_vcpu, i);
- printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
- (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
- v->evtchn_upcall_pending,
- v->evtchn_pending_sel);
+ int pending;
+ v = per_cpu(xen_vcpu, i);
+ pending = (get_irq_regs() && i == cpu)
+ ? xen_irqs_disabled(get_irq_regs())
+ : v->evtchn_upcall_mask;
+ printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+ pending, v->evtchn_upcall_pending,
+ (int)(sizeof(v->evtchn_pending_sel)*2),
+ v->evtchn_pending_sel);
+ }
+ v = per_cpu(xen_vcpu, cpu);
+
+ printk("\npending:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+ printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+ sh->evtchn_pending[i],
+ i % 8 == 0 ? "\n " : " ");
+ printk("\nglobal mask:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*lx%s",
+ (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nglobally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+ printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocal cpu%d mask:\n ", cpu);
+ for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
+ printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+ cpu_evtchn[i],
+ i % 8 == 0 ? "\n " : " ");
+
+ printk("\nlocally unmasked:\n ");
+ for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+ unsigned long pending = sh->evtchn_pending[i]
+ & ~sh->evtchn_mask[i]
+ & cpu_evtchn[i];
+ printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+ pending, i % 8 == 0 ? "\n " : " ");
}
- printk("pending:\n ");
- for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
- printk("%08lx%s", sh->evtchn_pending[i],
- i % 8 == 0 ? "\n " : " ");
- printk("\nmasks:\n ");
- for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%08lx%s", sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
-
- printk("\nunmasked:\n ");
- for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
- printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
- i % 8 == 0 ? "\n " : " ");
printk("\npending list:\n");
- for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
if (sync_test_bit(i, sh->evtchn_pending)) {
- printk(" %d: event %d -> irq %d\n",
+ int word_idx = i / BITS_PER_LONG;
+ printk(" %d: event %d -> irq %d%s%s%s\n",
cpu_from_evtchn(i), i,
- evtchn_to_irq[i]);
+ evtchn_to_irq[i],
+ sync_test_bit(word_idx, &v->evtchn_pending_sel)
+ ? "" : " l2-clear",
+ !sync_test_bit(i, sh->evtchn_mask)
+ ? "" : " globally-masked",
+ sync_test_bit(i, cpu_evtchn)
+ ? "" : " locally-masked");
}
}
@@ -663,6 +697,9 @@ static void __xen_evtchn_do_upcall(void)
int irq = evtchn_to_irq[port];
struct irq_desc *desc;
+ mask_evtchn(port);
+ clear_evtchn(port);
+
if (irq != -1) {
desc = irq_to_desc(irq);
if (desc)
@@ -800,10 +837,10 @@ static void ack_dynirq(unsigned int irq)
{
int evtchn = evtchn_from_irq(irq);
- move_native_irq(irq);
+ move_masked_irq(irq);
if (VALID_EVTCHN(evtchn))
- clear_evtchn(evtchn);
+ unmask_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
@@ -959,7 +996,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.mask = disable_dynirq,
.unmask = enable_dynirq,
- .ack = ack_dynirq,
+ .eoi = ack_dynirq,
.set_affinity = set_affinity_irq,
.retrigger = retrigger_dynirq,
};
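
The events.c changes above move dynamic event channels from handle_edge_irq to handle_fasteoi_irq: __xen_evtchn_do_upcall() now masks and clears the port before the handler runs, and ack_dynirq(), wired to the chip's .eoi hook, unmasks it once handling is complete. The toy model below only sketches that ordering; struct port and demo_handler() are illustrative and collapse the real handle_fasteoi_irq() path into a direct call:

#include <stdio.h>
#include <stdbool.h>

struct port {
	bool pending;
	bool masked;
};

/* Rough analogue of the new upcall flow: mask + clear, run the handler,
 * then the .eoi hook (ack_dynirq) unmasks the port again. */
static void upcall(struct port *p, void (*handler)(void))
{
	p->masked = true;	/* mask_evtchn(port)  */
	p->pending = false;	/* clear_evtchn(port) */
	handler();		/* handle_fasteoi_irq() runs the action */
	p->masked = false;	/* ->eoi: ack_dynirq() -> unmask_evtchn() */
}

static void demo_handler(void)
{
	printf("handler runs with the port masked\n");
}

int main(void)
{
	struct port p = { .pending = true, .masked = false };

	upcall(&p, demo_handler);
	printf("after eoi: pending=%d masked=%d\n", p.pending, p.masked);
	return 0;
}
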
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index d409495876f1..132939f36020 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -64,9 +64,11 @@
int xen_store_evtchn;
-EXPORT_SYMBOL(xen_store_evtchn);
+EXPORT_SYMBOL_GPL(xen_store_evtchn);
struct xenstore_domain_interface *xen_store_interface;
+EXPORT_SYMBOL_GPL(xen_store_interface);
+
static unsigned long xen_store_mfn;
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 25275c3bbdff..4fde9440fe1f 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_XENFS) += xenfs.o
-xenfs-objs = super.o xenbus.o \ No newline at end of file
+xenfs-y = super.o xenbus.o privcmd.o
+xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
new file mode 100644
index 000000000000..f80be7f6eb95
--- /dev/null
+++ b/drivers/xen/xenfs/privcmd.c
@@ -0,0 +1,404 @@
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/privcmd.h>
+#include <xen/interface/xen.h>
+#include <xen/features.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
+#endif
+
+static long privcmd_ioctl_hypercall(void __user *udata)
+{
+ struct privcmd_hypercall hypercall;
+ long ret;
+
+ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
+ return -EFAULT;
+
+ ret = privcmd_call(hypercall.op,
+ hypercall.arg[0], hypercall.arg[1],
+ hypercall.arg[2], hypercall.arg[3],
+ hypercall.arg[4]);
+
+ return ret;
+}
+
+static void free_page_list(struct list_head *pages)
+{
+ struct page *p, *n;
+
+ list_for_each_entry_safe(p, n, pages, lru)
+ __free_page(p);
+
+ INIT_LIST_HEAD(pages);
+}
+
+/*
+ * Given an array of items in userspace, return a list of pages
+ * containing the data. If copying fails, either because of memory
+ * allocation failure or a problem reading user memory, return an
+ * error code; it's up to the caller to dispose of any partial list.
+ */
+static int gather_array(struct list_head *pagelist,
+ unsigned nelem, size_t size,
+ void __user *data)
+{
+ unsigned pageidx;
+ void *pagedata;
+ int ret;
+
+ if (size > PAGE_SIZE)
+ return 0;
+
+ pageidx = PAGE_SIZE;
+ pagedata = NULL; /* quiet, gcc */
+ while (nelem--) {
+ if (pageidx > PAGE_SIZE-size) {
+ struct page *page = alloc_page(GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (page == NULL)
+ goto fail;
+
+ pagedata = page_address(page);
+
+ list_add_tail(&page->lru, pagelist);
+ pageidx = 0;
+ }
+
+ ret = -EFAULT;
+ if (copy_from_user(pagedata + pageidx, data, size))
+ goto fail;
+
+ data += size;
+ pageidx += size;
+ }
+
+ ret = 0;
+
+fail:
+ return ret;
+}
+
+/*
+ * Call function "fn" on each element of the array fragmented
+ * over a list of pages.
+ */
+static int traverse_pages(unsigned nelem, size_t size,
+ struct list_head *pos,
+ int (*fn)(void *data, void *state),
+ void *state)
+{
+ void *pagedata;
+ unsigned pageidx;
+ int ret = 0;
+
+ BUG_ON(size > PAGE_SIZE);
+
+ pageidx = PAGE_SIZE;
+ pagedata = NULL; /* hush, gcc */
+
+ while (nelem--) {
+ if (pageidx > PAGE_SIZE-size) {
+ struct page *page;
+ pos = pos->next;
+ page = list_entry(pos, struct page, lru);
+ pagedata = page_address(page);
+ pageidx = 0;
+ }
+
+ ret = (*fn)(pagedata + pageidx, state);
+ if (ret)
+ break;
+ pageidx += size;
+ }
+
+ return ret;
+}
+
+struct mmap_mfn_state {
+ unsigned long va;
+ struct vm_area_struct *vma;
+ domid_t domain;
+};
+
+static int mmap_mfn_range(void *data, void *state)
+{
+ struct privcmd_mmap_entry *msg = data;
+ struct mmap_mfn_state *st = state;
+ struct vm_area_struct *vma = st->vma;
+ int rc;
+
+ /* Do not allow range to wrap the address space. */
+ if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
+ ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
+ return -EINVAL;
+
+ /* Range chunks must be contiguous in va space. */
+ if ((msg->va != st->va) ||
+ ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
+ return -EINVAL;
+
+ rc = xen_remap_domain_mfn_range(vma,
+ msg->va & PAGE_MASK,
+ msg->mfn, msg->npages,
+ vma->vm_page_prot,
+ st->domain);
+ if (rc < 0)
+ return rc;
+
+ st->va += msg->npages << PAGE_SHIFT;
+
+ return 0;
+}
+
+static long privcmd_ioctl_mmap(void __user *udata)
+{
+ struct privcmd_mmap mmapcmd;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int rc;
+ LIST_HEAD(pagelist);
+ struct mmap_mfn_state state;
+
+ if (!xen_initial_domain())
+ return -EPERM;
+
+ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
+ return -EFAULT;
+
+ rc = gather_array(&pagelist,
+ mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+ mmapcmd.entry);
+
+ if (rc || list_empty(&pagelist))
+ goto out;
+
+ down_write(&mm->mmap_sem);
+
+ {
+ struct page *page = list_first_entry(&pagelist,
+ struct page, lru);
+ struct privcmd_mmap_entry *msg = page_address(page);
+
+ vma = find_vma(mm, msg->va);
+ rc = -EINVAL;
+
+ if (!vma || (msg->va != vma->vm_start) ||
+ !privcmd_enforce_singleshot_mapping(vma))
+ goto out_up;
+ }
+
+ state.va = vma->vm_start;
+ state.vma = vma;
+ state.domain = mmapcmd.dom;
+
+ rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+ &pagelist,
+ mmap_mfn_range, &state);
+
+
+out_up:
+ up_write(&mm->mmap_sem);
+
+out:
+ free_page_list(&pagelist);
+
+ return rc;
+}
+
+struct mmap_batch_state {
+ domid_t domain;
+ unsigned long va;
+ struct vm_area_struct *vma;
+ int err;
+
+ xen_pfn_t __user *user;
+};
+
+static int mmap_batch_fn(void *data, void *state)
+{
+ xen_pfn_t *mfnp = data;
+ struct mmap_batch_state *st = state;
+
+ if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+ st->vma->vm_page_prot, st->domain) < 0) {
+ *mfnp |= 0xf0000000U;
+ st->err++;
+ }
+ st->va += PAGE_SIZE;
+
+ return 0;
+}
+
+static int mmap_return_errors(void *data, void *state)
+{
+ xen_pfn_t *mfnp = data;
+ struct mmap_batch_state *st = state;
+
+ put_user(*mfnp, st->user++);
+
+ return 0;
+}
+
+static struct vm_operations_struct privcmd_vm_ops;
+
+static long privcmd_ioctl_mmap_batch(void __user *udata)
+{
+ int ret;
+ struct privcmd_mmapbatch m;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long nr_pages;
+ LIST_HEAD(pagelist);
+ struct mmap_batch_state state;
+
+ if (!xen_initial_domain())
+ return -EPERM;
+
+ if (copy_from_user(&m, udata, sizeof(m)))
+ return -EFAULT;
+
+ nr_pages = m.num;
+ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+ return -EINVAL;
+
+ ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
+ m.arr);
+
+ if (ret || list_empty(&pagelist))
+ goto out;
+
+ down_write(&mm->mmap_sem);
+
+ vma = find_vma(mm, m.addr);
+ ret = -EINVAL;
+ if (!vma ||
+ vma->vm_ops != &privcmd_vm_ops ||
+ (m.addr != vma->vm_start) ||
+ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
+ !privcmd_enforce_singleshot_mapping(vma)) {
+ up_write(&mm->mmap_sem);
+ goto out;
+ }
+
+ state.domain = m.dom;
+ state.vma = vma;
+ state.va = m.addr;
+ state.err = 0;
+
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_batch_fn, &state);
+
+ up_write(&mm->mmap_sem);
+
+ if (state.err > 0) {
+ ret = 0;
+
+ state.user = m.arr;
+ traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist,
+ mmap_return_errors, &state);
+ }
+
+out:
+ free_page_list(&pagelist);
+
+ return ret;
+}
+
+static long privcmd_ioctl(struct file *file,
+ unsigned int cmd, unsigned long data)
+{
+ int ret = -ENOSYS;
+ void __user *udata = (void __user *) data;
+
+ switch (cmd) {
+ case IOCTL_PRIVCMD_HYPERCALL:
+ ret = privcmd_ioctl_hypercall(udata);
+ break;
+
+ case IOCTL_PRIVCMD_MMAP:
+ ret = privcmd_ioctl_mmap(udata);
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH:
+ ret = privcmd_ioctl_mmap_batch(udata);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+ vma, vma->vm_start, vma->vm_end,
+ vmf->pgoff, vmf->virtual_address);
+
+ return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct privcmd_vm_ops = {
+ .fault = privcmd_fault
+};
+
+static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* Unsupported for auto-translate guests. */
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -ENOSYS;
+
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+ vma->vm_ops = &privcmd_vm_ops;
+ vma->vm_private_data = NULL;
+
+ return 0;
+}
+
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+{
+ return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+}
+#endif
+
+const struct file_operations privcmd_file_ops = {
+ .unlocked_ioctl = privcmd_ioctl,
+ .mmap = privcmd_mmap,
+};
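
The new privcmd.c exposes three ioctls (hypercall, mmap and mmap_batch) on a xenfs file named "privcmd". The sketch below shows how a privileged dom0 tool might issue a raw hypercall through it; the /proc/xen mount point and the <xen/privcmd.h> install path are assumptions, while IOCTL_PRIVCMD_HYPERCALL and the op/arg[5] layout come from the patch itself:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>	/* assumed location of the privcmd ABI header */

int main(void)
{
	/* Hypercall number and arguments are placeholders. */
	struct privcmd_hypercall call = {
		.op  = 0,
		.arg = { 0, 0, 0, 0, 0 },
	};
	/* Assumes xenfs is mounted at the conventional /proc/xen. */
	int fd = open("/proc/xen/privcmd", O_RDWR);

	if (fd < 0) {
		perror("open /proc/xen/privcmd");
		return 1;
	}
	if (ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call) < 0)
		perror("IOCTL_PRIVCMD_HYPERCALL");
	close(fd);
	return 0;
}
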
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index bd96340063c1..d6662b789b6b 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/magic.h>
+#include <linux/mm.h>
+#include <linux/backing-dev.h>
#include <xen/xen.h>
@@ -22,6 +24,62 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
+static int xenfs_set_page_dirty(struct page *page)
+{
+ return !TestSetPageDirty(page);
+}
+
+static const struct address_space_operations xenfs_aops = {
+ .set_page_dirty = xenfs_set_page_dirty,
+};
+
+static struct backing_dev_info xenfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ ret->i_mode = mode;
+ ret->i_mapping->a_ops = &xenfs_aops;
+ ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
+ ret->i_uid = ret->i_gid = 0;
+ ret->i_blocks = 0;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ }
+ return ret;
+}
+
+static struct dentry *xenfs_create_file(struct super_block *sb,
+ struct dentry *parent,
+ const char *name,
+ const struct file_operations *fops,
+ void *data,
+ int mode)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+ return NULL;
+
+ inode = xenfs_make_inode(sb, S_IFREG | mode);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+
+ inode->i_fop = fops;
+ inode->i_private = data;
+
+ d_add(dentry, inode);
+ return dentry;
+}
+
static ssize_t capabilities_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
@@ -44,10 +102,23 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
[1] = {},
{ "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
+ { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
{""},
};
+ int rc;
- return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+ rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+ if (rc < 0)
+ return rc;
+
+ if (xen_initial_domain()) {
+ xenfs_create_file(sb, sb->s_root, "xsd_kva",
+ &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
+ xenfs_create_file(sb, sb->s_root, "xsd_port",
+ &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
+ }
+
+ return rc;
}
static int xenfs_get_sb(struct file_system_type *fs_type,
@@ -66,11 +137,25 @@ static struct file_system_type xenfs_type = {
static int __init xenfs_init(void)
{
- if (xen_domain())
- return register_filesystem(&xenfs_type);
+ int err;
+ if (!xen_domain()) {
+ printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
+ return 0;
+ }
+
+ err = register_filesystem(&xenfs_type);
+ if (err) {
+ printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
+ goto out;
+ }
+
+ err = bdi_init(&xenfs_backing_dev_info);
+ if (err)
+ unregister_filesystem(&xenfs_type);
+
+ out:
- printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
- return 0;
+ return err;
}
static void __exit xenfs_exit(void)
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index 51f08b2d0bf1..b68aa6200003 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -2,5 +2,8 @@
#define _XENFS_XENBUS_H
extern const struct file_operations xenbus_file_ops;
+extern const struct file_operations privcmd_file_ops;
+extern const struct file_operations xsd_kva_file_ops;
+extern const struct file_operations xsd_port_file_ops;
#endif /* _XENFS_XENBUS_H */
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
new file mode 100644
index 000000000000..fef20dbc6a5c
--- /dev/null
+++ b/drivers/xen/xenfs/xenstored.c
@@ -0,0 +1,68 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#include <xen/page.h>
+
+#include "xenfs.h"
+#include "../xenbus/xenbus_comms.h"
+
+static ssize_t xsd_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+{
+ const char *str = (const char *)file->private_data;
+ return simple_read_from_buffer(buf, size, off, str, strlen(str));
+}
+
+static int xsd_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static int xsd_kva_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
+ xen_store_interface);
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_pfn(xen_store_interface),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+const struct file_operations xsd_kva_file_ops = {
+ .open = xsd_kva_open,
+ .mmap = xsd_kva_mmap,
+ .read = xsd_read,
+ .release = xsd_release,
+};
+
+static int xsd_port_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)kasprintf(GFP_KERNEL, "%d",
+ xen_store_evtchn);
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+}
+
+const struct file_operations xsd_port_file_ops = {
+ .open = xsd_port_open,
+ .read = xsd_read,
+ .release = xsd_release,
+};
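
The two files created above are consumed by the userspace xenstore daemon. A minimal sketch of that consumer, assuming xenfs is mounted at /proc/xen (only the file names and the one-page mmap restriction come from the patch; the rest is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char buf[32];
        ssize_t n;
        void *ring;
        int port_fd = open("/proc/xen/xsd_port", O_RDONLY);
        int kva_fd = open("/proc/xen/xsd_kva", O_RDWR);

        if (port_fd < 0 || kva_fd < 0)
                return 1;

        /* xsd_port_open() formats the xenstore event channel as a decimal string */
        n = read(port_fd, buf, sizeof(buf) - 1);
        if (n <= 0)
                return 1;
        buf[n] = '\0';
        printf("xenstore event channel: %ld\n", strtol(buf, NULL, 10));

        /* xsd_kva_mmap() only accepts a single page at offset 0 */
        ring = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, kva_fd, 0);
        if (ring == MAP_FAILED)
                return 1;

        /* ... hand the ring and event channel to the daemon's I/O loop ... */
        munmap(ring, page);
        close(kva_fd);
        close(port_fd);
        return 0;
}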
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index 5a03ba8c8364..ba0cf0b601bb 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -55,6 +55,7 @@ static int output_records(int outfd);
static int sort_records = 0;
static int wide_records = 0;
+static int include_jump = 0;
static int usage(void)
{
@@ -63,6 +64,7 @@ static int usage(void)
fprintf(stderr, "usage: ihex2fw [<options>] <src.HEX> <dst.fw>\n");
fprintf(stderr, " -w: wide records (16-bit length)\n");
fprintf(stderr, " -s: sort records by address\n");
+ fprintf(stderr, " -j: include records for CS:IP/EIP address\n");
return 1;
}
@@ -73,7 +75,7 @@ int main(int argc, char **argv)
uint8_t *data;
int opt;
- while ((opt = getopt(argc, argv, "ws")) != -1) {
+ while ((opt = getopt(argc, argv, "wsj")) != -1) {
switch (opt) {
case 'w':
wide_records = 1;
@@ -81,7 +83,9 @@ int main(int argc, char **argv)
case 's':
sort_records = 1;
break;
+ case 'j':
+ include_jump = 1;
+ break;
default:
return usage();
}
}
@@ -128,6 +132,7 @@ static int process_ihex(uint8_t *data, ssize_t size)
{
struct ihex_binrec *record;
uint32_t offset = 0;
+ uint32_t data32;
uint8_t type, crc = 0, crcbyte = 0;
int i, j;
int line = 1;
@@ -223,8 +228,14 @@ next_record:
return -EINVAL;
}
+ memcpy(&data32, &record->data[0], sizeof(data32));
+ data32 = htonl(data32);
+ memcpy(&record->data[0], &data32, sizeof(data32));
+
/* These records contain the CS/IP or EIP where execution
- * starts. Don't really know what to do with them. */
+ * starts. If requested, output this as a record. */
+ if (include_jump)
+ file_record(record);
goto next_record;
default:
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 795233702a4e..7e0511476797 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -17,3 +17,16 @@ config 9P_FSCACHE
Choose Y here to enable persistent, read-only local
caching support for 9p clients using FS-Cache
+
+config 9P_FS_POSIX_ACL
+ bool "9P POSIX Access Control Lists"
+ depends on 9P_FS
+ select FS_POSIX_ACL
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index 91fba025fcbe..f8ba37effd1b 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_9P_FS) := 9p.o
xattr_user.o
9p-$(CONFIG_9P_FSCACHE) += cache.o
+9p-$(CONFIG_9P_FS_POSIX_ACL) += acl.o
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
new file mode 100644
index 000000000000..12d602351dbe
--- /dev/null
+++ b/fs/9p/acl.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/posix_acl_xattr.h>
+#include "xattr.h"
+#include "acl.h"
+#include "v9fs_vfs.h"
+#include "v9fs.h"
+
+static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
+{
+ ssize_t size;
+ void *value = NULL;
+ struct posix_acl *acl = NULL;
+
+ size = v9fs_fid_xattr_get(fid, name, NULL, 0);
+ if (size > 0) {
+ value = kzalloc(size, GFP_NOFS);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ size = v9fs_fid_xattr_get(fid, name, value, size);
+ if (size > 0) {
+ acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ goto err_out;
+ }
+ } else if (size == -ENODATA || size == 0 ||
+ size == -ENOSYS || size == -EOPNOTSUPP) {
+ acl = NULL;
+ } else
+ acl = ERR_PTR(-EIO);
+
+err_out:
+ kfree(value);
+ return acl;
+}
+
+int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
+{
+ int retval = 0;
+ struct posix_acl *pacl, *dacl;
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
+ set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
+ return 0;
+ }
+ /* get the default/access acl values and cache them */
+ dacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_DEFAULT);
+ pacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_ACCESS);
+
+ if (!IS_ERR(dacl) && !IS_ERR(pacl)) {
+ set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, pacl);
+ posix_acl_release(dacl);
+ posix_acl_release(pacl);
+ } else
+ retval = -EIO;
+
+ return retval;
+}
+
+static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ /*
+ * 9p always caches the ACL value when
+ * instantiating the inode (v9fs_inode_from_fid)
+ */
+ acl = get_cached_acl(inode, type);
+ BUG_ON(acl == ACL_NOT_CACHED);
+ return acl;
+}
+
+int v9fs_check_acl(struct inode *inode, int mask)
+{
+ struct posix_acl *acl;
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
+ /*
+ * Not in access = client mode; the ACL
+ * check is done by the server
+ */
+ return 0;
+ }
+ acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS);
+
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return error;
+ }
+ return -EAGAIN;
+}
+
+static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
+{
+ int retval;
+ char *name;
+ size_t size;
+ void *buffer;
+ struct inode *inode = dentry->d_inode;
+
+ set_cached_acl(inode, type, acl);
+ /* Send a setxattr request to the server */
+ size = posix_acl_xattr_size(acl->a_count);
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ retval = posix_acl_to_xattr(acl, buffer, size);
+ if (retval < 0)
+ goto err_free_out;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
+ BUG();
+ }
+ retval = v9fs_xattr_set(dentry, name, buffer, size, 0);
+err_free_out:
+ kfree(buffer);
+ return retval;
+}
+
+int v9fs_acl_chmod(struct dentry *dentry)
+{
+ int retval = 0;
+ struct posix_acl *acl, *clone;
+ struct inode *inode = dentry->d_inode;
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+ acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS);
+ if (acl) {
+ clone = posix_acl_clone(acl, GFP_KERNEL);
+ posix_acl_release(acl);
+ if (!clone)
+ return -ENOMEM;
+ retval = posix_acl_chmod_masq(clone, inode->i_mode);
+ if (!retval)
+ retval = v9fs_set_acl(dentry, ACL_TYPE_ACCESS, clone);
+ posix_acl_release(clone);
+ }
+ return retval;
+}
+
+int v9fs_set_create_acl(struct dentry *dentry,
+ struct posix_acl *dpacl, struct posix_acl *pacl)
+{
+ if (dpacl)
+ v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
+ if (pacl)
+ v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
+ posix_acl_release(dpacl);
+ posix_acl_release(pacl);
+ return 0;
+}
+
+int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+ struct posix_acl **dpacl, struct posix_acl **pacl)
+{
+ int retval = 0;
+ mode_t mode = *modep;
+ struct posix_acl *acl = NULL;
+
+ if (!S_ISLNK(mode)) {
+ acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (!acl)
+ mode &= ~current_umask();
+ }
+ if (acl) {
+ struct posix_acl *clone;
+
+ if (S_ISDIR(mode))
+ *dpacl = acl;
+ clone = posix_acl_clone(acl, GFP_NOFS);
+ retval = -ENOMEM;
+ if (!clone)
+ goto cleanup;
+
+ retval = posix_acl_create_masq(clone, &mode);
+ if (retval < 0) {
+ posix_acl_release(clone);
+ goto cleanup;
+ }
+ if (retval > 0)
+ *pacl = clone;
+ }
+ *modep = mode;
+ return 0;
+cleanup:
+ posix_acl_release(acl);
+ return retval;
+
+}
+
+static int v9fs_remote_get_acl(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ char *full_name;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ full_name = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ full_name = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
+ BUG();
+ }
+ return v9fs_xattr_get(dentry, full_name, buffer, size);
+}
+
+static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ struct v9fs_session_info *v9ses;
+ struct posix_acl *acl;
+ int error;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ /*
+ * We allow set/get/list of acl when access=client is not specified
+ */
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
+ return v9fs_remote_get_acl(dentry, name, buffer, size, type);
+
+ acl = v9fs_get_cached_acl(dentry->d_inode, type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl == NULL)
+ return -ENODATA;
+ error = posix_acl_to_xattr(acl, buffer, size);
+ posix_acl_release(acl);
+
+ return error;
+}
+
+static int v9fs_remote_set_acl(struct dentry *dentry, const char *name,
+ const void *value, size_t size,
+ int flags, int type)
+{
+ char *full_name;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ full_name = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ full_name = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
+ BUG();
+ }
+ return v9fs_xattr_set(dentry, full_name, value, size, flags);
+}
+
+
+static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
+ const void *value, size_t size,
+ int flags, int type)
+{
+ int retval;
+ struct posix_acl *acl;
+ struct v9fs_session_info *v9ses;
+ struct inode *inode = dentry->d_inode;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ /*
+ * Set the attribute on the remote without even looking at the
+ * xattr value; we leave it to the server to validate
+ */
+ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
+ return v9fs_remote_set_acl(dentry, name,
+ value, size, flags, type);
+
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+ if (!is_owner_or_cap(inode))
+ return -EPERM;
+ if (value) {
+ /* update the cached acl value */
+ acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ else if (acl) {
+ retval = posix_acl_valid(acl);
+ if (retval)
+ goto err_out;
+ }
+ } else
+ acl = NULL;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+ mode_t mode = inode->i_mode;
+ retval = posix_acl_equiv_mode(acl, &mode);
+ if (retval < 0)
+ goto err_out;
+ else {
+ struct iattr iattr;
+ if (retval == 0) {
+ /*
+ * ACL can be represented
+ * by the mode bits. So don't
+ * update ACL.
+ */
+ acl = NULL;
+ value = NULL;
+ size = 0;
+ }
+ /* Update the mode bits */
+ iattr.ia_mode = ((mode & S_IALLUGO) |
+ (inode->i_mode & ~S_IALLUGO));
+ iattr.ia_valid = ATTR_MODE;
+ /* FIXME: should we update ctime?
+ * Does the following setxattr also update
+ * the mode?
+ */
+ v9fs_vfs_setattr_dotl(dentry, &iattr);
+ }
+ }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = POSIX_ACL_XATTR_DEFAULT;
+ if (!S_ISDIR(inode->i_mode)) {
+ retval = -EINVAL;
+ goto err_out;
+ }
+ break;
+ default:
+ BUG();
+ }
+ retval = v9fs_xattr_set(dentry, name, value, size, flags);
+ if (!retval)
+ set_cached_acl(inode, type, acl);
+err_out:
+ posix_acl_release(acl);
+ return retval;
+}
+
+const struct xattr_handler v9fs_xattr_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .get = v9fs_xattr_get_acl,
+ .set = v9fs_xattr_set_acl,
+};
+
+const struct xattr_handler v9fs_xattr_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .get = v9fs_xattr_get_acl,
+ .set = v9fs_xattr_set_acl,
+};
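
These handlers expose the cached ACLs through the standard system.posix_acl_access and system.posix_acl_default attribute names. A userspace sketch of the size-then-fetch pattern that __v9fs_get_acl() uses over the wire (path and error handling are illustrative, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

static int dump_access_acl(const char *path)
{
        ssize_t size = getxattr(path, "system.posix_acl_access", NULL, 0);
        char *value;

        if (size <= 0)
                return (int)size;               /* no ACL present, or error */

        value = malloc(size);
        if (!value)
                return -1;
        size = getxattr(path, "system.posix_acl_access", value, size);
        if (size > 0)
                printf("%s: %zd bytes of access ACL xattr\n", path, size);
        free(value);
        return size < 0 ? -1 : 0;
}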
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
new file mode 100644
index 000000000000..59e18c2e8c7e
--- /dev/null
+++ b/fs/9p/acl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#ifndef FS_9P_ACL_H
+#define FS_9P_ACL_H
+
+#ifdef CONFIG_9P_FS_POSIX_ACL
+extern int v9fs_get_acl(struct inode *, struct p9_fid *);
+extern int v9fs_check_acl(struct inode *inode, int mask);
+extern int v9fs_acl_chmod(struct dentry *);
+extern int v9fs_set_create_acl(struct dentry *,
+ struct posix_acl *, struct posix_acl *);
+extern int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+ struct posix_acl **dpacl, struct posix_acl **pacl);
+#else
+#define v9fs_check_acl NULL
+static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
+{
+ return 0;
+}
+static inline int v9fs_acl_chmod(struct dentry *dentry)
+{
+ return 0;
+}
+static inline int v9fs_set_create_acl(struct dentry *dentry,
+ struct posix_acl *dpacl,
+ struct posix_acl *pacl)
+{
+ return 0;
+}
+static inline int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+ struct posix_acl **dpacl,
+ struct posix_acl **pacl)
+{
+ return 0;
+}
+
+#endif
+#endif /* FS_9P_ACL_H */
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 6406f896bf95..b00223c99d70 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -149,6 +149,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
switch (access) {
case V9FS_ACCESS_SINGLE:
case V9FS_ACCESS_USER:
+ case V9FS_ACCESS_CLIENT:
uid = current_fsuid();
any = 0;
break;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 38dc0e067599..2f77cd33ba83 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -193,7 +193,17 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
v9ses->flags |= V9FS_ACCESS_USER;
else if (strcmp(s, "any") == 0)
v9ses->flags |= V9FS_ACCESS_ANY;
- else {
+ else if (strcmp(s, "client") == 0) {
+#ifdef CONFIG_9P_FS_POSIX_ACL
+ v9ses->flags |= V9FS_ACCESS_CLIENT;
+#else
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "access=client option not supported\n");
+ kfree(s);
+ ret = -EINVAL;
+ goto free_and_return;
+#endif
+ } else {
v9ses->flags |= V9FS_ACCESS_SINGLE;
v9ses->uid = simple_strtoul(s, &e, 10);
if (*e != '\0')
@@ -278,6 +288,16 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
+ if (!v9fs_proto_dotl(v9ses) &&
+ ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
+ /*
+ * We support ACCESS_CLIENT only for dotl.
+ * Fall back to ACCESS_USER
+ */
+ v9ses->flags &= ~V9FS_ACCESS_MASK;
+ v9ses->flags |= V9FS_ACCESS_USER;
+ }
+ /*FIXME !! */
/* for legacy mode, fall back to V9FS_ACCESS_ANY */
if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 4c963c9fc41f..cb6396855e2d 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -33,13 +33,17 @@
*
* Session flags reflect options selected by users at mount time
*/
+#define V9FS_ACCESS_ANY (V9FS_ACCESS_SINGLE | \
+ V9FS_ACCESS_USER | \
+ V9FS_ACCESS_CLIENT)
+#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY
+
enum p9_session_flags {
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
V9FS_ACCESS_SINGLE = 0x04,
V9FS_ACCESS_USER = 0x08,
- V9FS_ACCESS_ANY = 0x0C,
- V9FS_ACCESS_MASK = 0x0C,
+ V9FS_ACCESS_CLIENT = 0x10
};
/* possible values of ->cache */
@@ -113,8 +117,6 @@ void v9fs_session_close(struct v9fs_session_info *v9ses);
void v9fs_session_cancel(struct v9fs_session_info *v9ses);
void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
-#define V9FS_MAGIC 0x01021997
-
/* other default globals */
#define V9FS_PORT 564
#define V9FS_DEFUSER "nobody"
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 88418c419ea7..bab0eac873f4 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -64,3 +64,7 @@ int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
void v9fs_blank_wstat(struct p9_wstat *wstat);
+int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
+int v9fs_file_fsync_dotl(struct file *filp, int datasync);
+
+#define P9_LOCK_TIMEOUT (30*HZ)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 90e38449f4b3..b7f2a8e3863e 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -154,10 +154,40 @@ static int v9fs_launder_page(struct page *page)
return 0;
}
+/**
+ * v9fs_direct_IO - 9P address space operation for direct I/O
+ * @rw: direction (read or write)
+ * @iocb: target I/O control block
+ * @iov: array of vectors that define I/O buffer
+ * @pos: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ * The presence of v9fs_direct_IO() in the address space ops vector
+ * allows opening with the O_DIRECT flag, which would have failed otherwise.
+ *
+ * In the non-cached mode, we shunt off direct read and write requests before
+ * the VFS gets them, so this method should never be called.
+ *
+ * Direct IO is not yet supported in cached mode. Hence, when
+ * this routine is called through generic_file_aio_read(), the read/write
+ * fails with an error.
+ *
+ */
+ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+ loff_t pos, unsigned long nr_segs)
+{
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
+ "off/no(%lld/%lu) EINVAL\n",
+ iocb->ki_filp->f_path.dentry->d_name.name,
+ (long long) pos, nr_segs);
+
+ return -EINVAL;
+}
const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
.readpages = v9fs_vfs_readpages,
.releasepage = v9fs_release_page,
.invalidatepage = v9fs_invalidate_page,
.launder_page = v9fs_launder_page,
+ .direct_IO = v9fs_direct_IO,
};
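
As the comment above spells out, the practical effect for applications on a cached (cache=loose/fscache) v9fs mount is that open() with O_DIRECT now succeeds while the I/O itself is refused. A small caller sketch, assuming such a mount:

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int try_direct_read(const char *path)
{
        char buf[4096] __attribute__((aligned(4096)));
        int fd = open(path, O_RDONLY | O_DIRECT);
        ssize_t n;

        if (fd < 0)
                return -errno;  /* before this patch the open itself failed */

        n = read(fd, buf, sizeof(buf));
        if (n < 0)
                printf("direct read: errno %d (EINVAL expected on cached mounts)\n", errno);
        close(fd);
        return n < 0 ? -errno : 0;
}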
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 899f168fd19c..b84ebe8cefed 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -242,7 +242,8 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
while (rdir->head < rdir->tail) {
err = p9dirent_read(rdir->buf + rdir->head,
- buflen - rdir->head, &curdirent,
+ rdir->tail - rdir->head,
+ &curdirent,
fid->clnt->proto_version);
if (err < 0) {
P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
@@ -314,4 +315,5 @@ const struct file_operations v9fs_dir_operations_dotl = {
.readdir = v9fs_dir_readdir_dotl,
.open = v9fs_file_open,
.release = v9fs_dir_release,
+ .fsync = v9fs_file_fsync_dotl,
};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index e97c92bd6f16..240c30674396 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -33,6 +33,7 @@
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
+#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
@@ -44,6 +45,7 @@
#include "cache.h"
static const struct file_operations v9fs_cached_file_operations;
+static const struct file_operations v9fs_cached_file_operations_dotl;
/**
* v9fs_file_open - open a file (or directory)
@@ -92,6 +94,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
/* enable cached file options */
if(file->f_op == &v9fs_file_operations)
file->f_op = &v9fs_cached_file_operations;
+ else if (file->f_op == &v9fs_file_operations_dotl)
+ file->f_op = &v9fs_cached_file_operations_dotl;
#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_set_cookie(inode, file);
@@ -130,6 +134,206 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
return res;
}
+static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct p9_flock flock;
+ struct p9_fid *fid;
+ uint8_t status;
+ int res = 0;
+ unsigned char fl_type;
+
+ fid = filp->private_data;
+ BUG_ON(fid == NULL);
+
+ if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
+ BUG();
+
+ res = posix_lock_file_wait(filp, fl);
+ if (res < 0)
+ goto out;
+
+ /* convert posix lock to p9 tlock args */
+ memset(&flock, 0, sizeof(flock));
+ flock.type = fl->fl_type;
+ flock.start = fl->fl_start;
+ if (fl->fl_end == OFFSET_MAX)
+ flock.length = 0;
+ else
+ flock.length = fl->fl_end - fl->fl_start + 1;
+ flock.proc_id = fl->fl_pid;
+ flock.client_id = utsname()->nodename;
+ if (IS_SETLKW(cmd))
+ flock.flags = P9_LOCK_FLAGS_BLOCK;
+
+ /*
+ * if it's a blocking request and we get P9_LOCK_BLOCKED as the status
+ * for the lock request, keep on trying
+ */
+ for (;;) {
+ res = p9_client_lock_dotl(fid, &flock, &status);
+ if (res < 0)
+ break;
+
+ if (status != P9_LOCK_BLOCKED)
+ break;
+ if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
+ break;
+ schedule_timeout_interruptible(P9_LOCK_TIMEOUT);
+ }
+
+ /* map 9p status to VFS status */
+ switch (status) {
+ case P9_LOCK_SUCCESS:
+ res = 0;
+ break;
+ case P9_LOCK_BLOCKED:
+ res = -EAGAIN;
+ break;
+ case P9_LOCK_ERROR:
+ case P9_LOCK_GRACE:
+ res = -ENOLCK;
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * in case the server returned an error for the lock request, revert
+ * it locally
+ */
+ if (res < 0 && fl->fl_type != F_UNLCK) {
+ fl_type = fl->fl_type;
+ fl->fl_type = F_UNLCK;
+ res = posix_lock_file_wait(filp, fl);
+ fl->fl_type = fl_type;
+ }
+out:
+ return res;
+}
+
+static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
+{
+ struct p9_getlock glock;
+ struct p9_fid *fid;
+ int res = 0;
+
+ fid = filp->private_data;
+ BUG_ON(fid == NULL);
+
+ posix_test_lock(filp, fl);
+ /*
+ * if we have a conflicting lock locally, no need to validate
+ * with server
+ */
+ if (fl->fl_type != F_UNLCK)
+ return res;
+
+ /* convert posix lock to p9 tgetlock args */
+ memset(&glock, 0, sizeof(glock));
+ glock.type = fl->fl_type;
+ glock.start = fl->fl_start;
+ if (fl->fl_end == OFFSET_MAX)
+ glock.length = 0;
+ else
+ glock.length = fl->fl_end - fl->fl_start + 1;
+ glock.proc_id = fl->fl_pid;
+ glock.client_id = utsname()->nodename;
+
+ res = p9_client_getlock_dotl(fid, &glock);
+ if (res < 0)
+ return res;
+ if (glock.type != F_UNLCK) {
+ fl->fl_type = glock.type;
+ fl->fl_start = glock.start;
+ if (glock.length == 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = glock.start + glock.length - 1;
+ fl->fl_pid = glock.proc_id;
+ } else
+ fl->fl_type = F_UNLCK;
+
+ return res;
+}
+
+/**
+ * v9fs_file_lock_dotl - lock a file (or directory)
+ * @filp: file to be locked
+ * @cmd: lock command
+ * @fl: file lock structure
+ *
+ */
+
+static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int ret = -ENOLCK;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
+ cmd, fl, filp->f_path.dentry->d_name.name);
+
+ /* No mandatory locks */
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+ goto out_err;
+
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+ filemap_write_and_wait(inode->i_mapping);
+ invalidate_mapping_pages(&inode->i_data, 0, -1);
+ }
+
+ if (IS_SETLK(cmd) || IS_SETLKW(cmd))
+ ret = v9fs_file_do_lock(filp, cmd, fl);
+ else if (IS_GETLK(cmd))
+ ret = v9fs_file_getlock(filp, fl);
+ else
+ ret = -EINVAL;
+out_err:
+ return ret;
+}
+
+/**
+ * v9fs_file_flock_dotl - lock a file
+ * @filp: file to be locked
+ * @cmd: lock command
+ * @fl: file lock structure
+ *
+ */
+
+static int v9fs_file_flock_dotl(struct file *filp, int cmd,
+ struct file_lock *fl)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int ret = -ENOLCK;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
+ cmd, fl, filp->f_path.dentry->d_name.name);
+
+ /* No mandatory locks */
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+ goto out_err;
+
+ if (!(fl->fl_flags & FL_FLOCK))
+ goto out_err;
+
+ if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+ filemap_write_and_wait(inode->i_mapping);
+ invalidate_mapping_pages(&inode->i_data, 0, -1);
+ }
+ /* Convert flock to posix lock */
+ fl->fl_owner = (fl_owner_t)filp;
+ fl->fl_start = 0;
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_flags |= FL_POSIX;
+ fl->fl_flags ^= FL_FLOCK;
+
+ if (IS_SETLK(cmd) || IS_SETLKW(cmd))
+ ret = v9fs_file_do_lock(filp, cmd, fl);
+ else
+ ret = -EINVAL;
+out_err:
+ return ret;
+}
+
/**
* v9fs_file_readn - read from a file
* @filp: file pointer to read
@@ -219,7 +423,9 @@ static ssize_t
v9fs_file_write(struct file *filp, const char __user * data,
size_t count, loff_t * offset)
{
- int n, rsize, total = 0;
+ ssize_t retval;
+ size_t total = 0;
+ int n;
struct p9_fid *fid;
struct p9_client *clnt;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -232,14 +438,19 @@ v9fs_file_write(struct file *filp, const char __user * data,
fid = filp->private_data;
clnt = fid->clnt;
- rsize = fid->iounit ? fid->iounit : clnt->msize - P9_IOHDRSZ;
+ retval = generic_write_checks(filp, &origin, &count, 0);
+ if (retval)
+ goto out;
- do {
- if (count < rsize)
- rsize = count;
+ retval = -EINVAL;
+ if ((ssize_t) count < 0)
+ goto out;
+ retval = 0;
+ if (!count)
+ goto out;
- n = p9_client_write(fid, NULL, data+total, origin+total,
- rsize);
+ do {
+ n = p9_client_write(fid, NULL, data+total, origin+total, count);
if (n <= 0)
break;
count -= n;
@@ -258,9 +469,11 @@ v9fs_file_write(struct file *filp, const char __user * data,
}
if (n < 0)
- return n;
-
- return total;
+ retval = n;
+ else
+ retval = total;
+out:
+ return retval;
}
static int v9fs_file_fsync(struct file *filp, int datasync)
@@ -278,6 +491,20 @@ static int v9fs_file_fsync(struct file *filp, int datasync)
return retval;
}
+int v9fs_file_fsync_dotl(struct file *filp, int datasync)
+{
+ struct p9_fid *fid;
+ int retval;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n",
+ filp, datasync);
+
+ fid = filp->private_data;
+
+ retval = p9_client_fsync(fid, datasync);
+ return retval;
+}
+
static const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -290,6 +517,19 @@ static const struct file_operations v9fs_cached_file_operations = {
.fsync = v9fs_file_fsync,
};
+static const struct file_operations v9fs_cached_file_operations_dotl = {
+ .llseek = generic_file_llseek,
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+ .write = v9fs_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock_dotl,
+ .flock = v9fs_file_flock_dotl,
+ .mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync_dotl,
+};
+
const struct file_operations v9fs_file_operations = {
.llseek = generic_file_llseek,
.read = v9fs_file_read,
@@ -307,7 +547,8 @@ const struct file_operations v9fs_file_operations_dotl = {
.write = v9fs_file_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
- .lock = v9fs_file_lock,
+ .lock = v9fs_file_lock_dotl,
+ .flock = v9fs_file_flock_dotl,
.mmap = generic_file_readonly_mmap,
- .fsync = v9fs_file_fsync,
+ .fsync = v9fs_file_fsync_dotl,
};
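
With .lock wired up, a POSIX byte-range lock taken by an application travels through v9fs_file_lock_dotl() and v9fs_file_do_lock() and reaches the server as a lock request, with the blocking flag set for F_SETLKW. A caller-side sketch, assuming fd refers to a file on a 9P2000.L mount:

#include <fcntl.h>
#include <unistd.h>

static int lock_first_page(int fd)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 4096,
        };

        /* F_SETLKW: keep waiting while the server reports the lock as blocked */
        if (fcntl(fd, F_SETLKW, &fl) < 0)
                return -1;

        /* ... critical section ... */

        fl.l_type = F_UNLCK;
        return fcntl(fd, F_SETLK, &fl);
}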
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9e670d527646..34bf71b56542 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/posix_acl.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -44,6 +45,7 @@
#include "fid.h"
#include "cache.h"
#include "xattr.h"
+#include "acl.h"
static const struct inode_operations v9fs_dir_inode_operations;
static const struct inode_operations v9fs_dir_inode_operations_dotu;
@@ -53,6 +55,10 @@ static const struct inode_operations v9fs_file_inode_operations_dotl;
static const struct inode_operations v9fs_symlink_inode_operations;
static const struct inode_operations v9fs_symlink_inode_operations_dotl;
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+ dev_t rdev);
+
/**
* unixmode2p9mode - convert unix mode bits to plan 9
* @v9ses: v9fs session information
@@ -500,6 +506,11 @@ v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
v9fs_vcookie_set_qid(ret, &st->qid);
v9fs_cache_inode_get_cookie(ret);
#endif
+ err = v9fs_get_acl(ret, fid);
+ if (err) {
+ iput(ret);
+ goto error;
+ }
kfree(st);
return ret;
error:
@@ -553,13 +564,6 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
return retval;
}
-static int
-v9fs_open_created(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-
/**
* v9fs_create - Create a file
* @v9ses: session information
@@ -655,29 +659,37 @@ error:
*/
static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
struct nameidata *nd)
{
int err = 0;
char *name = NULL;
gid_t gid;
int flags;
+ mode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL;
struct p9_fid *dfid, *ofid;
struct file *filp;
struct p9_qid qid;
struct inode *inode;
+ struct posix_acl *pacl = NULL, *dacl = NULL;
v9ses = v9fs_inode2v9ses(dir);
if (nd && nd->flags & LOOKUP_OPEN)
flags = nd->intent.open.flags - 1;
- else
- flags = O_RDWR;
+ else {
+ /*
+ * a create call without LOOKUP_OPEN comes from
+ * mknod of regular files, so use the mknod
+ * operation.
+ */
+ return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+ }
name = (char *) dentry->d_name.name;
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
- "mode:0x%x\n", name, flags, mode);
+ "mode:0x%x\n", name, flags, omode);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
@@ -695,6 +707,15 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
}
gid = v9fs_get_fsgid_for_create(dir);
+
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in creat %d\n", err);
+ goto error;
+ }
err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
if (err < 0) {
P9_DPRINTK(P9_DEBUG_VFS,
@@ -702,46 +723,52 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
err);
goto error;
}
+ /* instantiate inode and assign the unopened fid to the dentry */
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
+ (nd && nd->flags & LOOKUP_OPEN)) {
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
- /* No need to populate the inode if we are not opening the file AND
- * not in cached mode.
- */
- if (!v9ses->cache && !(nd && nd->flags & LOOKUP_OPEN)) {
- /* Not in cached mode. No need to populate inode with stat */
- dentry->d_op = &v9fs_dentry_operations;
- p9_client_clunk(ofid);
- d_instantiate(dentry, NULL);
- return 0;
- }
-
- /* Now walk from the parent so we can get an unopened fid. */
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- fid = NULL;
- goto error;
- }
-
- /* instantiate inode and assign the unopened fid to dentry */
- inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
- goto error;
- }
- if (v9ses->cache)
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
dentry->d_op = &v9fs_cached_dentry_operations;
- else
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ /* The fid would get clunked via a dput */
+ fid = NULL;
+ } else {
+ /*
+ * Not in cached mode. No need to populate the
+ * inode with stat. We need an inode so that we
+ * can set the ACL on the dentry
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
dentry->d_op = &v9fs_dentry_operations;
- d_instantiate(dentry, inode);
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
+ d_instantiate(dentry, inode);
+ }
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
- filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created);
+ filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
p9_client_clunk(ofid);
return PTR_ERR(filp);
@@ -800,7 +827,7 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
- filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created);
+ filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
goto error;
@@ -859,23 +886,28 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
*
*/
-static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry,
- int mode)
+static int v9fs_vfs_mkdir_dotl(struct inode *dir,
+ struct dentry *dentry, int omode)
{
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
gid_t gid;
char *name;
+ mode_t mode;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
+ struct posix_acl *dacl = NULL, *pacl = NULL;
P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
- mode |= S_IFDIR;
+ omode |= S_IFDIR;
+ if (dir->i_mode & S_ISGID)
+ omode |= S_ISGID;
+
dir_dentry = v9fs_dentry_from_dir_inode(dir);
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
@@ -886,11 +918,14 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry,
}
gid = v9fs_get_fsgid_for_create(dir);
- if (gid < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n");
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in mkdir %d\n", err);
goto error;
}
-
name = (char *) dentry->d_name.name;
err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
if (err < 0)
@@ -920,7 +955,23 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry,
if (err < 0)
goto error;
fid = NULL;
+ } else {
+ /*
+ * Not in cached mode. No need to populate the
+ * inode with stat. We need an inode so that we
+ * can set the ACL on the dentry
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ dentry->d_op = &v9fs_dentry_operations;
+ d_instantiate(dentry, inode);
}
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
+
error:
if (fid)
p9_client_clunk(fid);
@@ -979,7 +1030,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
result = v9fs_fid_add(dentry, fid);
if (result < 0)
- goto error;
+ goto error_iput;
inst_out:
if (v9ses->cache)
@@ -990,6 +1041,8 @@ inst_out:
d_add(dentry, inode);
return NULL;
+error_iput:
+ iput(inode);
error:
p9_client_clunk(fid);
@@ -1237,7 +1290,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
*
*/
-static int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
{
int retval;
struct v9fs_session_info *v9ses;
@@ -1279,6 +1332,12 @@ static int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
+ if (iattr->ia_valid & ATTR_MODE) {
+ /* We also want to update ACL when we update mode bits */
+ retval = v9fs_acl_chmod(dentry);
+ if (retval < 0)
+ return retval;
+ }
return 0;
}
@@ -1473,7 +1532,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
if (IS_ERR(fid))
return PTR_ERR(fid);
- if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))
+ if (!v9fs_proto_dotu(v9ses))
return -EBADF;
st = p9_client_stat(fid);
@@ -1616,11 +1675,6 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
gid = v9fs_get_fsgid_for_create(dir);
- if (gid < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_egid failed %d\n", gid);
- goto error;
- }
-
/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
@@ -1789,9 +1843,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
kfree(st);
} else {
/* Caching disabled. No need to get upto date stat info.
- * This dentry will be released immediately. So, just i_count++
+ * This dentry will be released immediately. So, just hold the
+ * inode
*/
- atomic_inc(&old_dentry->d_inode->i_count);
+ ihold(old_dentry->d_inode);
}
dentry->d_op = old_dentry->d_op;
@@ -1854,21 +1909,23 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
*
*/
static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode,
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
dev_t rdev)
{
int err;
char *name;
+ mode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
gid_t gid;
struct p9_qid qid;
struct dentry *dir_dentry;
+ struct posix_acl *dacl = NULL, *pacl = NULL;
P9_DPRINTK(P9_DEBUG_VFS,
" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
- dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
+ dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -1884,11 +1941,14 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode,
}
gid = v9fs_get_fsgid_for_create(dir);
- if (gid < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n");
+ mode = omode;
+ /* Update mode based on ACL value */
+ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "Failed to get acl values in mknod %d\n", err);
goto error;
}
-
name = (char *) dentry->d_name.name;
err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
@@ -1932,13 +1992,68 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode,
dentry->d_op = &v9fs_dentry_operations;
d_instantiate(dentry, inode);
}
-
+ /* Now set the ACL based on the default value */
+ v9fs_set_create_acl(dentry, dacl, pacl);
error:
if (fid)
p9_client_clunk(fid);
return err;
}
+static int
+v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
+{
+ int retval;
+ struct p9_fid *fid;
+ char *target = NULL;
+
+ P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
+ retval = -EPERM;
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ retval = p9_client_readlink(fid, &target);
+ if (retval < 0)
+ return retval;
+
+ strncpy(buffer, target, buflen);
+ P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
+
+ retval = strnlen(buffer, buflen);
+ return retval;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @nd: nameidata
+ *
+ */
+
+static void *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+{
+ int len = 0;
+ char *link = __getname();
+
+ P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+
+ if (!link)
+ link = ERR_PTR(-ENOMEM);
+ else {
+ len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
+ if (len < 0) {
+ __putname(link);
+ link = ERR_PTR(len);
+ } else
+ link[min(len, PATH_MAX-1)] = 0;
+ }
+ nd_set_link(nd, link);
+
+ return NULL;
+}
+
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
@@ -1969,7 +2084,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotl = {
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
-
+ .check_acl = v9fs_check_acl,
};
static const struct inode_operations v9fs_dir_inode_operations = {
@@ -1996,6 +2111,7 @@ static const struct inode_operations v9fs_file_inode_operations_dotl = {
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
+ .check_acl = v9fs_check_acl,
};
static const struct inode_operations v9fs_symlink_inode_operations = {
@@ -2007,8 +2123,8 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
};
static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
- .readlink = generic_readlink,
- .follow_link = v9fs_vfs_follow_link,
+ .readlink = v9fs_vfs_readlink_dotl,
+ .follow_link = v9fs_vfs_follow_link_dotl,
.put_link = v9fs_vfs_put_link,
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
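
With .readlink pointing at v9fs_vfs_readlink_dotl(), a plain readlink(2) on a dotl mount is now answered by a readlink request to the server instead of the stat-based path. A trivial caller sketch:

#include <stdio.h>
#include <unistd.h>
#include <limits.h>

static int print_link_target(const char *path)
{
        char target[PATH_MAX];
        ssize_t n = readlink(path, target, sizeof(target) - 1);

        if (n < 0)
                return -1;
        target[n] = '\0';
        printf("%s -> %s\n", path, target);
        return 0;
}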
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 1d12ba0ed3db..48d4215c60a8 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -39,6 +39,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/statfs.h>
+#include <linux/magic.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -46,6 +47,7 @@
#include "v9fs_vfs.h"
#include "fid.h"
#include "xattr.h"
+#include "acl.h"
static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl;
@@ -88,6 +90,11 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
MS_NOATIME;
+#ifdef CONFIG_9P_FS_POSIX_ACL
+ if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)
+ sb->s_flags |= MS_POSIXACL;
+#endif
+
save_mount_options(sb, data);
}
@@ -149,7 +156,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
goto release_sb;
}
sb->s_root = root;
-
if (v9fs_proto_dotl(v9ses)) {
struct p9_stat_dotl *st = NULL;
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
@@ -174,7 +180,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
}
-
+ retval = v9fs_get_acl(inode, fid);
+ if (retval)
+ goto release_sb;
v9fs_fid_add(root, fid);
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
@@ -249,7 +257,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
if (v9fs_proto_dotl(v9ses)) {
res = p9_client_statfs(fid, &rs);
if (res == 0) {
- buf->f_type = rs.type;
+ buf->f_type = V9FS_MAGIC;
buf->f_bsize = rs.bsize;
buf->f_blocks = rs.blocks;
buf->f_bfree = rs.bfree;
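
The statfs change is visible to userspace: f_type now carries the v9fs magic rather than whatever type value the server reported. A sketch using the 0x01021997 constant that this series drops from v9fs.h (V9FS_MAGIC, expected to come from <linux/magic.h> instead):

#include <sys/vfs.h>

static int is_v9fs(const char *mountpoint)
{
        struct statfs st;

        if (statfs(mountpoint, &st) < 0)
                return -1;
        return st.f_type == 0x01021997; /* V9FS_MAGIC */
}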
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index f88e5c2dc873..43ec7df84336 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -21,30 +21,13 @@
#include "fid.h"
#include "xattr.h"
-/*
- * v9fs_xattr_get()
- *
- * Copy an extended attribute into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
- void *buffer, size_t buffer_size)
+ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
+ void *buffer, size_t buffer_size)
{
ssize_t retval;
int msize, read_count;
u64 offset = 0, attr_size;
- struct p9_fid *fid, *attr_fid;
-
- P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n",
- __func__, name, buffer_size);
-
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
+ struct p9_fid *attr_fid;
attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
if (IS_ERR(attr_fid)) {
@@ -88,6 +71,31 @@ error:
}
+
+/*
+ * v9fs_xattr_get()
+ *
+ * Copy an extended attribute into the buffer
+ * provided, or compute the buffer size required.
+ * Buffer is NULL to compute the size of the buffer required.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ struct p9_fid *fid;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n",
+ __func__, name, buffer_size);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ return v9fs_fid_xattr_get(fid, name, buffer, buffer_size);
+}
+
/*
* v9fs_xattr_set()
*
@@ -156,5 +164,9 @@ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
const struct xattr_handler *v9fs_xattr_handlers[] = {
&v9fs_xattr_user_handler,
+#ifdef CONFIG_9P_FS_POSIX_ACL
+ &v9fs_xattr_acl_access_handler,
+ &v9fs_xattr_acl_default_handler,
+#endif
NULL
};
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
index 9ddf672ae5c4..eaa837c53bd5 100644
--- a/fs/9p/xattr.h
+++ b/fs/9p/xattr.h
@@ -15,10 +15,16 @@
#define FS_9P_XATTR_H
#include <linux/xattr.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
extern const struct xattr_handler *v9fs_xattr_handlers[];
extern struct xattr_handler v9fs_xattr_user_handler;
+extern const struct xattr_handler v9fs_xattr_acl_access_handler;
+extern const struct xattr_handler v9fs_xattr_acl_default_handler;
+extern ssize_t v9fs_fid_xattr_get(struct p9_fid *, const char *,
+ void *, size_t);
extern ssize_t v9fs_xattr_get(struct dentry *, const char *,
void *, size_t);
extern int v9fs_xattr_set(struct dentry *, const char *,
diff --git a/fs/Kconfig b/fs/Kconfig
index 65781de44fc0..97673c955484 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -47,10 +47,12 @@ source "fs/nilfs2/Kconfig"
endif # BLOCK
+config EXPORTFS
+ tristate
+
config FILE_LOCKING
bool "Enable POSIX file locking API" if EMBEDDED
default y
- select BKL # while lockd still uses it.
help
This option enables standard file locking support, required
for filesystems like NFS and for the flock() system
@@ -222,9 +224,6 @@ config LOCKD_V4
depends on FILE_LOCKING
default y
-config EXPORTFS
- tristate
-
config NFS_ACL_SUPPORT
tristate
select FS_POSIX_ACL
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index bb4cc5b8abc8..79e2ca7973b7 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -42,7 +42,7 @@ config BINFMT_ELF_FDPIC
config CORE_DUMP_DEFAULT_ELF_HEADERS
bool "Write ELF core dumps with partial segments"
- default n
+ default y
depends on BINFMT_ELF && ELF_CORE
help
ELF core dump files describe each memory mapping of the crashed
@@ -60,7 +60,7 @@ config CORE_DUMP_DEFAULT_ELF_HEADERS
inherited. See Documentation/filesystems/proc.txt for details.
This config option changes the default setting of coredump_filter
- seen at boot time. If unsure, say N.
+ seen at boot time. If unsure, say Y.
config BINFMT_FLAT
bool "Kernel support for flat binaries"
diff --git a/fs/Makefile b/fs/Makefile
index e6ec1d309b1d..26956fcec917 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -29,10 +29,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
-
-nfsd-$(CONFIG_NFSD) := nfsctl.o
-obj-y += $(nfsd-y) $(nfsd-m)
-
+obj-$(CONFIG_NFSD_DEPRECATED) += nfsctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
diff --git a/fs/affs/file.c b/fs/affs/file.c
index c4a9875bd1a6..0a90dcd46de2 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -894,9 +894,9 @@ affs_truncate(struct inode *inode)
if (AFFS_SB(sb)->s_flags & SF_OFS) {
struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
u32 tmp;
- if (IS_ERR(ext_bh)) {
+ if (IS_ERR(bh)) {
affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
- ext, PTR_ERR(ext_bh));
+ ext, PTR_ERR(bh));
return;
}
tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 3a0fdec175ba..5d828903ac69 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -388,7 +388,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3
affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain));
mark_buffer_dirty_inode(inode_bh, inode);
inode->i_nlink = 2;
- atomic_inc(&inode->i_count);
+ ihold(inode);
}
affs_fix_checksum(sb, bh);
mark_buffer_dirty_inode(bh, inode);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0d38c09bd55e..5439e1bc9a86 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1045,7 +1045,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
if (ret < 0)
goto link_error;
- atomic_inc(&vnode->vfs_inode.i_count);
+ ihold(&vnode->vfs_inode);
d_instantiate(dentry, &vnode->vfs_inode);
key_put(key);
_leave(" = 0");
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 722743b152d8..15690bb1d3b5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -438,7 +438,6 @@ no_more:
*/
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = page->mapping->backing_dev_info;
struct afs_writeback *wb;
int ret;
@@ -455,8 +454,6 @@ int afs_writepage(struct page *page, struct writeback_control *wbc)
}
wbc->nr_to_write -= ret;
- if (wbc->nonblocking && bdi_write_congested(bdi))
- wbc->encountered_congestion = 1;
_leave(" = 0");
return 0;
@@ -469,7 +466,6 @@ static int afs_writepages_region(struct address_space *mapping,
struct writeback_control *wbc,
pgoff_t index, pgoff_t end, pgoff_t *_next)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
struct afs_writeback *wb;
struct page *page;
int ret, n;
@@ -529,11 +525,6 @@ static int afs_writepages_region(struct address_space *mapping,
wbc->nr_to_write -= ret;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- break;
- }
-
cond_resched();
} while (index < end && wbc->nr_to_write > 0);
@@ -548,24 +539,16 @@ static int afs_writepages_region(struct address_space *mapping,
int afs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
pgoff_t start, end, next;
int ret;
_enter("");
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- _leave(" = 0 [congest]");
- return 0;
- }
-
if (wbc->range_cyclic) {
start = mapping->writeback_index;
end = -1;
ret = afs_writepages_region(mapping, wbc, start, end, &next);
- if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
- !(wbc->nonblocking && wbc->encountered_congestion))
+ if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
ret = afs_writepages_region(mapping, wbc, 0, start,
&next);
mapping->writeback_index = next;
diff --git a/fs/aio.c b/fs/aio.c
index 250b0a73c8a8..8c8f6c5b6d79 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1543,7 +1543,19 @@ static void aio_batch_add(struct address_space *mapping,
}
abe = mempool_alloc(abe_pool, GFP_KERNEL);
- BUG_ON(!igrab(mapping->host));
+
+ /*
+ * we should be using igrab here, but
+ * we don't want to hammer on the global
+ * inode spinlock just to take an extra
+ * reference on a file that we must already
+ * have a reference to.
+ *
+ * When we're called, we always have a reference
+ * on the file, so we must always have a reference
+ * on the inode, so ihold() is safe here.
+ */
+ ihold(mapping->host);
abe->mapping = mapping;
hlist_add_head(&abe->list, &batch_hash[bucket]);
return;
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index e4b75d6eda83..5365527ca43f 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -111,10 +111,9 @@ struct file *anon_inode_getfile(const char *name,
path.mnt = mntget(anon_inode_mnt);
/*
* We know the anon_inode inode count is always greater than zero,
- * so we can avoid doing an igrab() and we can use an open-coded
- * atomic_inc().
+ * so ihold() is safe.
*/
- atomic_inc(&anon_inode_inode->i_count);
+ ihold(anon_inode_inode);
path.dentry->d_op = &anon_inodefs_dentry_operations;
d_instantiate(path.dentry, anon_inode_inode);
@@ -194,6 +193,7 @@ static struct inode *anon_inode_mkinode(void)
if (!inode)
return ERR_PTR(-ENOMEM);
+ inode->i_ino = get_next_ino();
inode->i_fop = &anon_inode_fops;
inode->i_mapping->a_ops = &anon_aops;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 821b2b955dac..ac87e49fa706 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -398,6 +398,7 @@ struct inode *autofs4_get_inode(struct super_block *sb,
inode->i_gid = sb->s_root->d_inode->i_gid;
}
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_ino = get_next_ino();
if (S_ISDIR(inf->mode)) {
inode->i_nlink = 2;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index d967e052b779..685ecff3ab31 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -176,7 +176,7 @@ static int bfs_link(struct dentry *old, struct inode *dir,
inc_nlink(inode);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(new, inode);
mutex_unlock(&info->bfs_lock);
return 0;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 139fc8083f53..29990f0eee0c 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -495,6 +495,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
struct inode * inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime =
current_fs_time(inode->i_sb);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b737451e2e9d..dea3b628a6ce 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -48,6 +48,21 @@ inline struct block_device *I_BDEV(struct inode *inode)
EXPORT_SYMBOL(I_BDEV);
+/*
+ * Move the inode from its current bdi to a new bdi. If the inode is dirty
+ * we need to move it onto the dirty list of @dst so that the inode is always
+ * on the right list.
+ */
+static void bdev_inode_switch_bdi(struct inode *inode,
+ struct backing_dev_info *dst)
+{
+ spin_lock(&inode_lock);
+ inode->i_data.backing_dev_info = dst;
+ if (inode->i_state & I_DIRTY)
+ list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+ spin_unlock(&inode_lock);
+}
+
static sector_t max_block(struct block_device *bdev)
{
sector_t retval = ~((sector_t)0);
@@ -550,7 +565,7 @@ EXPORT_SYMBOL(bdget);
*/
struct block_device *bdgrab(struct block_device *bdev)
{
- atomic_inc(&bdev->bd_inode->i_count);
+ ihold(bdev->bd_inode);
return bdev;
}
@@ -580,7 +595,7 @@ static struct block_device *bd_acquire(struct inode *inode)
spin_lock(&bdev_lock);
bdev = inode->i_bdev;
if (bdev) {
- atomic_inc(&bdev->bd_inode->i_count);
+ ihold(bdev->bd_inode);
spin_unlock(&bdev_lock);
return bdev;
}
@@ -591,12 +606,12 @@ static struct block_device *bd_acquire(struct inode *inode)
spin_lock(&bdev_lock);
if (!inode->i_bdev) {
/*
- * We take an additional bd_inode->i_count for inode,
+ * We take an additional reference to bd_inode,
* and it's released in clear_inode() of inode.
* So, we can access it via ->i_mapping always
* without igrab().
*/
- atomic_inc(&bdev->bd_inode->i_count);
+ ihold(bdev->bd_inode);
inode->i_bdev = bdev;
inode->i_mapping = bdev->bd_inode->i_mapping;
list_add(&inode->i_devices, &bdev->bd_inodes);
@@ -1390,7 +1405,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
bdi = &default_backing_dev_info;
- bdev->bd_inode->i_data.backing_dev_info = bdi;
+ bdev_inode_switch_bdi(bdev->bd_inode, bdi);
}
if (bdev->bd_invalidated)
rescan_partitions(disk, bdev);
@@ -1405,8 +1420,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (ret)
goto out_clear;
bdev->bd_contains = whole;
- bdev->bd_inode->i_data.backing_dev_info =
- whole->bd_inode->i_data.backing_dev_info;
+ bdev_inode_switch_bdi(bdev->bd_inode,
+ whole->bd_inode->i_data.backing_dev_info);
bdev->bd_part = disk_get_part(disk, partno);
if (!(disk->flags & GENHD_FL_UP) ||
!bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1439,7 +1454,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
disk_put_part(bdev->bd_part);
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
- bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+ bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
@@ -1533,7 +1548,8 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
disk_put_part(bdev->bd_part);
bdev->bd_part = NULL;
bdev->bd_disk = NULL;
- bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+ bdev_inode_switch_bdi(bdev->bd_inode,
+ &default_backing_dev_info);
if (bdev != bdev->bd_contains)
victim = bdev->bd_contains;
bdev->bd_contains = NULL;
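
The open-coded atomic_inc(&inode->i_count) calls replaced by ihold() above (and again in btrfs, coda, exofs, ext2 and ext3 further down) all express the same idea: take an extra reference to an inode that is already referenced. A minimal user-space sketch of that hold/put idiom, with toy types and names rather than the kernel's:

/* Illustrative sketch only -- toy types, not kernel code. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_inode {
        atomic_int i_count;                     /* reference count */
};

/* Analogue of ihold(): take an extra reference to an object that the
 * caller already knows to be referenced (the count must not be zero). */
static void toy_ihold(struct toy_inode *inode)
{
        int old = atomic_fetch_add(&inode->i_count, 1);
        assert(old > 0);
}

/* Analogue of iput(): drop a reference, free on the last one. */
static void toy_iput(struct toy_inode *inode)
{
        if (atomic_fetch_sub(&inode->i_count, 1) == 1)
                free(inode);
}

int main(void)
{
        struct toy_inode *inode = malloc(sizeof(*inode));

        atomic_init(&inode->i_count, 1);        /* creation reference */
        toy_ihold(inode);                       /* e.g. a new hard link */
        toy_iput(inode);                        /* link dropped */
        toy_iput(inode);                        /* last reference, freed */
        printf("done\n");
        return 0;
}

The value of the named helper over the raw increment is the assertion: holding is only legal while a live reference already exists.
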
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c03864406af3..64f99cf69ce0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3849,7 +3849,7 @@ again:
p = &root->inode_tree.rb_node;
parent = NULL;
- if (hlist_unhashed(&inode->i_hash))
+ if (inode_unhashed(inode))
return;
spin_lock(&root->inode_lock);
@@ -4758,7 +4758,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
}
btrfs_set_trans_block_group(trans, dir);
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = btrfs_add_nondir(trans, dentry, inode, 1, index);
diff --git a/fs/buffer.c b/fs/buffer.c
index 7f0b9b083f77..5930e382959b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -905,7 +905,6 @@ try_again:
bh->b_state = 0;
atomic_set(&bh->b_count, 0);
- bh->b_private = NULL;
bh->b_size = size;
/* Link the buffer to its page */
@@ -1706,7 +1705,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
* and kswapd activity, but those code paths have their own
* higher-level throttling.
*/
- if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
@@ -1834,9 +1833,11 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
}
EXPORT_SYMBOL(page_zero_new_buffers);
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
@@ -1916,7 +1917,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
}
return err;
}
-EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(__block_write_begin);
static int __block_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
@@ -1953,15 +1954,6 @@ static int __block_commit_write(struct inode *inode, struct page *page,
return 0;
}
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
- get_block_t *get_block)
-{
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-
- return block_prepare_write(page, start, start + len, get_block);
-}
-EXPORT_SYMBOL(__block_write_begin);
-
/*
* block_write_begin takes care of the basic task of block allocation and
* bringing partial write blocks uptodate first.
@@ -2379,7 +2371,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
else
end = PAGE_CACHE_SIZE;
- ret = block_prepare_write(page, 0, end, get_block);
+ ret = __block_write_begin(page, 0, end, get_block);
if (!ret)
ret = block_commit_write(page, 0, end);
@@ -2466,11 +2458,10 @@ int nobh_write_begin(struct address_space *mapping,
*fsdata = NULL;
if (page_has_buffers(page)) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
- return block_write_begin(mapping, pos, len, flags, pagep,
- get_block);
+ ret = __block_write_begin(page, pos, len, get_block);
+ if (unlikely(ret))
+ goto out_release;
+ return ret;
}
if (PageMappedToDisk(page))
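
The merged __block_write_begin() above differs from the old block_prepare_write() only in deriving the in-page byte range itself from the file position and length. A stand-alone check of that arithmetic, assuming 4096-byte pages in place of PAGE_CACHE_SIZE:

/* Illustrative sketch only. */
#include <stdio.h>

#define PAGE_SIZE 4096u                 /* stand-in for PAGE_CACHE_SIZE */

int main(void)
{
        long long pos = 12345;          /* byte offset in the file */
        unsigned len = 700;             /* length of the write */

        unsigned from = (unsigned)(pos & (PAGE_SIZE - 1));  /* start offset inside the page */
        unsigned to   = from + len;                         /* end offset inside the page */

        printf("page index %lld, from %u, to %u\n",
               pos / PAGE_SIZE, from, to);
        /* prints: page index 3, from 57, to 757 */
        return 0;
}
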
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 51bcc5ce3230..e9c874abc9e1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -591,7 +591,6 @@ static int ceph_writepages_start(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- struct backing_dev_info *bdi = mapping->backing_dev_info;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc;
pgoff_t index, start, end;
@@ -633,13 +632,6 @@ static int ceph_writepages_start(struct address_space *mapping,
pagevec_init(&pvec, 0);
- /* ?? */
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- dout(" writepages congested\n");
- wbc->encountered_congestion = 1;
- goto out_final;
- }
-
/* where to start/end? */
if (wbc->range_cyclic) {
start = mapping->writeback_index; /* Start from prev offset */
@@ -885,7 +877,6 @@ out:
rc = 0; /* vfs expects us to return 0 */
ceph_put_snap_context(snapc);
dout("writepages done, rc = %d\n", rc);
-out_final:
return rc;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8c81e7b14d53..45af003865d2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1303,7 +1303,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned int bytes_to_write;
unsigned int bytes_written;
struct cifs_sb_info *cifs_sb;
@@ -1326,15 +1325,6 @@ static int cifs_writepages(struct address_space *mapping,
int scanned = 0;
int xid, long_op;
- /*
- * BB: Is this meaningful for a non-block-device file system?
- * If it is, we should test it again after we do I/O
- */
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- return 0;
- }
-
cifs_sb = CIFS_SB(mapping->host->i_sb);
/*
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 96fbeab77f2f..5d8b35539601 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -276,7 +276,7 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
}
coda_dir_update_mtime(dir_inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(de, inode);
inc_nlink(inode);
return 0;
diff --git a/fs/compat.c b/fs/compat.c
index 0644a154672b..f03abdadc401 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1963,7 +1963,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
}
#endif /* HAVE_SET_RESTORE_SIGMASK */
-#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
+#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && !defined(CONFIG_NFSD_DEPRECATED)
/* Stuff for NFS server syscalls... */
struct compat_nfsctl_svc {
u16 svc32_port;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index cf78d44a8d6a..253476d78ed8 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -135,6 +135,7 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
{
struct inode * inode = new_inode(configfs_sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &configfs_aops;
inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
inode->i_op = &configfs_inode_operations;
diff --git a/fs/dcache.c b/fs/dcache.c
index 83293be48149..23702a9d4e6d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -67,33 +67,43 @@ struct dentry_stat_t dentry_stat = {
.age_limit = 45,
};
-static void __d_free(struct dentry *dentry)
+static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
+static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
+ dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+ return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
+static void __d_free(struct rcu_head *head)
{
+ struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+
WARN_ON(!list_empty(&dentry->d_alias));
if (dname_external(dentry))
kfree(dentry->d_name.name);
kmem_cache_free(dentry_cache, dentry);
}
-static void d_callback(struct rcu_head *head)
-{
- struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
- __d_free(dentry);
-}
-
/*
- * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
- * inside dcache_lock.
+ * no dcache_lock, please.
*/
static void d_free(struct dentry *dentry)
{
+ percpu_counter_dec(&nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
+
/* if dentry was never inserted into hash, immediate free is OK */
if (hlist_unhashed(&dentry->d_hash))
- __d_free(dentry);
+ __d_free(&dentry->d_u.d_rcu);
else
- call_rcu(&dentry->d_u.d_rcu, d_callback);
+ call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
@@ -123,37 +133,34 @@ static void dentry_iput(struct dentry * dentry)
}
/*
- * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
*/
static void dentry_lru_add(struct dentry *dentry)
{
- list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
- dentry->d_sb->s_nr_dentry_unused++;
- dentry_stat.nr_unused++;
-}
-
-static void dentry_lru_add_tail(struct dentry *dentry)
-{
- list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
- dentry->d_sb->s_nr_dentry_unused++;
- dentry_stat.nr_unused++;
+ if (list_empty(&dentry->d_lru)) {
+ list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+ dentry->d_sb->s_nr_dentry_unused++;
+ percpu_counter_inc(&nr_dentry_unused);
+ }
}
static void dentry_lru_del(struct dentry *dentry)
{
if (!list_empty(&dentry->d_lru)) {
- list_del(&dentry->d_lru);
+ list_del_init(&dentry->d_lru);
dentry->d_sb->s_nr_dentry_unused--;
- dentry_stat.nr_unused--;
+ percpu_counter_dec(&nr_dentry_unused);
}
}
-static void dentry_lru_del_init(struct dentry *dentry)
+static void dentry_lru_move_tail(struct dentry *dentry)
{
- if (likely(!list_empty(&dentry->d_lru))) {
- list_del_init(&dentry->d_lru);
- dentry->d_sb->s_nr_dentry_unused--;
- dentry_stat.nr_unused--;
+ if (list_empty(&dentry->d_lru)) {
+ list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+ dentry->d_sb->s_nr_dentry_unused++;
+ percpu_counter_inc(&nr_dentry_unused);
+ } else {
+ list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
}
}
@@ -172,7 +179,6 @@ static struct dentry *d_kill(struct dentry *dentry)
struct dentry *parent;
list_del(&dentry->d_u.d_child);
- dentry_stat.nr_dentry--; /* For d_free, below */
/*drops the locks, at that point nobody can reach this dentry */
dentry_iput(dentry);
if (IS_ROOT(dentry))
@@ -237,13 +243,15 @@ repeat:
if (dentry->d_op->d_delete(dentry))
goto unhash_it;
}
+
/* Unreachable? Get rid of it */
if (d_unhashed(dentry))
goto kill_it;
- if (list_empty(&dentry->d_lru)) {
- dentry->d_flags |= DCACHE_REFERENCED;
- dentry_lru_add(dentry);
- }
+
+ /* Otherwise leave it cached and ensure it's on the LRU */
+ dentry->d_flags |= DCACHE_REFERENCED;
+ dentry_lru_add(dentry);
+
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
return;
@@ -318,11 +326,10 @@ int d_invalidate(struct dentry * dentry)
EXPORT_SYMBOL(d_invalidate);
/* This should be called _only_ with dcache_lock held */
-
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
atomic_inc(&dentry->d_count);
- dentry_lru_del_init(dentry);
+ dentry_lru_del(dentry);
return dentry;
}
@@ -441,73 +448,27 @@ static void prune_one_dentry(struct dentry * dentry)
if (dentry->d_op && dentry->d_op->d_delete)
dentry->d_op->d_delete(dentry);
- dentry_lru_del_init(dentry);
+ dentry_lru_del(dentry);
__d_drop(dentry);
dentry = d_kill(dentry);
spin_lock(&dcache_lock);
}
}
-/*
- * Shrink the dentry LRU on a given superblock.
- * @sb : superblock to shrink dentry LRU.
- * @count: If count is NULL, we prune all dentries on superblock.
- * @flags: If flags is non-zero, we need to do special processing based on
- * which flags are set. This means we don't need to maintain multiple
- * similar copies of this loop.
- */
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void shrink_dentry_list(struct list_head *list)
{
- LIST_HEAD(referenced);
- LIST_HEAD(tmp);
struct dentry *dentry;
- int cnt = 0;
- BUG_ON(!sb);
- BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
- spin_lock(&dcache_lock);
- if (count != NULL)
- /* called from prune_dcache() and shrink_dcache_parent() */
- cnt = *count;
-restart:
- if (count == NULL)
- list_splice_init(&sb->s_dentry_lru, &tmp);
- else {
- while (!list_empty(&sb->s_dentry_lru)) {
- dentry = list_entry(sb->s_dentry_lru.prev,
- struct dentry, d_lru);
- BUG_ON(dentry->d_sb != sb);
+ while (!list_empty(list)) {
+ dentry = list_entry(list->prev, struct dentry, d_lru);
+ dentry_lru_del(dentry);
- spin_lock(&dentry->d_lock);
- /*
- * If we are honouring the DCACHE_REFERENCED flag and
- * the dentry has this flag set, don't free it. Clear
- * the flag and put it back on the LRU.
- */
- if ((flags & DCACHE_REFERENCED)
- && (dentry->d_flags & DCACHE_REFERENCED)) {
- dentry->d_flags &= ~DCACHE_REFERENCED;
- list_move(&dentry->d_lru, &referenced);
- spin_unlock(&dentry->d_lock);
- } else {
- list_move_tail(&dentry->d_lru, &tmp);
- spin_unlock(&dentry->d_lock);
- cnt--;
- if (!cnt)
- break;
- }
- cond_resched_lock(&dcache_lock);
- }
- }
- while (!list_empty(&tmp)) {
- dentry = list_entry(tmp.prev, struct dentry, d_lru);
- dentry_lru_del_init(dentry);
- spin_lock(&dentry->d_lock);
/*
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free
* it - just keep it off the LRU list.
*/
+ spin_lock(&dentry->d_lock);
if (atomic_read(&dentry->d_count)) {
spin_unlock(&dentry->d_lock);
continue;
@@ -516,13 +477,60 @@ restart:
/* dentry->d_lock was dropped in prune_one_dentry() */
cond_resched_lock(&dcache_lock);
}
- if (count == NULL && !list_empty(&sb->s_dentry_lru))
- goto restart;
- if (count != NULL)
- *count = cnt;
+}
+
+/**
+ * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+ * @sb: superblock to shrink dentry LRU.
+ * @count: number of entries to prune
+ * @flags: flags to control the dentry processing
+ *
+ * If @flags contains DCACHE_REFERENCED, referenced dentries will not be pruned.
+ */
+static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+{
+ /* called from prune_dcache() and shrink_dcache_parent() */
+ struct dentry *dentry;
+ LIST_HEAD(referenced);
+ LIST_HEAD(tmp);
+ int cnt = *count;
+
+ spin_lock(&dcache_lock);
+ while (!list_empty(&sb->s_dentry_lru)) {
+ dentry = list_entry(sb->s_dentry_lru.prev,
+ struct dentry, d_lru);
+ BUG_ON(dentry->d_sb != sb);
+
+ /*
+ * If we are honouring the DCACHE_REFERENCED flag and the
+ * dentry has this flag set, don't free it. Clear the flag
+ * and put it back on the LRU.
+ */
+ if (flags & DCACHE_REFERENCED) {
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_REFERENCED) {
+ dentry->d_flags &= ~DCACHE_REFERENCED;
+ list_move(&dentry->d_lru, &referenced);
+ spin_unlock(&dentry->d_lock);
+ cond_resched_lock(&dcache_lock);
+ continue;
+ }
+ spin_unlock(&dentry->d_lock);
+ }
+
+ list_move_tail(&dentry->d_lru, &tmp);
+ if (!--cnt)
+ break;
+ cond_resched_lock(&dcache_lock);
+ }
+
+ *count = cnt;
+ shrink_dentry_list(&tmp);
+
if (!list_empty(&referenced))
list_splice(&referenced, &sb->s_dentry_lru);
spin_unlock(&dcache_lock);
+
}
/**
@@ -538,7 +546,7 @@ static void prune_dcache(int count)
{
struct super_block *sb, *p = NULL;
int w_count;
- int unused = dentry_stat.nr_unused;
+ int unused = percpu_counter_sum_positive(&nr_dentry_unused);
int prune_ratio;
int pruned;
@@ -608,13 +616,19 @@ static void prune_dcache(int count)
* shrink_dcache_sb - shrink dcache for a superblock
* @sb: superblock
*
- * Shrink the dcache for the specified super block. This
- * is used to free the dcache before unmounting a file
- * system
+ * Shrink the dcache for the specified super block. This is used to free
+ * the dcache before unmounting a file system.
*/
-void shrink_dcache_sb(struct super_block * sb)
+void shrink_dcache_sb(struct super_block *sb)
{
- __shrink_dcache_sb(sb, NULL, 0);
+ LIST_HEAD(tmp);
+
+ spin_lock(&dcache_lock);
+ while (!list_empty(&sb->s_dentry_lru)) {
+ list_splice_init(&sb->s_dentry_lru, &tmp);
+ shrink_dentry_list(&tmp);
+ }
+ spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
@@ -632,7 +646,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
/* detach this root from the system */
spin_lock(&dcache_lock);
- dentry_lru_del_init(dentry);
+ dentry_lru_del(dentry);
__d_drop(dentry);
spin_unlock(&dcache_lock);
@@ -646,7 +660,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
spin_lock(&dcache_lock);
list_for_each_entry(loop, &dentry->d_subdirs,
d_u.d_child) {
- dentry_lru_del_init(loop);
+ dentry_lru_del(loop);
__d_drop(loop);
cond_resched_lock(&dcache_lock);
}
@@ -703,20 +717,13 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
* otherwise we ascend to the parent and move to the
* next sibling if there is one */
if (!parent)
- goto out;
-
+ return;
dentry = parent;
-
} while (list_empty(&dentry->d_subdirs));
dentry = list_entry(dentry->d_subdirs.next,
struct dentry, d_u.d_child);
}
-out:
- /* several dentries were freed, need to correct nr_dentry */
- spin_lock(&dcache_lock);
- dentry_stat.nr_dentry -= detached;
- spin_unlock(&dcache_lock);
}
/*
@@ -830,14 +837,15 @@ resume:
struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
- dentry_lru_del_init(dentry);
/*
* move only zero ref count dentries to the end
* of the unused list for prune_dcache
*/
if (!atomic_read(&dentry->d_count)) {
- dentry_lru_add_tail(dentry);
+ dentry_lru_move_tail(dentry);
found++;
+ } else {
+ dentry_lru_del(dentry);
}
/*
@@ -900,12 +908,16 @@ EXPORT_SYMBOL(shrink_dcache_parent);
*/
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
+ int nr_unused;
+
if (nr) {
if (!(gfp_mask & __GFP_FS))
return -1;
prune_dcache(nr);
}
- return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+
+ nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+ return (nr_unused / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker dcache_shrinker = {
@@ -972,9 +984,10 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
spin_lock(&dcache_lock);
if (parent)
list_add(&dentry->d_u.d_child, &parent->d_subdirs);
- dentry_stat.nr_dentry++;
spin_unlock(&dcache_lock);
+ percpu_counter_inc(&nr_dentry);
+
return dentry;
}
EXPORT_SYMBOL(d_alloc);
@@ -1478,33 +1491,26 @@ out:
* This is used by ncpfs in its readdir implementation.
* Zero is returned in the dentry is invalid.
*/
-
-int d_validate(struct dentry *dentry, struct dentry *dparent)
+int d_validate(struct dentry *dentry, struct dentry *parent)
{
- struct hlist_head *base;
- struct hlist_node *lhp;
+ struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
+ struct hlist_node *node;
+ struct dentry *d;
/* Check whether the ptr might be valid at all.. */
if (!kmem_ptr_validate(dentry_cache, dentry))
- goto out;
-
- if (dentry->d_parent != dparent)
- goto out;
+ return 0;
+ if (dentry->d_parent != parent)
+ return 0;
- spin_lock(&dcache_lock);
- base = d_hash(dparent, dentry->d_name.hash);
- hlist_for_each(lhp,base) {
- /* hlist_for_each_entry_rcu() not required for d_hash list
- * as it is parsed under dcache_lock
- */
- if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
- __dget_locked(dentry);
- spin_unlock(&dcache_lock);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, node, head, d_hash) {
+ if (d == dentry) {
+ dget(dentry);
return 1;
}
}
- spin_unlock(&dcache_lock);
-out:
+ rcu_read_unlock();
return 0;
}
EXPORT_SYMBOL(d_validate);
@@ -1994,7 +2000,7 @@ global_root:
* Returns a pointer into the buffer or an error code if the
* path was too long.
*
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * "buflen" should be positive.
*
* If path is not reachable from the supplied root, then the value of
* root is changed (without modifying refcounts).
@@ -2006,10 +2012,12 @@ char *__d_path(const struct path *path, struct path *root,
int error;
prepend(&res, &buflen, "\0", 1);
+ spin_lock(&dcache_lock);
error = prepend_path(path, root, &res, &buflen);
+ spin_unlock(&dcache_lock);
+
if (error)
return ERR_PTR(error);
-
return res;
}
@@ -2419,6 +2427,9 @@ static void __init dcache_init(void)
{
int loop;
+ percpu_counter_init(&nr_dentry, 0);
+ percpu_counter_init(&nr_dentry_unused, 0);
+
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
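
The dentry_stat fields give way above to percpu_counters: every increment or decrement touches only CPU-local state, and an exact total is folded together only when /proc or the shrinker asks for it. A rough user-space analogue with one padded slot per thread (the slot layout and sizes here are illustrative assumptions, not the kernel's implementation):

/* Illustrative sketch only.  Build with: cc -pthread counter.c */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4
#define PER_THREAD_OPS 100000

/* One cache-line-padded slot per thread, like a per-CPU counter. */
struct slot {
        long count;
        char pad[64 - sizeof(long)];
};

static struct slot slots[NTHREADS];

static void *worker(void *arg)
{
        long id = (long)arg;

        for (int i = 0; i < PER_THREAD_OPS; i++)
                slots[id].count++;      /* local slot, no shared lock */
        return NULL;
}

/* Analogue of percpu_counter_sum_positive(): fold the slots on demand. */
static long sum_slots(void)
{
        long total = 0;

        for (int i = 0; i < NTHREADS; i++)
                total += slots[i].count;
        return total > 0 ? total : 0;
}

int main(void)
{
        pthread_t tid[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);

        printf("total = %ld\n", sum_slots());   /* 400000 */
        return 0;
}
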
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 30a87b3dbcac..a4ed8380e98a 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -40,6 +40,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 48d74c7391d1..85882f6ba5f7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
-static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
{
ssize_t transferred = 0;
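
Widening dio_complete() from int to ssize_t matters once a single direct-IO request can transfer more than INT_MAX bytes: returning such a byte count through an int silently truncates it. A two-function demonstration, assuming an LP64 machine where ssize_t is 64 bits:

/* Illustrative sketch only. */
#include <stdio.h>
#include <sys/types.h>

static int narrow_return(ssize_t transferred)
{
        return transferred;     /* out of range: implementation-defined, usually wraps negative */
}

static ssize_t wide_return(ssize_t transferred)
{
        return transferred;     /* preserved */
}

int main(void)
{
        ssize_t transferred = 3LL * 1024 * 1024 * 1024;   /* 3 GiB */

        printf("through int:     %d\n", narrow_return(transferred));
        printf("through ssize_t: %zd\n", wide_return(transferred));
        return 0;
}
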
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 256bb7bb102a..8cf07242067d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -77,9 +77,6 @@
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
-/* Maximum msec timeout value storeable in a long int */
-#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
-
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
#define EP_UNACTIVE_PTR ((void *) -1L)
@@ -1117,18 +1114,22 @@ static int ep_send_events(struct eventpoll *ep,
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
- int res, eavail;
+ int res, eavail, timed_out = 0;
unsigned long flags;
- long jtimeout;
+ long slack;
wait_queue_t wait;
-
- /*
- * Calculate the timeout by checking for the "infinite" value (-1)
- * and the overflow condition. The passed timeout is in milliseconds,
- * that why (t * HZ) / 1000.
- */
- jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
- MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
+ struct timespec end_time;
+ ktime_t expires, *to = NULL;
+
+ if (timeout > 0) {
+ ktime_get_ts(&end_time);
+ timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+ slack = select_estimate_accuracy(&end_time);
+ to = &expires;
+ *to = timespec_to_ktime(end_time);
+ } else if (timeout == 0) {
+ timed_out = 1;
+ }
retry:
spin_lock_irqsave(&ep->lock, flags);
@@ -1150,7 +1151,7 @@ retry:
* to TASK_INTERRUPTIBLE before doing the checks.
*/
set_current_state(TASK_INTERRUPTIBLE);
- if (!list_empty(&ep->rdllist) || !jtimeout)
+ if (!list_empty(&ep->rdllist) || timed_out)
break;
if (signal_pending(current)) {
res = -EINTR;
@@ -1158,7 +1159,9 @@ retry:
}
spin_unlock_irqrestore(&ep->lock, flags);
- jtimeout = schedule_timeout(jtimeout);
+ if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+ timed_out = 1;
+
spin_lock_irqsave(&ep->lock, flags);
}
__remove_wait_queue(&ep->wq, &wait);
@@ -1176,7 +1179,7 @@ retry:
* more luck.
*/
if (!res && eavail &&
- !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
+ !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
goto retry;
return res;
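
ep_poll() now converts the caller's millisecond timeout into an absolute end time and sleeps on an hrtimer, instead of rounding to jiffies and clamping at EP_MAX_MSTIMEO. The conversion itself is plain timespec arithmetic; a user-space version of the same computation (the function name here is invented for the example):

/* Illustrative sketch only. */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000L
#define NSEC_PER_SEC  1000000000L

/* Turn "timeout ms from now" into an absolute CLOCK_MONOTONIC deadline,
 * the way ep_poll() builds end_time before sleeping. */
static struct timespec deadline_from_ms(long timeout_ms)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        ts.tv_sec  += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * NSEC_PER_MSEC;
        if (ts.tv_nsec >= NSEC_PER_SEC) {
                ts.tv_nsec -= NSEC_PER_SEC;
                ts.tv_sec++;
        }
        return ts;
}

int main(void)
{
        struct timespec end = deadline_from_ms(2500);

        printf("deadline: %lld.%09ld\n", (long long)end.tv_sec, end.tv_nsec);
        return 0;
}

What the patch buys is not the arithmetic but the absolute deadline plus a per-task slack estimate, which survives repeated wakeups without re-deriving a relative jiffies count.
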
diff --git a/fs/exec.c b/fs/exec.c
index 6d2b6f936858..99d33a1371e9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -54,6 +54,7 @@
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
+#include <linux/oom.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -65,6 +66,12 @@ char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;
+struct core_name {
+ char *corename;
+ int used, size;
+};
+static atomic_t call_count = ATOMIC_INIT(1);
+
/* The maximal length of core_pattern is also specified in sysctl.c */
static LIST_HEAD(formats);
@@ -759,6 +766,10 @@ static int exec_mmap(struct mm_struct *mm)
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
+ if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+ atomic_dec(&old_mm->oom_disable_count);
+ atomic_inc(&tsk->mm->oom_disable_count);
+ }
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
@@ -998,7 +1009,7 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
- current->flags &= ~PF_RANDOMIZE;
+ current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
flush_thread();
current->personality &= ~bprm->per_clear;
@@ -1078,14 +1089,14 @@ EXPORT_SYMBOL(setup_new_exec);
*/
int prepare_bprm_creds(struct linux_binprm *bprm)
{
- if (mutex_lock_interruptible(&current->cred_guard_mutex))
+ if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
return -ERESTARTNOINTR;
bprm->cred = prepare_exec_creds();
if (likely(bprm->cred))
return 0;
- mutex_unlock(&current->cred_guard_mutex);
+ mutex_unlock(&current->signal->cred_guard_mutex);
return -ENOMEM;
}
@@ -1093,7 +1104,7 @@ void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
- mutex_unlock(&current->cred_guard_mutex);
+ mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
kfree(bprm);
@@ -1114,13 +1125,13 @@ void install_exec_creds(struct linux_binprm *bprm)
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
- mutex_unlock(&current->cred_guard_mutex);
+ mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
* determine how safe it is to execute the proposed program
- * - the caller must hold current->cred_guard_mutex to protect against
+ * - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH
*/
int check_unsafe_exec(struct linux_binprm *bprm)
@@ -1401,7 +1412,6 @@ int do_execve(const char * filename,
if (retval < 0)
goto out;
- current->flags &= ~PF_KTHREAD;
retval = search_binary_handler(bprm,regs);
if (retval < 0)
goto out;
@@ -1454,127 +1464,148 @@ void set_binfmt(struct linux_binfmt *new)
EXPORT_SYMBOL(set_binfmt);
+static int expand_corename(struct core_name *cn)
+{
+ char *old_corename = cn->corename;
+
+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
+ cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+ if (!cn->corename) {
+ kfree(old_corename);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int cn_printf(struct core_name *cn, const char *fmt, ...)
+{
+ char *cur;
+ int need;
+ int ret;
+ va_list arg;
+
+ va_start(arg, fmt);
+ need = vsnprintf(NULL, 0, fmt, arg);
+ va_end(arg);
+
+ if (likely(need < cn->size - cn->used - 1))
+ goto out_printf;
+
+ ret = expand_corename(cn);
+ if (ret)
+ goto expand_fail;
+
+out_printf:
+ cur = cn->corename + cn->used;
+ va_start(arg, fmt);
+ vsnprintf(cur, need + 1, fmt, arg);
+ va_end(arg);
+ cn->used += need;
+ return 0;
+
+expand_fail:
+ return ret;
+}
+
/* format_corename will inspect the pattern parameter, and output a
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(char *corename, long signr)
+static int format_corename(struct core_name *cn, long signr)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
int ispipe = (*pat_ptr == '|');
- char *out_ptr = corename;
- char *const out_end = corename + CORENAME_MAX_SIZE;
- int rc;
int pid_in_pattern = 0;
+ int err = 0;
+
+ cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
+ cn->corename = kmalloc(cn->size, GFP_KERNEL);
+ cn->used = 0;
+
+ if (!cn->corename)
+ return -ENOMEM;
/* Repeat as long as we have more pattern to process and more output
space */
while (*pat_ptr) {
if (*pat_ptr != '%') {
- if (out_ptr == out_end)
+ if (*pat_ptr == 0)
goto out;
- *out_ptr++ = *pat_ptr++;
+ err = cn_printf(cn, "%c", *pat_ptr++);
} else {
switch (*++pat_ptr) {
+ /* single % at the end, drop that */
case 0:
goto out;
/* Double percent, output one percent */
case '%':
- if (out_ptr == out_end)
- goto out;
- *out_ptr++ = '%';
+ err = cn_printf(cn, "%c", '%');
break;
/* pid */
case 'p':
pid_in_pattern = 1;
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", task_tgid_vnr(current));
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%d",
+ task_tgid_vnr(current));
break;
/* uid */
case 'u':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", cred->uid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%d", cred->uid);
break;
/* gid */
case 'g':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%d", cred->gid);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%d", cred->gid);
break;
/* signal that caused the coredump */
case 's':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%ld", signr);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%ld", signr);
break;
/* UNIX time of coredump */
case 't': {
struct timeval tv;
do_gettimeofday(&tv);
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%lu", tv.tv_sec);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%lu", tv.tv_sec);
break;
}
/* hostname */
case 'h':
down_read(&uts_sem);
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", utsname()->nodename);
+ err = cn_printf(cn, "%s",
+ utsname()->nodename);
up_read(&uts_sem);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
break;
/* executable */
case 'e':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", current->comm);
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%s", current->comm);
break;
/* core limit size */
case 'c':
- rc = snprintf(out_ptr, out_end - out_ptr,
- "%lu", rlimit(RLIMIT_CORE));
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, "%lu",
+ rlimit(RLIMIT_CORE));
break;
default:
break;
}
++pat_ptr;
}
+
+ if (err)
+ return err;
}
+
/* Backward compatibility with core_uses_pid:
*
* If core_pattern does not include a %p (as is the default)
* and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */
if (!ispipe && !pid_in_pattern && core_uses_pid) {
- rc = snprintf(out_ptr, out_end - out_ptr,
- ".%d", task_tgid_vnr(current));
- if (rc > out_end - out_ptr)
- goto out;
- out_ptr += rc;
+ err = cn_printf(cn, ".%d", task_tgid_vnr(current));
+ if (err)
+ return err;
}
out:
- *out_ptr = 0;
return ispipe;
}
@@ -1851,7 +1882,7 @@ static int umh_pipe_setup(struct subprocess_info *info)
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
struct core_state core_state;
- char corename[CORENAME_MAX_SIZE + 1];
+ struct core_name cn;
struct mm_struct *mm = current->mm;
struct linux_binfmt * binfmt;
const struct cred *old_cred;
@@ -1906,7 +1937,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
- ispipe = format_corename(corename, signr);
+ ispipe = format_corename(&cn, signr);
+
+ if (ispipe == -ENOMEM) {
+ printk(KERN_WARNING "format_corename failed\n");
+ printk(KERN_WARNING "Aborting core\n");
+ goto fail_corename;
+ }
if (ispipe) {
int dump_count;
@@ -1943,7 +1980,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
goto fail_dropcount;
}
- helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
+ helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
@@ -1956,7 +1993,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
argv_free(helper_argv);
if (retval) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
- corename);
+ cn.corename);
goto close_fail;
}
} else {
@@ -1965,7 +2002,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
- cprm.file = filp_open(corename,
+ cprm.file = filp_open(cn.corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
if (IS_ERR(cprm.file))
@@ -2007,6 +2044,8 @@ fail_dropcount:
if (ispipe)
atomic_dec(&core_dump_count);
fail_unlock:
+ kfree(cn.corename);
+fail_corename:
coredump_finish(mm);
revert_creds(old_cred);
fail_creds:
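
cn_printf() leans on the C99 guarantee that vsnprintf(NULL, 0, fmt, ...) returns the length the formatted output would need, so the core-name buffer can be grown before formatting for real. A self-contained user-space version of that measure-then-format pattern (the struct and function names here are invented):

/* Illustrative sketch only. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct name_buf {
        char *buf;
        size_t used, size;
};

static int nb_printf(struct name_buf *nb, const char *fmt, ...)
{
        va_list ap;
        int need;

        /* First pass: measure only. */
        va_start(ap, fmt);
        need = vsnprintf(NULL, 0, fmt, ap);
        va_end(ap);
        if (need < 0)
                return -1;

        /* Grow the buffer if the formatted text will not fit. */
        if (nb->used + need + 1 > nb->size) {
                size_t newsize = (nb->used + need + 1) * 2;
                char *p = realloc(nb->buf, newsize);

                if (!p)
                        return -1;
                nb->buf = p;
                nb->size = newsize;
        }

        /* Second pass: format into place. */
        va_start(ap, fmt);
        vsnprintf(nb->buf + nb->used, need + 1, fmt, ap);
        va_end(ap);
        nb->used += need;
        return 0;
}

int main(void)
{
        struct name_buf nb = { .buf = calloc(1, 8), .used = 0, .size = 8 };

        nb_printf(&nb, "core.%s.%d", "myprog", 12345);
        printf("%s\n", nb.buf);                 /* core.myprog.12345 */
        free(nb.buf);
        return 0;
}
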
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 68cb23e3bb98..b905c79b4f0a 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -46,10 +46,6 @@ static int exofs_file_fsync(struct file *filp, int datasync)
{
int ret;
struct inode *inode = filp->f_mapping->host;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0, /* metadata-only; caller takes care of data */
- };
struct super_block *sb;
if (!(inode->i_state & I_DIRTY))
@@ -57,7 +53,7 @@ static int exofs_file_fsync(struct file *filp, int datasync)
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
return 0;
- ret = sync_inode(inode, &wbc);
+ ret = sync_inode_metadata(inode, 1);
/* This is a good place to write the sb */
/* TODO: Sechedule an sb-sync on create */
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index b7dd0c236863..264e95d02830 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -153,7 +153,7 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir,
inode->i_ctime = CURRENT_TIME;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
return exofs_add_nondir(dentry, inode);
}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index e9e175949a63..51b304056f10 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -74,21 +74,20 @@ static struct dentry *
find_disconnected_root(struct dentry *dentry)
{
dget(dentry);
- spin_lock(&dentry->d_lock);
- while (!IS_ROOT(dentry) &&
- (dentry->d_parent->d_flags & DCACHE_DISCONNECTED)) {
- struct dentry *parent = dentry->d_parent;
- dget(parent);
- spin_unlock(&dentry->d_lock);
+ while (!IS_ROOT(dentry)) {
+ struct dentry *parent = dget_parent(dentry);
+
+ if (!(parent->d_flags & DCACHE_DISCONNECTED)) {
+ dput(parent);
+ break;
+ }
+
dput(dentry);
dentry = parent;
- spin_lock(&dentry->d_lock);
}
- spin_unlock(&dentry->d_lock);
return dentry;
}
-
/*
* Make sure target_dir is fully connected to the dentry tree.
*
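
The rewritten find_disconnected_root() drops the hand-rolled d_lock walk in favour of dget_parent(): always take a reference on the parent before letting go of the child, so a referenced dentry is held at every step of the climb. A toy refcounted-tree sketch of the same ordering (types, field names and the "disconnected" flag are made up for the example):

/* Illustrative sketch only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *parent;            /* the root points to itself */
        atomic_int refcount;
        bool disconnected;
        const char *name;
};

static struct node *node_get(struct node *n)
{
        atomic_fetch_add(&n->refcount, 1);
        return n;
}

static void node_put(struct node *n)
{
        atomic_fetch_sub(&n->refcount, 1);
}

/* Climb towards the root while the parent is still marked disconnected,
 * taking the parent's reference before dropping the child's, so a
 * referenced node is held at every step. */
static struct node *find_disconnected_root(struct node *n)
{
        node_get(n);
        while (n->parent != n) {
                struct node *parent = node_get(n->parent);

                if (!parent->disconnected) {
                        node_put(parent);
                        break;
                }
                node_put(n);
                n = parent;
        }
        return n;                       /* returned with one extra reference */
}

int main(void)
{
        struct node root = { .name = "root" };
        struct node a = { .name = "a", .disconnected = true };
        struct node b = { .name = "b", .disconnected = true };

        root.parent = &root;
        a.parent = &root;
        b.parent = &a;
        atomic_init(&root.refcount, 1);
        atomic_init(&a.refcount, 1);
        atomic_init(&b.refcount, 1);

        struct node *top = find_disconnected_root(&b);
        printf("disconnected root: %s\n", top->name);   /* "a" */
        node_put(top);
        return 0;
}
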
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index c6c684b44ea1..0d06f4e75699 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -646,10 +646,9 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
return here;
}
-/*
+/**
* ext2_try_to_allocate()
* @sb: superblock
- * @handle: handle to this transaction
* @group: given allocation block group
* @bitmap_bh: bufferhead holds the block bitmap
* @grp_goal: given target block within the group
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 764109886ec0..2709b34206ab 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -98,7 +98,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
if (IS_DIRSYNC(dir)) {
err = write_one_page(page, 1);
if (!err)
- err = ext2_sync_inode(dir);
+ err = sync_inode_metadata(dir, 1);
} else {
unlock_page(page);
}
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 416daa62242c..6346a2acf326 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -120,7 +120,6 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_evict_inode(struct inode *);
-extern int ext2_sync_inode (struct inode *);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern int ext2_setattr (struct dentry *, struct iattr *);
extern void ext2_set_inode_flags(struct inode *inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 533699c16040..40ad210a5049 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1203,7 +1203,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
if (inode_needs_sync(inode)) {
sync_mapping_buffers(inode->i_mapping);
- ext2_sync_inode (inode);
+ sync_inode_metadata(inode, 1);
} else {
mark_inode_dirty(inode);
}
@@ -1523,15 +1523,6 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
-int ext2_sync_inode(struct inode *inode)
-{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0, /* sys_fsync did this */
- };
- return sync_inode(inode, &wbc);
-}
-
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 71efb0e9a3f2..f8aecd2e3297 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -206,7 +206,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = ext2_add_link(dentry, inode);
if (!err) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 85df87d0f7b7..0901320671da 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1221,9 +1221,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
}
es = sbi->s_es;
- if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
- (old_mount_opt & EXT2_MOUNT_XIP)) &&
- invalidate_inodes(sb)) {
+ if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
"xip flag with busy inodes while remounting");
sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
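
The remount check above relies on the identity that (new ^ old) & FLAG is non-zero exactly when FLAG differs between the two option words, which is what lets one expression replace the pair of masked comparisons. A small check of that identity, with flag values made up for the demo:

/* Illustrative sketch only. */
#include <stdio.h>

#define MOUNT_XIP     0x010000u         /* made-up values for the demo */
#define MOUNT_DISCARD 0x000400u

int main(void)
{
        unsigned old_opt = MOUNT_DISCARD;               /* XIP was off */
        unsigned new_opt = MOUNT_DISCARD | MOUNT_XIP;   /* XIP now on  */

        /* Non-zero iff the XIP bit differs between old and new. */
        if ((new_opt ^ old_opt) & MOUNT_XIP)
                printf("xip flag changed across remount\n");

        /* The bit is the same in both words, so XOR masks it out. */
        if (!((new_opt ^ old_opt) & MOUNT_DISCARD))
                printf("discard flag unchanged\n");
        return 0;
}
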
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 8c29ae15129e..f84700be3274 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
inode->i_ctime = CURRENT_TIME_SEC;
if (IS_SYNC(inode)) {
- error = ext2_sync_inode (inode);
+ error = sync_inode_metadata(inode, 1);
/* In case sync failed due to ENOSPC the inode was actually
* written (only some dirty data were not) so we just proceed
* as if nothing happened and cleanup the unused block */
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 4a32511f4ded..b3db22649426 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -792,9 +792,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
if (here < 0)
here = 0;
- p = ((char *)bh->b_data) + (here >> 3);
+ p = bh->b_data + (here >> 3);
r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
- next = (r - ((char *)bh->b_data)) << 3;
+ next = (r - bh->b_data) << 3;
if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
return next;
@@ -810,8 +810,9 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
/**
* claim_block()
+ * @lock: the spin lock for this block group
* @block: the free block (group relative) to allocate
- * @bh: the bufferhead containts the block group bitmap
+ * @bh: the buffer_head containing the block group bitmap
*
* We think we can allocate this block in this bitmap. Try to set the bit.
* If that succeeds then check that nobody has allocated and then freed the
@@ -956,9 +957,11 @@ fail_access:
* but we will shift to the place where start_block is,
* then start from there, when looking for a reservable space.
*
- * @size: the target new reservation window size
+ * @my_rsv: the reservation window
*
- * @group_first_block: the first block we consider to start
+ * @sb: the super block
+ *
+ * @start_block: the first block we consider to start
* the real search from
*
* @last_block:
@@ -1084,7 +1087,7 @@ static int find_next_reservable_window(
*
* failed: we failed to find a reservation window in this group
*
- * @rsv: the reservation
+ * @my_rsv: the reservation window
*
* @grp_goal: The goal (group-relative). It is where the search for a
* free reservable space should start from.
@@ -1273,8 +1276,8 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
* @group: given allocation block group
* @bitmap_bh: bufferhead holds the block bitmap
* @grp_goal: given target block within the group
- * @count: target number of blocks to allocate
* @my_rsv: reservation window
+ * @count: target number of blocks to allocate
* @errp: pointer to store the error code
*
* This is the main function used to allocate a new block and its reservation
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 4ab72db3559e..9724aef22460 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -570,9 +570,14 @@ got:
ei->i_state_flags = 0;
ext3_set_inode_state(inode, EXT3_STATE_NEW);
- ei->i_extra_isize =
- (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
- sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
+ /* See comment in ext3_iget for explanation */
+ if (ino >= EXT3_FIRST_INO(sb) + 1 &&
+ EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
+ ei->i_extra_isize =
+ sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
+ } else {
+ ei->i_extra_isize = 0;
+ }
ret = inode;
dquot_initialize(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5e0faf4cda79..a9580617edd2 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -498,7 +498,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
}
/**
- * ext3_blks_to_allocate: Look up the block map and count the number
+ * ext3_blks_to_allocate - Look up the block map and count the number
* of direct blocks need to be allocated for the given branch.
*
* @branch: chain of indirect blocks
@@ -536,14 +536,18 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
}
/**
- * ext3_alloc_blocks: multiple allocate blocks needed for a branch
+ * ext3_alloc_blocks - multiple allocate blocks needed for a branch
+ * @handle: handle for this transaction
+ * @inode: owner
+ * @goal: preferred place for allocation
* @indirect_blks: the number of blocks need to allocate for indirect
* blocks
- *
+ * @blks: number of blocks that need to be allocated for direct blocks
* @new_blocks: on return it will store the new block numbers for
* the indirect blocks(if needed) and the first direct block,
- * @blks: on return it will store the total number of allocated
- * direct blocks
+ * @err: here we store the error value
+ *
+ * return the number of direct blocks allocated
*/
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
ext3_fsblk_t goal, int indirect_blks, int blks,
@@ -598,9 +602,11 @@ failed_out:
/**
* ext3_alloc_branch - allocate and set up a chain of blocks.
+ * @handle: handle for this transaction
* @inode: owner
* @indirect_blks: number of allocated indirect blocks
* @blks: number of allocated direct blocks
+ * @goal: preferred place for allocation
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
@@ -700,10 +706,9 @@ failed:
/**
* ext3_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
* @inode: owner
* @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- * ext3_alloc_branch)
* @where: location of missing link
* @num: number of indirect blocks we are adding
* @blks: number of direct blocks we are adding
@@ -1696,8 +1701,8 @@ static int ext3_journalled_writepage(struct page *page,
* doesn't seem much point in redirtying the page here.
*/
ClearPageChecked(page);
- ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
- ext3_get_block);
+ ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
+ ext3_get_block);
if (ret != 0) {
ext3_journal_stop(handle);
goto out_unlock;
@@ -2530,7 +2535,6 @@ void ext3_truncate(struct inode *inode)
*/
} else {
/* Shared branch grows from an indirect block */
- BUFFER_TRACE(partial->bh, "get_write_access");
ext3_free_branches(handle, inode, partial->bh,
partial->p,
partial->p+1, (chain+n-1) - partial);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 2b35ddb70d65..bce9dce639b8 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2260,7 +2260,7 @@ retry:
inode->i_ctime = CURRENT_TIME_SEC;
inc_nlink(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = ext3_add_entry(handle, dentry, inode);
if (!err) {
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 0ccd7b12b73c..e746d30b1232 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -977,7 +977,8 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
o_blocks_count = le32_to_cpu(es->s_blocks_count);
if (test_opt(sb, DEBUG))
- printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" uto "E3FSBLK" blocks\n",
+ printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
+ " upto "E3FSBLK" blocks\n",
o_blocks_count, n_blocks_count);
if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -985,7 +986,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
printk(KERN_ERR "EXT3-fs: filesystem on %s:"
- " too large to resize to %lu blocks safely\n",
+ " too large to resize to "E3FSBLK" blocks safely\n",
sb->s_id, n_blocks_count);
if (sizeof(sector_t) < 8)
ext3_warning(sb, __func__,
@@ -1065,11 +1066,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
- ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
- o_blocks_count + add);
+ ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
+ o_blocks_count, o_blocks_count + add);
ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
- ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
- o_blocks_count + add);
+ ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
+ o_blocks_count, o_blocks_count + add);
if ((err = ext3_journal_stop(handle)))
goto exit_put;
if (test_opt(sb, DEBUG))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 377768009106..db87413d3479 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1301,9 +1301,9 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
ext3_msg(sb, KERN_WARNING,
"warning: mounting fs with errors, "
"running e2fsck is recommended");
- else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+ else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
le16_to_cpu(es->s_mnt_count) >=
- (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+ le16_to_cpu(es->s_max_mnt_count))
ext3_msg(sb, KERN_WARNING,
"warning: maximal mount count reached, "
"running e2fsck is recommended");
@@ -1320,7 +1320,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
valid forever! :) */
es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
- if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
+ if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
es->s_mtime = cpu_to_le32(get_seconds());
@@ -1647,7 +1647,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
* Note: s_es must be initialized as soon as possible because
* some ext3 macro-instructions depend on its value
*/
- es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+ es = (struct ext3_super_block *) (bh->b_data + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT3_SUPER_MAGIC)
@@ -1758,7 +1758,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
"error: can't read superblock on 2nd try");
goto failed_mount;
}
- es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
+ es = (struct ext3_super_block *)(bh->b_data + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
ext3_msg(sb, KERN_ERR,
@@ -1857,13 +1857,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
le32_to_cpu(es->s_first_data_block) - 1)
/ EXT3_BLOCKS_PER_GROUP(sb)) + 1;
- db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
- EXT3_DESC_PER_BLOCK(sb);
+ db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext3_msg(sb, KERN_ERR,
"error: not enough memory");
+ ret = -ENOMEM;
goto failed_mount;
}
@@ -1951,6 +1951,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
if (err) {
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
+ ret = err;
goto failed_mount3;
}
@@ -2159,7 +2160,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
goto out_bdev;
}
- es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
+ es = (struct ext3_super_block *) (bh->b_data + offset);
if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
!(le32_to_cpu(es->s_feature_incompat) &
EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -2352,6 +2353,21 @@ static int ext3_commit_super(struct super_block *sb,
if (!sbh)
return error;
+
+ if (buffer_write_io_error(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but to retry the
+ * write and hope for the best.
+ */
+ ext3_msg(sb, KERN_ERR, "previous I/O error to "
+ "superblock detected");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
@@ -2368,8 +2384,15 @@ static int ext3_commit_super(struct super_block *sb,
es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
BUFFER_TRACE(sbh, "marking dirty");
mark_buffer_dirty(sbh);
- if (sync)
+ if (sync) {
error = sync_dirty_buffer(sbh);
+ if (buffer_write_io_error(sbh)) {
+ ext3_msg(sb, KERN_ERR, "I/O error while writing "
+ "superblock");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
+ }
return error;
}
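
ext3_commit_super() now notices that an earlier write of the superblock buffer failed, logs it, clears the error flag, re-marks the buffer uptodate and writes again. The kernel-side mechanics are buffer_head flags; the following is only a loose user-space analogue of the log-reset-retry step around pwrite() (the file name and offset are invented):

/* Illustrative sketch only. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a block at a fixed offset; if the first attempt fails, report
 * the error and retry once, roughly mirroring the clear-and-rewrite
 * done for the superblock buffer. */
static int write_super(int fd, const void *buf, size_t len, off_t off)
{
        ssize_t n = pwrite(fd, buf, len, off);

        if (n == (ssize_t)len)
                return 0;

        fprintf(stderr, "previous I/O error to superblock detected: %s\n",
                n < 0 ? strerror(errno) : "short write");

        n = pwrite(fd, buf, len, off);          /* retry and hope */
        return n == (ssize_t)len ? 0 : -EIO;
}

int main(void)
{
        char block[1024];
        int fd = open("sb-image.bin", O_RDWR | O_CREAT, 0600);

        if (fd < 0)
                return 1;
        memset(block, 0xA5, sizeof(block));
        if (write_super(fd, block, sizeof(block), 1024) == 0)
                printf("superblock image written\n");
        close(fd);
        return 0;
}
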
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 8867b2a1e5fe..c947e36eda6c 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_EXT4_FS) += ext4.o
-ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index bd30799a43ed..14c3af26c671 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -171,7 +171,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
* less than the blocksize * 8 ( which is the size
* of bitmap ), set rest of the block bitmap to 1
*/
- mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
+ ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
+ bh->b_data);
}
return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}
@@ -489,7 +490,7 @@ error_return:
* Check if filesystem has nblocks free & available for allocation.
* On success return 1, return 0 on failure.
*/
-int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
+static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
s64 free_blocks, dirty_blocks, root_blocks;
struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 3db5084db9bd..fac90f3fba80 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -29,16 +29,15 @@ struct ext4_system_zone {
static struct kmem_cache *ext4_system_zone_cachep;
-int __init init_ext4_system_zone(void)
+int __init ext4_init_system_zone(void)
{
- ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone,
- SLAB_RECLAIM_ACCOUNT);
+ ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
if (ext4_system_zone_cachep == NULL)
return -ENOMEM;
return 0;
}
-void exit_ext4_system_zone(void)
+void ext4_exit_system_zone(void)
{
kmem_cache_destroy(ext4_system_zone_cachep);
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 374510f72baa..ece76fb6a40c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -39,7 +39,7 @@ static int ext4_release_dir(struct inode *inode,
struct file *filp);
const struct file_operations ext4_dir_operations = {
- .llseek = generic_file_llseek,
+ .llseek = ext4_llseek,
.read = generic_read_dir,
.readdir = ext4_readdir, /* we take BKL. needed?*/
.unlocked_ioctl = ext4_ioctl,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 889ec9d5e6ad..8b5dd6369f82 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -168,7 +168,20 @@ struct mpage_da_data {
int pages_written;
int retval;
};
-#define EXT4_IO_UNWRITTEN 0x1
+
+/*
+ * Flags for ext4_io_end->flags
+ */
+#define EXT4_IO_END_UNWRITTEN 0x0001
+#define EXT4_IO_END_ERROR 0x0002
+
+struct ext4_io_page {
+ struct page *p_page;
+ int p_count;
+};
+
+#define MAX_IO_PAGES 128
+
typedef struct ext4_io_end {
struct list_head list; /* per-file finished IO list */
struct inode *inode; /* file being written to */
@@ -179,8 +192,18 @@ typedef struct ext4_io_end {
struct work_struct work; /* data work queue */
struct kiocb *iocb; /* iocb struct for AIO */
int result; /* error value for AIO */
+ int num_io_pages;
+ struct ext4_io_page *pages[MAX_IO_PAGES];
} ext4_io_end_t;
+struct ext4_io_submit {
+ int io_op;
+ struct bio *io_bio;
+ ext4_io_end_t *io_end;
+ struct ext4_io_page *io_page;
+ sector_t io_next_block;
+};
+
/*
* Special inodes numbers
*/
@@ -205,6 +228,7 @@ typedef struct ext4_io_end {
#define EXT4_MIN_BLOCK_SIZE 1024
#define EXT4_MAX_BLOCK_SIZE 65536
#define EXT4_MIN_BLOCK_LOG_SIZE 10
+#define EXT4_MAX_BLOCK_LOG_SIZE 16
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
@@ -889,6 +913,7 @@ struct ext4_inode_info {
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
+#define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */
#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
#define set_opt(o, opt) o |= EXT4_MOUNT_##opt
@@ -1087,7 +1112,6 @@ struct ext4_sb_info {
struct completion s_kobj_unregister;
/* Journaling */
- struct inode *s_journal_inode;
struct journal_s *s_journal;
struct list_head s_orphan;
struct mutex s_orphan_lock;
@@ -1120,10 +1144,7 @@ struct ext4_sb_info {
/* for buddy allocator */
struct ext4_group_info ***s_group_info;
struct inode *s_buddy_cache;
- long s_blocks_reserved;
- spinlock_t s_reserve_lock;
spinlock_t s_md_lock;
- tid_t s_last_transaction;
unsigned short *s_mb_offsets;
unsigned int *s_mb_maxs;
@@ -1141,7 +1162,6 @@ struct ext4_sb_info {
unsigned long s_mb_last_start;
/* stats for buddy allocator */
- spinlock_t s_mb_pa_lock;
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
atomic_t s_bal_success; /* we found long enough chunks */
atomic_t s_bal_allocated; /* in blocks */
@@ -1172,6 +1192,11 @@ struct ext4_sb_info {
/* timer for periodic error stats printing */
struct timer_list s_err_report;
+
+ /* Lazy inode table initialization info */
+ struct ext4_li_request *s_li_request;
+ /* Wait multiplier for lazy initialization thread */
+ unsigned int s_li_wait_mult;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1533,7 +1558,42 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
-extern struct proc_dir_entry *ext4_proc_root;
+/*
+ * Timeout and state flag for lazy initialization inode thread.
+ */
+#define EXT4_DEF_LI_WAIT_MULT 10
+#define EXT4_DEF_LI_MAX_START_DELAY 5
+#define EXT4_LAZYINIT_QUIT 0x0001
+#define EXT4_LAZYINIT_RUNNING 0x0002
+
+/*
+ * Lazy inode table initialization info
+ */
+struct ext4_lazy_init {
+ unsigned long li_state;
+
+ wait_queue_head_t li_wait_daemon;
+ wait_queue_head_t li_wait_task;
+ struct timer_list li_timer;
+ struct task_struct *li_task;
+
+ struct list_head li_request_list;
+ struct mutex li_list_mtx;
+};
+
+struct ext4_li_request {
+ struct super_block *lr_super;
+ struct ext4_sb_info *lr_sbi;
+ ext4_group_t lr_next_group;
+ struct list_head lr_request;
+ unsigned long lr_next_sched;
+ unsigned long lr_timeout;
+};
+
+struct ext4_features {
+ struct kobject f_kobj;
+ struct completion f_kobj_unregister;
+};
/*
* Function prototypes
@@ -1561,7 +1621,6 @@ extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
ext4_fsblk_t goal, unsigned long *count, int *errp);
extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
-extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
@@ -1605,11 +1664,9 @@ extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
extern unsigned long ext4_count_free_inodes(struct super_block *);
extern unsigned long ext4_count_dirs(struct super_block *);
extern void ext4_check_inodes_bitmap(struct super_block *);
-extern unsigned ext4_init_inode_bitmap(struct super_block *sb,
- struct buffer_head *bh,
- ext4_group_t group,
- struct ext4_group_desc *desc);
-extern void mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
+extern int ext4_init_inode_table(struct super_block *sb,
+ ext4_group_t group, int barrier);
/* mballoc.c */
extern long ext4_mb_stats;
@@ -1620,16 +1677,15 @@ extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
struct ext4_allocation_request *, int *);
extern int ext4_mb_reserve_blocks(struct super_block *, int);
extern void ext4_discard_preallocations(struct inode *);
-extern int __init init_ext4_mballoc(void);
-extern void exit_ext4_mballoc(void);
+extern int __init ext4_init_mballoc(void);
+extern void ext4_exit_mballoc(void);
extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block,
unsigned long count, int flags);
extern int ext4_mb_add_groupinfo(struct super_block *sb,
ext4_group_t i, struct ext4_group_desc *desc);
-extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
-extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
- ext4_group_t, int);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+
/* inode.c */
struct buffer_head *ext4_getblk(handle_t *, struct inode *,
ext4_lblk_t, int, int *);
@@ -1657,13 +1713,11 @@ extern void ext4_get_inode_flags(struct ext4_inode_info *);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
extern int ext4_writepage_trans_blocks(struct inode *);
-extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
-extern int flush_completed_IO(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
/* ioctl.c */
@@ -1960,6 +2014,7 @@ extern const struct file_operations ext4_dir_operations;
/* file.c */
extern const struct inode_operations ext4_file_inode_operations;
extern const struct file_operations ext4_file_operations;
+extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
/* namei.c */
extern const struct inode_operations ext4_dir_inode_operations;
@@ -1973,8 +2028,8 @@ extern const struct inode_operations ext4_fast_symlink_inode_operations;
/* block_validity */
extern void ext4_release_system_zone(struct super_block *sb);
extern int ext4_setup_system_zone(struct super_block *sb);
-extern int __init init_ext4_system_zone(void);
-extern void exit_ext4_system_zone(void);
+extern int __init ext4_init_system_zone(void);
+extern void ext4_exit_system_zone(void);
extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
ext4_fsblk_t start_blk,
unsigned int count);
@@ -2002,6 +2057,17 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
__u64 start_orig, __u64 start_donor,
__u64 len, __u64 *moved_len);
+/* page-io.c */
+extern int __init ext4_init_pageio(void);
+extern void ext4_exit_pageio(void);
+extern void ext4_free_io_end(ext4_io_end_t *io);
+extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
+extern int ext4_end_io_nolock(ext4_io_end_t *io);
+extern void ext4_io_submit(struct ext4_io_submit *io);
+extern int ext4_bio_write_page(struct ext4_io_submit *io,
+ struct page *page,
+ int len,
+ struct writeback_control *wbc);
/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
enum ext4_state_bits {
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index bdb6ce7e2eb4..28ce70fd9cd0 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -225,11 +225,60 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
}
+/*
+ * ext4_ext_pblock:
+ * combine low and high parts of physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t ext4_ext_pblock(struct ext4_extent *ex)
+{
+ ext4_fsblk_t block;
+
+ block = le32_to_cpu(ex->ee_start_lo);
+ block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
+ return block;
+}
+
+/*
+ * ext4_idx_pblock:
+ * combine low and high parts of a leaf physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t ext4_idx_pblock(struct ext4_extent_idx *ix)
+{
+ ext4_fsblk_t block;
+
+ block = le32_to_cpu(ix->ei_leaf_lo);
+ block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
+ return block;
+}
+
+/*
+ * ext4_ext_store_pblock:
+ * stores a large physical block number into an extent struct,
+ * breaking it into parts
+ */
+static inline void ext4_ext_store_pblock(struct ext4_extent *ex,
+ ext4_fsblk_t pb)
+{
+ ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+ ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) &
+ 0xffff);
+}
+
+/*
+ * ext4_idx_store_pblock:
+ * stores a large physical block number into an index struct,
+ * breaking it into parts
+ */
+static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
+ ext4_fsblk_t pb)
+{
+ ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+ ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) &
+ 0xffff);
+}
+
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
sector_t lblocks);
-extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
-extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
-extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
extern int ext4_extent_tree_init(handle_t *, struct inode *);
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
@@ -237,19 +286,9 @@ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
extern int ext4_can_extents_be_merged(struct inode *inode,
struct ext4_extent *ex1,
struct ext4_extent *ex2);
-extern int ext4_ext_try_to_merge(struct inode *inode,
- struct ext4_ext_path *path,
- struct ext4_extent *);
-extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
-extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t, ext4_lblk_t,
- ext_prepare_callback, void *);
extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
struct ext4_ext_path *);
-extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
- ext4_lblk_t *, ext4_fsblk_t *);
-extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
- ext4_lblk_t *, ext4_fsblk_t *);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
#endif /* _EXT4_EXTENTS */
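The two pairs of inline helpers above reassemble a 48-bit physical block number from the on-disk 32-bit low and 16-bit high fields, and split it back for storage. A minimal user-space sketch of that round trip (not part of the patch; it ignores the le32/le16 conversions and uses an illustrative toy struct rather than the real on-disk layout):

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-in for the extent fields; not the real on-disk layout. */
  struct toy_extent {
  	uint32_t ee_start_lo;	/* low 32 bits of the physical block */
  	uint16_t ee_start_hi;	/* high 16 bits of the physical block */
  };

  /* Split a 48-bit block number, mirroring ext4_ext_store_pblock(). */
  static void toy_store_pblock(struct toy_extent *ex, uint64_t pb)
  {
  	ex->ee_start_lo = (uint32_t)(pb & 0xffffffff);
  	ex->ee_start_hi = (uint16_t)((pb >> 31) >> 1);	/* i.e. pb >> 32 */
  }

  /* Recombine the halves, mirroring ext4_ext_pblock(). */
  static uint64_t toy_pblock(const struct toy_extent *ex)
  {
  	uint64_t block = ex->ee_start_lo;

  	block |= ((uint64_t)ex->ee_start_hi << 31) << 1;	/* i.e. << 32 */
  	return block;
  }

  int main(void)
  {
  	struct toy_extent ex;
  	uint64_t pb = 0x123456789abULL;	/* a 48-bit physical block number */

  	toy_store_pblock(&ex, pb);
  	printf("round trip: %#llx -> %#llx\n",
  	       (unsigned long long)pb, (unsigned long long)toy_pblock(&ex));
  	return 0;
  }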
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 06328d3e5717..0554c48cb1fd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -44,55 +44,6 @@
#include "ext4_jbd2.h"
#include "ext4_extents.h"
-
-/*
- * ext_pblock:
- * combine low and high parts of physical block number into ext4_fsblk_t
- */
-ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
-{
- ext4_fsblk_t block;
-
- block = le32_to_cpu(ex->ee_start_lo);
- block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
- return block;
-}
-
-/*
- * idx_pblock:
- * combine low and high parts of a leaf physical block number into ext4_fsblk_t
- */
-ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
-{
- ext4_fsblk_t block;
-
- block = le32_to_cpu(ix->ei_leaf_lo);
- block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
- return block;
-}
-
-/*
- * ext4_ext_store_pblock:
- * stores a large physical block number into an extent struct,
- * breaking it into parts
- */
-void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
-{
- ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
- ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
-}
-
-/*
- * ext4_idx_store_pblock:
- * stores a large physical block number into an index struct,
- * breaking it into parts
- */
-static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
-{
- ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
- ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
-}
-
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
@@ -169,7 +120,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
/* try to predict block placement */
ex = path[depth].p_ext;
if (ex)
- return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
+ return (ext4_ext_pblock(ex) +
+ (block - le32_to_cpu(ex->ee_block)));
/* it looks like index is empty;
* try to find starting block from index itself */
@@ -354,7 +306,7 @@ ext4_ext_max_entries(struct inode *inode, int depth)
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
- ext4_fsblk_t block = ext_pblock(ext);
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
@@ -363,7 +315,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
static int ext4_valid_extent_idx(struct inode *inode,
struct ext4_extent_idx *ext_idx)
{
- ext4_fsblk_t block = idx_pblock(ext_idx);
+ ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
@@ -463,13 +415,13 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
- idx_pblock(path->p_idx));
+ ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_uninitialized(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
- ext_pblock(path->p_ext));
+ ext4_ext_pblock(path->p_ext));
} else
ext_debug(" []");
}
@@ -494,7 +446,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_uninitialized(ex),
- ext4_ext_get_actual_len(ex), ext_pblock(ex));
+ ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
}
ext_debug("\n");
}
@@ -545,7 +497,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
path->p_idx = l - 1;
ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
- idx_pblock(path->p_idx));
+ ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
{
@@ -614,7 +566,7 @@ ext4_ext_binsearch(struct inode *inode,
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
- ext_pblock(path->p_ext),
+ ext4_ext_pblock(path->p_ext),
ext4_ext_is_uninitialized(path->p_ext),
ext4_ext_get_actual_len(path->p_ext));
@@ -682,7 +634,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
- path[ppos].p_block = idx_pblock(path[ppos].p_idx);
+ path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
@@ -721,7 +673,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_ext_binsearch(inode, path + ppos, block);
/* if not an empty leaf */
if (path[ppos].p_ext)
- path[ppos].p_block = ext_pblock(path[ppos].p_ext);
+ path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
ext4_ext_show_path(inode, path);
@@ -739,9 +691,9 @@ err:
* insert new index [@logical;@ptr] into the block at @curp;
* check where to insert: before @curp or after @curp
*/
-int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *curp,
- int logical, ext4_fsblk_t ptr)
+static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *curp,
+ int logical, ext4_fsblk_t ptr)
{
struct ext4_extent_idx *ix;
int len, err;
@@ -917,7 +869,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
EXT_MAX_EXTENT(path[depth].p_hdr)) {
ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(path[depth].p_ext->ee_block),
- ext_pblock(path[depth].p_ext),
+ ext4_ext_pblock(path[depth].p_ext),
ext4_ext_is_uninitialized(path[depth].p_ext),
ext4_ext_get_actual_len(path[depth].p_ext),
newblock);
@@ -1007,7 +959,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
ext_debug("%d: move %d:%llu in new index %llu\n", i,
le32_to_cpu(path[i].p_idx->ei_block),
- idx_pblock(path[i].p_idx),
+ ext4_idx_pblock(path[i].p_idx),
newblock);
/*memmove(++fidx, path[i].p_idx++,
sizeof(struct ext4_extent_idx));
@@ -1146,7 +1098,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
- idx_pblock(EXT_FIRST_INDEX(neh)));
+ ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
neh->eh_depth = cpu_to_le16(path->p_depth + 1);
err = ext4_ext_dirty(handle, inode, curp);
@@ -1232,9 +1184,9 @@ out:
* returns 0 at @phys
* return value contains 0 (success) or error code
*/
-int
-ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
- ext4_lblk_t *logical, ext4_fsblk_t *phys)
+static int ext4_ext_search_left(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
struct ext4_extent_idx *ix;
struct ext4_extent *ex;
@@ -1286,7 +1238,7 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
}
*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
- *phys = ext_pblock(ex) + ee_len - 1;
+ *phys = ext4_ext_pblock(ex) + ee_len - 1;
return 0;
}
@@ -1297,9 +1249,9 @@ ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
* returns 0 at @phys
* return value contains 0 (success) or error code
*/
-int
-ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
- ext4_lblk_t *logical, ext4_fsblk_t *phys)
+static int ext4_ext_search_right(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
struct buffer_head *bh = NULL;
struct ext4_extent_header *eh;
@@ -1342,7 +1294,7 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
}
}
*logical = le32_to_cpu(ex->ee_block);
- *phys = ext_pblock(ex);
+ *phys = ext4_ext_pblock(ex);
return 0;
}
@@ -1357,7 +1309,7 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
/* next allocated block in this leaf */
ex++;
*logical = le32_to_cpu(ex->ee_block);
- *phys = ext_pblock(ex);
+ *phys = ext4_ext_pblock(ex);
return 0;
}
@@ -1376,7 +1328,7 @@ got_index:
* follow it and find the closest allocated
* block to the right */
ix++;
- block = idx_pblock(ix);
+ block = ext4_idx_pblock(ix);
while (++depth < path->p_depth) {
bh = sb_bread(inode->i_sb, block);
if (bh == NULL)
@@ -1388,7 +1340,7 @@ got_index:
return -EIO;
}
ix = EXT_FIRST_INDEX(eh);
- block = idx_pblock(ix);
+ block = ext4_idx_pblock(ix);
put_bh(bh);
}
@@ -1402,7 +1354,7 @@ got_index:
}
ex = EXT_FIRST_EXTENT(eh);
*logical = le32_to_cpu(ex->ee_block);
- *phys = ext_pblock(ex);
+ *phys = ext4_ext_pblock(ex);
put_bh(bh);
return 0;
}
@@ -1573,7 +1525,7 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
return 0;
#endif
- if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
+ if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
return 1;
return 0;
}
@@ -1585,9 +1537,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
* Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
* 1 if they got merged.
*/
-int ext4_ext_try_to_merge(struct inode *inode,
- struct ext4_ext_path *path,
- struct ext4_extent *ex)
+static int ext4_ext_try_to_merge(struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *ex)
{
struct ext4_extent_header *eh;
unsigned int depth, len;
@@ -1632,9 +1584,9 @@ int ext4_ext_try_to_merge(struct inode *inode,
* such that there will be no overlap, and then returns 1.
* If there is no overlap found, it returns 0.
*/
-unsigned int ext4_ext_check_overlap(struct inode *inode,
- struct ext4_extent *newext,
- struct ext4_ext_path *path)
+static unsigned int ext4_ext_check_overlap(struct inode *inode,
+ struct ext4_extent *newext,
+ struct ext4_ext_path *path)
{
ext4_lblk_t b1, b2;
unsigned int depth, len1;
@@ -1706,11 +1658,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
&& ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
- ext4_ext_is_uninitialized(newext),
- ext4_ext_get_actual_len(newext),
- le32_to_cpu(ex->ee_block),
- ext4_ext_is_uninitialized(ex),
- ext4_ext_get_actual_len(ex), ext_pblock(ex));
+ ext4_ext_is_uninitialized(newext),
+ ext4_ext_get_actual_len(newext),
+ le32_to_cpu(ex->ee_block),
+ ext4_ext_is_uninitialized(ex),
+ ext4_ext_get_actual_len(ex),
+ ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
return err;
@@ -1780,7 +1733,7 @@ has_space:
/* there is no extent in this leaf, create first one */
ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
- ext_pblock(newext),
+ ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext));
path[depth].p_ext = EXT_FIRST_EXTENT(eh);
@@ -1794,7 +1747,7 @@ has_space:
ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
- ext_pblock(newext),
+ ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
@@ -1808,7 +1761,7 @@ has_space:
ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
- ext_pblock(newext),
+ ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
@@ -1819,7 +1772,7 @@ has_space:
le16_add_cpu(&eh->eh_entries, 1);
nearex = path[depth].p_ext;
nearex->ee_block = newext->ee_block;
- ext4_ext_store_pblock(nearex, ext_pblock(newext));
+ ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
nearex->ee_len = newext->ee_len;
merge:
@@ -1845,9 +1798,9 @@ cleanup:
return err;
}
-int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
- ext4_lblk_t num, ext_prepare_callback func,
- void *cbdata)
+static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
+ ext4_lblk_t num, ext_prepare_callback func,
+ void *cbdata)
{
struct ext4_ext_path *path = NULL;
struct ext4_ext_cache cbex;
@@ -1923,7 +1876,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
} else {
cbex.ec_block = le32_to_cpu(ex->ee_block);
cbex.ec_len = ext4_ext_get_actual_len(ex);
- cbex.ec_start = ext_pblock(ex);
+ cbex.ec_start = ext4_ext_pblock(ex);
cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
}
@@ -2073,7 +2026,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
/* free index block */
path--;
- leaf = idx_pblock(path->p_idx);
+ leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
return -EIO;
@@ -2181,7 +2134,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
ext4_fsblk_t start;
num = le32_to_cpu(ex->ee_block) + ee_len - from;
- start = ext_pblock(ex) + ee_len - num;
+ start = ext4_ext_pblock(ex) + ee_len - num;
ext_debug("free last %u blocks starting %llu\n", num, start);
ext4_free_blocks(handle, inode, 0, start, num, flags);
} else if (from == le32_to_cpu(ex->ee_block)
@@ -2310,7 +2263,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
goto out;
ext_debug("new extent: %u:%u:%llu\n", block, num,
- ext_pblock(ex));
+ ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2421,9 +2374,9 @@ again:
struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
- i + 1, idx_pblock(path[i].p_idx));
+ i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
- bh = sb_bread(sb, idx_pblock(path[i].p_idx));
+ bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
if (!bh) {
/* should we reset i_size? */
err = -EIO;
@@ -2535,77 +2488,21 @@ void ext4_ext_release(struct super_block *sb)
#endif
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion *)bio->bi_private);
-}
-
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
+ ext4_fsblk_t ee_pblock;
+ unsigned int ee_len;
int ret;
- struct bio *bio;
- int blkbits, blocksize;
- sector_t ee_pblock;
- struct completion event;
- unsigned int ee_len, len, done, offset;
-
- blkbits = inode->i_blkbits;
- blocksize = inode->i_sb->s_blocksize;
ee_len = ext4_ext_get_actual_len(ex);
- ee_pblock = ext_pblock(ex);
-
- /* convert ee_pblock to 512 byte sectors */
- ee_pblock = ee_pblock << (blkbits - 9);
-
- while (ee_len > 0) {
-
- if (ee_len > BIO_MAX_PAGES)
- len = BIO_MAX_PAGES;
- else
- len = ee_len;
-
- bio = bio_alloc(GFP_NOIO, len);
- if (!bio)
- return -ENOMEM;
-
- bio->bi_sector = ee_pblock;
- bio->bi_bdev = inode->i_sb->s_bdev;
-
- done = 0;
- offset = 0;
- while (done < len) {
- ret = bio_add_page(bio, ZERO_PAGE(0),
- blocksize, offset);
- if (ret != blocksize) {
- /*
- * We can't add any more pages because of
- * hardware limitations. Start a new bio.
- */
- break;
- }
- done++;
- offset += blocksize;
- if (offset >= PAGE_CACHE_SIZE)
- offset = 0;
- }
+ ee_pblock = ext4_ext_pblock(ex);
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(WRITE, bio);
- wait_for_completion(&event);
+ ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
+ if (ret > 0)
+ ret = 0;
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
- bio_put(bio);
- return -EIO;
- }
- bio_put(bio);
- ee_len -= done;
- ee_pblock += done << (blkbits - 9);
- }
- return 0;
+ return ret;
}
#define EXT4_EXT_ZERO_LEN 7
@@ -2651,12 +2548,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (map->m_lblk - ee_block);
- newblock = map->m_lblk - ee_block + ext_pblock(ex);
+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
- ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+ ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
/*
* It is safe to convert extent to initialized via explicit
@@ -2675,7 +2572,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zeroed the full extent */
return allocated;
@@ -2710,7 +2607,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ex->ee_block = orig_ex.ee_block;
ex->ee_len = cpu_to_le16(ee_len - allocated);
ext4_ext_mark_uninitialized(ex);
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
ex3 = &newex;
@@ -2725,7 +2622,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
goto fix_extent_len;
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex,
+ ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* blocks available from map->m_lblk */
return allocated;
@@ -2782,7 +2680,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zeroed the full extent */
/* blocks available from map->m_lblk */
@@ -2833,7 +2731,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zero out the first half */
/* blocks available from map->m_lblk */
@@ -2902,7 +2800,7 @@ insert:
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zero out the first half */
return allocated;
@@ -2915,7 +2813,7 @@ out:
fix_extent_len:
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_mark_uninitialized(ex);
ext4_ext_dirty(handle, inode, path + depth);
return err;
@@ -2973,12 +2871,12 @@ static int ext4_split_unwritten_extents(handle_t *handle,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (map->m_lblk - ee_block);
- newblock = map->m_lblk - ee_block + ext_pblock(ex);
+ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
ex2 = ex;
orig_ex.ee_block = ex->ee_block;
orig_ex.ee_len = cpu_to_le16(ee_len);
- ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+ ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
/*
* It is safe to convert extent to initialized via explicit
@@ -3027,7 +2925,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zeroed the full extent */
/* blocks available from map->m_lblk */
@@ -3099,7 +2997,7 @@ insert:
/* update the extent length and mark as initialized */
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_dirty(handle, inode, path + depth);
/* zero out the first half */
return allocated;
@@ -3112,7 +3010,7 @@ out:
fix_extent_len:
ex->ee_block = orig_ex.ee_block;
ex->ee_len = orig_ex.ee_len;
- ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
ext4_ext_mark_uninitialized(ex);
ext4_ext_dirty(handle, inode, path + depth);
return err;
@@ -3180,6 +3078,57 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
unmap_underlying_metadata(bdev, block + i);
}
+/*
+ * Handle EOFBLOCKS_FL flag, clearing it if necessary
+ */
+static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path,
+ unsigned int len)
+{
+ int i, depth;
+ struct ext4_extent_header *eh;
+ struct ext4_extent *ex, *last_ex;
+
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+ return 0;
+
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+
+ if (unlikely(!eh->eh_entries)) {
+ EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
+ "EOFBLOCKS_FL set");
+ return -EIO;
+ }
+ last_ex = EXT_LAST_EXTENT(eh);
+ /*
+ * We should clear the EOFBLOCKS_FL flag if we are writing the
+ * last block in the last extent in the file. We test this by
+ * first checking to see if the caller of
+ * ext4_ext_get_blocks() was interested in the last block (or
+ * a block beyond the last block) in the current extent. If
+ * this turns out to be false, we can bail out from this
+ * function immediately.
+ */
+ if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+ ext4_ext_get_actual_len(last_ex))
+ return 0;
+ /*
+ * If the caller does appear to be planning to write at or
+ * beyond the end of the current extent, we then test to see
+ * if the current extent is the last extent in the file, by
+ * checking to make sure it was reached via the rightmost node
+ * at each level of the tree.
+ */
+ for (i = depth-1; i >= 0; i--)
+ if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
+ return 0;
+ ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+ return ext4_mark_inode_dirty(handle, inode);
+}
+
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
@@ -3206,7 +3155,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
* completed
*/
if (io)
- io->flag = EXT4_IO_UNWRITTEN;
+ io->flag = EXT4_IO_END_UNWRITTEN;
else
ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
if (ext4_should_dioread_nolock(inode))
@@ -3217,8 +3166,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
ret = ext4_convert_unwritten_extents_endio(handle, inode,
path);
- if (ret >= 0)
+ if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
+ err = check_eofblocks_fl(handle, inode, map, path,
+ map->m_len);
+ } else
+ err = ret;
goto out2;
}
/* buffered IO case */
@@ -3244,8 +3197,13 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
/* buffered write, writepage time, convert*/
ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
- if (ret >= 0)
+ if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
+ err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+ if (err < 0)
+ goto out2;
+ }
+
out:
if (ret <= 0) {
err = ret;
@@ -3292,6 +3250,7 @@ out2:
}
return err ? err : allocated;
}
+
/*
* Block allocation/map/preallocation routine for extents based files
*
@@ -3315,9 +3274,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
{
struct ext4_ext_path *path = NULL;
struct ext4_extent_header *eh;
- struct ext4_extent newex, *ex, *last_ex;
+ struct ext4_extent newex, *ex;
ext4_fsblk_t newblock;
- int i, err = 0, depth, ret, cache_type;
+ int err = 0, depth, ret, cache_type;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3341,7 +3300,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
/* block is already allocated */
newblock = map->m_lblk
- le32_to_cpu(newex.ee_block)
- + ext_pblock(&newex);
+ + ext4_ext_pblock(&newex);
/* number of remaining blocks in the extent */
allocated = ext4_ext_get_actual_len(&newex) -
(map->m_lblk - le32_to_cpu(newex.ee_block));
@@ -3379,7 +3338,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ex = path[depth].p_ext;
if (ex) {
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
- ext4_fsblk_t ee_start = ext_pblock(ex);
+ ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len;
/*
@@ -3488,7 +3447,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
if (io)
- io->flag = EXT4_IO_UNWRITTEN;
+ io->flag = EXT4_IO_END_UNWRITTEN;
else
ext4_set_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN);
@@ -3497,44 +3456,23 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_flags |= EXT4_MAP_UNINIT;
}
- if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
- if (unlikely(!eh->eh_entries)) {
- EXT4_ERROR_INODE(inode,
- "eh->eh_entries == 0 and "
- "EOFBLOCKS_FL set");
- err = -EIO;
- goto out2;
- }
- last_ex = EXT_LAST_EXTENT(eh);
- /*
- * If the current leaf block was reached by looking at
- * the last index block all the way down the tree, and
- * we are extending the inode beyond the last extent
- * in the current leaf block, then clear the
- * EOFBLOCKS_FL flag.
- */
- for (i = depth-1; i >= 0; i--) {
- if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
- break;
- }
- if ((i < 0) &&
- (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) +
- ext4_ext_get_actual_len(last_ex)))
- ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
- }
+ err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+ if (err)
+ goto out2;
+
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err) {
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
- ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
+ ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
ext4_ext_get_actual_len(&newex), 0);
goto out2;
}
/* previous routine could use block we allocated */
- newblock = ext_pblock(&newex);
+ newblock = ext4_ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
if (allocated > map->m_len)
allocated = map->m_len;
@@ -3729,7 +3667,7 @@ retry:
printk(KERN_ERR "%s: ext4_ext_map_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
- inode->i_ino, block, max_blocks);
+ inode->i_ino, map.m_lblk, max_blocks);
#endif
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index ee92b66d4558..5a5c55ddceef 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -130,8 +130,50 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
return dquot_file_open(inode, filp);
}
+/*
+ * ext4_llseek() copied from generic_file_llseek() to handle both
+ * block-mapped and extent-mapped maxbytes values. This should
+ * otherwise be identical to generic_file_llseek().
+ */
+loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ loff_t maxbytes;
+
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+ maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+ else
+ maxbytes = inode->i_sb->s_maxbytes;
+ mutex_lock(&inode->i_mutex);
+ switch (origin) {
+ case SEEK_END:
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ if (offset == 0) {
+ mutex_unlock(&inode->i_mutex);
+ return file->f_pos;
+ }
+ offset += file->f_pos;
+ break;
+ }
+
+ if (offset < 0 || offset > maxbytes) {
+ mutex_unlock(&inode->i_mutex);
+ return -EINVAL;
+ }
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ mutex_unlock(&inode->i_mutex);
+
+ return offset;
+}
+
const struct file_operations ext4_file_operations = {
- .llseek = generic_file_llseek,
+ .llseek = ext4_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = generic_file_aio_read,
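The new ext4_llseek() above is needed because the maximum file size differs by mapping type: extent-mapped inodes may seek up to the superblock's s_maxbytes, while block-mapped (indirect) inodes are limited to the smaller s_bitmap_maxbytes, a distinction generic_file_llseek() cannot express. A user-space sketch of that per-file clamp, with purely illustrative limits rather than the real ext4 values:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative limits only; the real values depend on the block size. */
  #define TOY_EXTENT_MAXBYTES	(1ULL << 50)	/* extent-mapped files */
  #define TOY_BITMAP_MAXBYTES	(1ULL << 42)	/* block-mapped files */

  /* SEEK_CUR-style seek: add offset to pos, reject anything past the
   * per-file limit (the kernel returns -EINVAL here). */
  static int64_t toy_llseek(int extent_mapped, int64_t pos, int64_t offset)
  {
  	uint64_t maxbytes = extent_mapped ? TOY_EXTENT_MAXBYTES
  					  : TOY_BITMAP_MAXBYTES;
  	int64_t new_pos = pos + offset;

  	if (new_pos < 0 || (uint64_t)new_pos > maxbytes)
  		return -1;
  	return new_pos;
  }

  int main(void)
  {
  	/* The same offset is accepted for an extent-mapped file but
  	 * rejected for a block-mapped one. */
  	printf("extent-mapped: %lld\n", (long long)toy_llseek(1, 0, 1LL << 45));
  	printf("block-mapped:  %lld\n", (long long)toy_llseek(0, 0, 1LL << 45));
  	return 0;
  }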
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 3f3ff5ee8f9d..c1a7bc923cf6 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -34,6 +34,89 @@
#include <trace/events/ext4.h>
+static void dump_completed_IO(struct inode * inode)
+{
+#ifdef EXT4_DEBUG
+ struct list_head *cur, *before, *after;
+ ext4_io_end_t *io, *io0, *io1;
+ unsigned long flags;
+
+ if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
+ ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
+ return;
+ }
+
+ ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+ list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
+ cur = &io->list;
+ before = cur->prev;
+ io0 = container_of(before, ext4_io_end_t, list);
+ after = cur->next;
+ io1 = container_of(after, ext4_io_end_t, list);
+
+ ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+ io, inode->i_ino, io0, io1);
+ }
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+#endif
+}
+
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When IO is completed, the work to convert unwritten extents to
+ * written is queued on a workqueue but may not get immediately
+ * scheduled. When fsync is called, we need to ensure the
+ * conversion is complete before fsync returns.
+ * The inode keeps track of a list of pending/completed IO that
+ * might need the conversion. This function walks through
+ * the list and converts the related unwritten extents of completed IO
+ * to written.
+ * The function returns 0 on success, or a negative error code.
+ */
+static int flush_completed_IO(struct inode *inode)
+{
+ ext4_io_end_t *io;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
+ int ret = 0;
+ int ret2 = 0;
+
+ if (list_empty(&ei->i_completed_io_list))
+ return ret;
+
+ dump_completed_IO(inode);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ while (!list_empty(&ei->i_completed_io_list)){
+ io = list_entry(ei->i_completed_io_list.next,
+ ext4_io_end_t, list);
+ /*
+ * Calling ext4_end_io_nolock() to convert completed
+ * IO to written.
+ *
+ * When ext4_sync_file() is called, run_queue() may already
+ * be about to flush the work corresponding to this io structure.
+ * It will be upset if it finds that the io structure related
+ * to the work to be scheduled has been freed.
+ *
+ * Thus we need to keep the io structure valid here after the
+ * conversion has finished. The io structure has a flag to
+ * avoid being converted twice, by both fsync and the background
+ * workqueue.
+ */
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ ret = ext4_end_io_nolock(io);
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ if (ret < 0)
+ ret2 = ret;
+ else
+ list_del_init(&io->list);
+ }
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ return (ret2 < 0) ? ret2 : 0;
+}
+
/*
* If we're not journaling and this is a just-created file, we have to
* sync our parent directory (if it was freshly created) since
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 45853e0d1f21..1ce240a23ebb 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -50,7 +50,7 @@
* need to use it within a single byte (to ensure we get endianness right).
* We can use memset for the rest of the bitmap as there are no other users.
*/
-void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
int i;
@@ -65,9 +65,10 @@ void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
}
/* Initializes an uninitialized inode bitmap */
-unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
- ext4_group_t block_group,
- struct ext4_group_desc *gdp)
+static unsigned ext4_init_inode_bitmap(struct super_block *sb,
+ struct buffer_head *bh,
+ ext4_group_t block_group,
+ struct ext4_group_desc *gdp)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -85,7 +86,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
}
memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
- mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
bh->b_data);
return EXT4_INODES_PER_GROUP(sb);
@@ -107,6 +108,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
return NULL;
+
bitmap_blk = ext4_inode_bitmap(sb, desc);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
@@ -123,6 +125,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
unlock_buffer(bh);
return bh;
}
+
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
ext4_init_inode_bitmap(sb, bh, block_group, desc);
@@ -133,6 +136,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
return bh;
}
ext4_unlock_group(sb, block_group);
+
if (buffer_uptodate(bh)) {
/*
* if not uninit if bh is uptodate,
@@ -411,8 +415,8 @@ struct orlov_stats {
* for a particular block group or flex_bg. If flex_size is 1, then g
* is a block group number; otherwise it is flex_bg number.
*/
-void get_orlov_stats(struct super_block *sb, ext4_group_t g,
- int flex_size, struct orlov_stats *stats)
+static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ int flex_size, struct orlov_stats *stats)
{
struct ext4_group_desc *desc;
struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
@@ -712,8 +716,17 @@ static int ext4_claim_inode(struct super_block *sb,
{
int free = 0, retval = 0, count;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+ /*
+ * We have to be sure that new inode allocation does not race with
+ * inode table initialization, because otherwise we may end up
+ * allocating and writing a new inode right before sb_issue_zeroout
+ * takes place, which would then overwrite our new inode with zeroes. So we
+ * take alloc_sem to prevent it.
+ */
+ down_read(&grp->alloc_sem);
ext4_lock_group(sb, group);
if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
/* not a free inode */
@@ -724,6 +737,7 @@ static int ext4_claim_inode(struct super_block *sb,
if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
ino > EXT4_INODES_PER_GROUP(sb)) {
ext4_unlock_group(sb, group);
+ up_read(&grp->alloc_sem);
ext4_error(sb, "reserved inode or inode > inodes count - "
"block_group = %u, inode=%lu", group,
ino + group * EXT4_INODES_PER_GROUP(sb));
@@ -772,6 +786,7 @@ static int ext4_claim_inode(struct super_block *sb,
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
ext4_unlock_group(sb, group);
+ up_read(&grp->alloc_sem);
return retval;
}
@@ -1205,3 +1220,109 @@ unsigned long ext4_count_dirs(struct super_block * sb)
}
return count;
}
+
+/*
+ * Zeroes the not-yet-zeroed inode table - just writes zeroes through the whole
+ * inode table. Must be called without any spinlock held. The only place
+ * it is called from on an active filesystem is the ext4lazyinit
+ * thread, so we do not need any special locks; however, we have to prevent
+ * inode allocation from the current group, so we take the alloc_sem lock to
+ * block ext4_claim_inode until we are finished.
+ */
+int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ int barrier)
+{
+ struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_desc *gdp = NULL;
+ struct buffer_head *group_desc_bh;
+ handle_t *handle;
+ ext4_fsblk_t blk;
+ int num, ret = 0, used_blks = 0;
+
+ /* This should not happen, but just to be sure check this */
+ if (sb->s_flags & MS_RDONLY) {
+ ret = 1;
+ goto out;
+ }
+
+ gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
+ if (!gdp)
+ goto out;
+
+ /*
+ * We do not need to lock this, because we are the only one
+ * handling this flag.
+ */
+ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+ goto out;
+
+ handle = ext4_journal_start_sb(sb, 1);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+
+ down_write(&grp->alloc_sem);
+ /*
+ * If inode bitmap was already initialized there may be some
+ * used inodes so we need to skip blocks with used inodes in
+ * inode table.
+ */
+ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
+ used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp)),
+ sbi->s_inodes_per_block);
+
+ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+ ext4_error(sb, "Something is wrong with group %u\n"
+ "Used itable blocks: %d"
+ "itable unused count: %u\n",
+ group, used_blks,
+ ext4_itable_unused_count(sb, gdp));
+ ret = 1;
+ goto out;
+ }
+
+ blk = ext4_inode_table(sb, gdp) + used_blks;
+ num = sbi->s_itb_per_group - used_blks;
+
+ BUFFER_TRACE(group_desc_bh, "get_write_access");
+ ret = ext4_journal_get_write_access(handle,
+ group_desc_bh);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Skip zeroout if the inode table is full. But we set the ZEROED
+ * flag anyway, because obviously, when it is full it does not need
+ * further zeroing.
+ */
+ if (unlikely(num == 0))
+ goto skip_zeroout;
+
+ ext4_debug("going to zero out inode table in group %d\n",
+ group);
+ ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
+ if (ret < 0)
+ goto err_out;
+ if (barrier)
+ blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+
+skip_zeroout:
+ ext4_lock_group(sb, group);
+ gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+ ext4_unlock_group(sb, group);
+
+ BUFFER_TRACE(group_desc_bh,
+ "call ext4_handle_dirty_metadata");
+ ret = ext4_handle_dirty_metadata(handle, NULL,
+ group_desc_bh);
+
+err_out:
+ up_write(&grp->alloc_sem);
+ ext4_journal_stop(handle);
+out:
+ return ret;
+}
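ext4_init_inode_table() above only zeroes the tail of a group's inode table: it first derives, from the group descriptor's unused-inode count, how many leading itable blocks may already hold live inodes and must be skipped. A stand-alone sketch of that arithmetic with made-up group geometry (the numbers are illustrative, not real superblock values):

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

  int main(void)
  {
  	/* Illustrative geometry: 4K blocks, 256-byte inodes. */
  	unsigned inodes_per_group = 8192;
  	unsigned inodes_per_block = 16;
  	unsigned itable_unused    = 8100;	/* from the group descriptor */

  	/* Leading itable blocks that may contain live inodes and must
  	 * therefore not be zeroed by the lazyinit thread. */
  	unsigned used_blks = DIV_ROUND_UP(inodes_per_group - itable_unused,
  					  inodes_per_block);
  	unsigned itb_per_group = DIV_ROUND_UP(inodes_per_group,
  					      inodes_per_block);

  	printf("skip %u of %u itable blocks, zero the remaining %u\n",
  	       used_blks, itb_per_group, itb_per_group - used_blks);
  	return 0;
  }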
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4b8debeb3965..2d6c6c8c036d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -60,6 +60,12 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
+static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
+static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
+static int __ext4_journalled_writepage(struct page *page, unsigned int len);
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
/*
* Test whether an inode is a fast symlink.
@@ -755,6 +761,11 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
* parent to disk.
*/
bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+ err = -EIO;
+ goto failed;
+ }
+
branch[n].bh = bh;
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
@@ -1207,8 +1218,10 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
break;
idx++;
num++;
- if (num >= max_pages)
+ if (num >= max_pages) {
+ done = 1;
break;
+ }
}
pagevec_release(&pvec);
}
@@ -1538,10 +1551,10 @@ static int do_journal_get_write_access(handle_t *handle,
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
/*
- * __block_prepare_write() could have dirtied some buffers. Clean
+ * __block_write_begin() could have dirtied some buffers. Clean
* the dirty bit as jbd2_journal_get_write_access() could complain
* otherwise about fs integrity issues. Setting of the dirty bit
- * by __block_prepare_write() isn't a real problem here as we clear
+ * by __block_write_begin() isn't a real problem here as we clear
* the bit before releasing a page lock and thus writeback cannot
* ever write the buffer.
*/
@@ -1995,16 +2008,23 @@ static void ext4_da_page_release_reservation(struct page *page,
*
* As pages are already locked by write_cache_pages(), we can't use it
*/
-static int mpage_da_submit_io(struct mpage_da_data *mpd)
+static int mpage_da_submit_io(struct mpage_da_data *mpd,
+ struct ext4_map_blocks *map)
{
- long pages_skipped;
struct pagevec pvec;
unsigned long index, end;
int ret = 0, err, nr_pages, i;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
+ loff_t size = i_size_read(inode);
+ unsigned int len, block_start;
+ struct buffer_head *bh, *page_bufs = NULL;
+ int journal_data = ext4_should_journal_data(inode);
+ sector_t pblock = 0, cur_logical = 0;
+ struct ext4_io_submit io_submit;
BUG_ON(mpd->next_page <= mpd->first_page);
+ memset(&io_submit, 0, sizeof(io_submit));
/*
* We need to start from the first_page to the next_page - 1
* to make sure we also write the mapped dirty buffer_heads.
@@ -2020,122 +2040,108 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
+ int commit_write = 0, redirty_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
if (index > end)
break;
+
+ if (index == size >> PAGE_CACHE_SHIFT)
+ len = size & ~PAGE_CACHE_MASK;
+ else
+ len = PAGE_CACHE_SIZE;
+ if (map) {
+ cur_logical = index << (PAGE_CACHE_SHIFT -
+ inode->i_blkbits);
+ pblock = map->m_pblk + (cur_logical -
+ map->m_lblk);
+ }
index++;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
- pages_skipped = mpd->wbc->pages_skipped;
- err = mapping->a_ops->writepage(page, mpd->wbc);
- if (!err && (pages_skipped == mpd->wbc->pages_skipped))
- /*
- * have successfully written the page
- * without skipping the same
- */
- mpd->pages_written++;
/*
- * In error case, we have to continue because
- * remaining pages are still locked
- * XXX: unlock and re-dirty them?
+ * If the page does not have buffers (for
+ * whatever reason), try to create them using
+ * __block_write_begin. If this fails,
+ * redirty the page and move on.
*/
- if (ret == 0)
- ret = err;
- }
- pagevec_release(&pvec);
- }
- return ret;
-}
-
-/*
- * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
- *
- * the function goes through all passed space and put actual disk
- * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
- */
-static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
- struct ext4_map_blocks *map)
-{
- struct inode *inode = mpd->inode;
- struct address_space *mapping = inode->i_mapping;
- int blocks = map->m_len;
- sector_t pblock = map->m_pblk, cur_logical;
- struct buffer_head *head, *bh;
- pgoff_t index, end;
- struct pagevec pvec;
- int nr_pages, i;
-
- index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- pagevec_init(&pvec, 0);
-
- while (index <= end) {
- /* XXX: optimize tail */
- nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
- if (nr_pages == 0)
- break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- index = page->index;
- if (index > end)
- break;
- index++;
-
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- BUG_ON(!page_has_buffers(page));
-
- bh = page_buffers(page);
- head = bh;
-
- /* skip blocks out of the range */
- do {
- if (cur_logical >= map->m_lblk)
- break;
- cur_logical++;
- } while ((bh = bh->b_this_page) != head);
+ if (!page_has_buffers(page)) {
+ if (__block_write_begin(page, 0, len,
+ noalloc_get_block_write)) {
+ redirty_page:
+ redirty_page_for_writepage(mpd->wbc,
+ page);
+ unlock_page(page);
+ continue;
+ }
+ commit_write = 1;
+ }
+ bh = page_bufs = page_buffers(page);
+ block_start = 0;
do {
- if (cur_logical >= map->m_lblk + blocks)
- break;
-
- if (buffer_delay(bh) || buffer_unwritten(bh)) {
-
- BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
-
+ if (!bh)
+ goto redirty_page;
+ if (map && (cur_logical >= map->m_lblk) &&
+ (cur_logical <= (map->m_lblk +
+ (map->m_len - 1)))) {
if (buffer_delay(bh)) {
clear_buffer_delay(bh);
bh->b_blocknr = pblock;
- } else {
- /*
- * unwritten already should have
- * blocknr assigned. Verify that
- */
- clear_buffer_unwritten(bh);
- BUG_ON(bh->b_blocknr != pblock);
}
+ if (buffer_unwritten(bh) ||
+ buffer_mapped(bh))
+ BUG_ON(bh->b_blocknr != pblock);
+ if (map->m_flags & EXT4_MAP_UNINIT)
+ set_buffer_uninit(bh);
+ clear_buffer_unwritten(bh);
+ }
- } else if (buffer_mapped(bh))
- BUG_ON(bh->b_blocknr != pblock);
-
- if (map->m_flags & EXT4_MAP_UNINIT)
- set_buffer_uninit(bh);
+ /* redirty page if block allocation undone */
+ if (buffer_delay(bh) || buffer_unwritten(bh))
+ redirty_page = 1;
+ bh = bh->b_this_page;
+ block_start += bh->b_size;
cur_logical++;
pblock++;
- } while ((bh = bh->b_this_page) != head);
+ } while (bh != page_bufs);
+
+ if (redirty_page)
+ goto redirty_page;
+
+ if (commit_write)
+ /* mark the buffer_heads as dirty & uptodate */
+ block_commit_write(page, 0, len);
+
+ /*
+ * Delalloc doesn't support data journalling,
+ * but eventually maybe we'll lift this
+ * restriction.
+ */
+ if (unlikely(journal_data && PageChecked(page)))
+ err = __ext4_journalled_writepage(page, len);
+ else
+ err = ext4_bio_write_page(&io_submit, page,
+ len, mpd->wbc);
+
+ if (!err)
+ mpd->pages_written++;
+ /*
+ * In the error case, we have to continue because
+ * the remaining pages are still locked
+ */
+ if (ret == 0)
+ ret = err;
}
pagevec_release(&pvec);
}
+ ext4_io_submit(&io_submit);
+ return ret;
}
-
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
sector_t logical, long blk_cnt)
{
@@ -2187,35 +2193,32 @@ static void ext4_print_free_blocks(struct inode *inode)
}
/*
- * mpage_da_map_blocks - go through given space
+ * mpage_da_map_and_submit - go through the given space, map the blocks
+ * if necessary, and then submit them for I/O
*
* @mpd - bh describing space
*
* The function skips space we know is already mapped to disk blocks.
*
*/
-static int mpage_da_map_blocks(struct mpage_da_data *mpd)
+static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
int err, blks, get_blocks_flags;
- struct ext4_map_blocks map;
+ struct ext4_map_blocks map, *mapp = NULL;
sector_t next = mpd->b_blocknr;
unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
handle_t *handle = NULL;
/*
- * We consider only non-mapped and non-allocated blocks
- */
- if ((mpd->b_state & (1 << BH_Mapped)) &&
- !(mpd->b_state & (1 << BH_Delay)) &&
- !(mpd->b_state & (1 << BH_Unwritten)))
- return 0;
-
- /*
- * If we didn't accumulate anything to write simply return
+ * If the blocks are mapped already, or we couldn't accumulate
+ * any blocks, then proceed immediately to the submission stage.
*/
- if (!mpd->b_size)
- return 0;
+ if ((mpd->b_size == 0) ||
+ ((mpd->b_state & (1 << BH_Mapped)) &&
+ !(mpd->b_state & (1 << BH_Delay)) &&
+ !(mpd->b_state & (1 << BH_Unwritten))))
+ goto submit_io;
handle = ext4_journal_current_handle();
BUG_ON(!handle);
@@ -2252,17 +2255,18 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
err = blks;
/*
- * If get block returns with error we simply
- * return. Later writepage will redirty the page and
- * writepages will find the dirty page again
+ * If get block returns EAGAIN or ENOSPC and there
+ * appear to be free blocks, we will call
+ * ext4_writepage() for all of the pages, which will
+ * just redirty them.
*/
if (err == -EAGAIN)
- return 0;
+ goto submit_io;
if (err == -ENOSPC &&
ext4_count_free_blocks(sb)) {
mpd->retval = err;
- return 0;
+ goto submit_io;
}
/*
@@ -2287,10 +2291,11 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
/* invalidate all the pages */
ext4_da_block_invalidatepages(mpd, next,
mpd->b_size >> mpd->inode->i_blkbits);
- return err;
+ return;
}
BUG_ON(blks == 0);
+ mapp = &map;
if (map.m_flags & EXT4_MAP_NEW) {
struct block_device *bdev = mpd->inode->i_sb->s_bdev;
int i;
@@ -2299,18 +2304,11 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
unmap_underlying_metadata(bdev, map.m_pblk + i);
}
- /*
- * If blocks are delayed marked, we need to
- * put actual blocknr and drop delayed bit
- */
- if ((mpd->b_state & (1 << BH_Delay)) ||
- (mpd->b_state & (1 << BH_Unwritten)))
- mpage_put_bnr_to_bhs(mpd, &map);
-
if (ext4_should_order_data(mpd->inode)) {
err = ext4_jbd2_file_inode(handle, mpd->inode);
if (err)
- return err;
+ /* This only happens if the journal is aborted */
+ return;
}
/*
@@ -2321,10 +2319,16 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
disksize = i_size_read(mpd->inode);
if (disksize > EXT4_I(mpd->inode)->i_disksize) {
ext4_update_i_disksize(mpd->inode, disksize);
- return ext4_mark_inode_dirty(handle, mpd->inode);
+ err = ext4_mark_inode_dirty(handle, mpd->inode);
+ if (err)
+ ext4_error(mpd->inode->i_sb,
+ "Failed to mark inode %lu dirty",
+ mpd->inode->i_ino);
}
- return 0;
+submit_io:
+ mpage_da_submit_io(mpd, mapp);
+ mpd->io_done = 1;
}
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
@@ -2401,9 +2405,7 @@ flush_it:
* We couldn't merge the block to our extent, so we
* need to flush current extent and start new one
*/
- if (mpage_da_map_blocks(mpd) == 0)
- mpage_da_submit_io(mpd);
- mpd->io_done = 1;
+ mpage_da_map_and_submit(mpd);
return;
}
@@ -2422,9 +2424,9 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
* The function finds extents of pages and scan them for all blocks.
*/
static int __mpage_da_writepage(struct page *page,
- struct writeback_control *wbc, void *data)
+ struct writeback_control *wbc,
+ struct mpage_da_data *mpd)
{
- struct mpage_da_data *mpd = data;
struct inode *inode = mpd->inode;
struct buffer_head *bh, *head;
sector_t logical;
@@ -2435,15 +2437,13 @@ static int __mpage_da_writepage(struct page *page,
if (mpd->next_page != page->index) {
/*
* Nope, we can't. So, we map non-allocated blocks
- * and start IO on them using writepage()
+ * and start IO on them
*/
if (mpd->next_page != mpd->first_page) {
- if (mpage_da_map_blocks(mpd) == 0)
- mpage_da_submit_io(mpd);
+ mpage_da_map_and_submit(mpd);
/*
* skip rest of the page in the page_vec
*/
- mpd->io_done = 1;
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return MPAGE_DA_EXTENT_TAIL;
@@ -2550,8 +2550,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
if (buffer_delay(bh))
return 0; /* Not sure this could or should happen */
/*
- * XXX: __block_prepare_write() unmaps passed block,
- * is it OK?
+ * XXX: __block_write_begin() unmaps passed block, is it OK?
*/
ret = ext4_da_reserve_space(inode, iblock);
if (ret)
@@ -2583,7 +2582,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
/*
* This function is used as a standard get_block_t calback function
* when there is no desire to allocate any blocks. It is used as a
- * callback function for block_prepare_write() and block_write_full_page().
+ * callback function for block_write_begin() and block_write_full_page().
* These functions should only try to map a single block at a time.
*
* Since this function doesn't do block allocations even if the caller
@@ -2623,6 +2622,7 @@ static int __ext4_journalled_writepage(struct page *page,
int ret = 0;
int err;
+ ClearPageChecked(page);
page_bufs = page_buffers(page);
BUG_ON(!page_bufs);
walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
@@ -2700,7 +2700,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
- int ret = 0;
+ int ret = 0, commit_write = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
@@ -2713,71 +2713,46 @@ static int ext4_writepage(struct page *page,
else
len = PAGE_CACHE_SIZE;
- if (page_has_buffers(page)) {
- page_bufs = page_buffers(page);
- if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_delay_or_unwritten)) {
- /*
- * We don't want to do block allocation
- * So redirty the page and return
- * We may reach here when we do a journal commit
- * via journal_submit_inode_data_buffers.
- * If we don't have mapping block we just ignore
- * them. We can also reach here via shrink_page_list
- */
+ /*
+ * If the page does not have buffers (for whatever reason),
+ * try to create them using __block_write_begin. If this
+ * fails, redirty the page and move on.
+ */
+ if (!page_has_buffers(page)) {
+ if (__block_write_begin(page, 0, len,
+ noalloc_get_block_write)) {
+ redirty_page:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
- } else {
+ commit_write = 1;
+ }
+ page_bufs = page_buffers(page);
+ if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+ ext4_bh_delay_or_unwritten)) {
/*
- * The test for page_has_buffers() is subtle:
- * We know the page is dirty but it lost buffers. That means
- * that at some moment in time after write_begin()/write_end()
- * has been called all buffers have been clean and thus they
- * must have been written at least once. So they are all
- * mapped and we can happily proceed with mapping them
- * and writing the page.
- *
- * Try to initialize the buffer_heads and check whether
- * all are mapped and non delay. We don't want to
- * do block allocation here.
+ * We don't want to do block allocation, so redirty the
+ * page and return. We may reach here when we do a
+ * journal commit via journal_submit_inode_data_buffers.
+ * If we don't have a mapping block we just ignore them.
+ * We can also reach here via shrink_page_list.
*/
- ret = block_prepare_write(page, 0, len,
- noalloc_get_block_write);
- if (!ret) {
- page_bufs = page_buffers(page);
- /* check whether all are mapped and non delay */
- if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_delay_or_unwritten)) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
- }
- } else {
- /*
- * We can't do block allocation here
- * so just redity the page and unlock
- * and return
- */
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
- }
+ goto redirty_page;
+ }
+ if (commit_write)
/* now mark the buffer_heads as dirty and uptodate */
block_commit_write(page, 0, len);
- }
- if (PageChecked(page) && ext4_should_journal_data(inode)) {
+ if (PageChecked(page) && ext4_should_journal_data(inode))
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
- ClearPageChecked(page);
return __ext4_journalled_writepage(page, len);
- }
- if (page_bufs && buffer_uninit(page_bufs)) {
+ if (buffer_uninit(page_bufs)) {
ext4_set_bh_endio(page_bufs, inode);
ret = block_write_full_page_endio(page, noalloc_get_block_write,
wbc, ext4_end_io_buffer_write);
@@ -2824,25 +2799,32 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
*/
static int write_cache_pages_da(struct address_space *mapping,
struct writeback_control *wbc,
- struct mpage_da_data *mpd)
+ struct mpage_da_data *mpd,
+ pgoff_t *done_index)
{
int ret = 0;
int done = 0;
struct pagevec pvec;
- int nr_pages;
+ unsigned nr_pages;
pgoff_t index;
pgoff_t end; /* Inclusive */
long nr_to_write = wbc->nr_to_write;
+ int tag;
pagevec_init(&pvec, 0);
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag = PAGECACHE_TAG_TOWRITE;
+ else
+ tag = PAGECACHE_TAG_DIRTY;
+
+ *done_index = index;
while (!done && (index <= end)) {
int i;
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
break;
@@ -2862,6 +2844,8 @@ static int write_cache_pages_da(struct address_space *mapping,
break;
}
+ *done_index = page->index + 1;
+
lock_page(page);
/*
@@ -2947,6 +2931,8 @@ static int ext4_da_writepages(struct address_space *mapping,
long desired_nr_to_write, nr_to_writebump = 0;
loff_t range_start = wbc->range_start;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+ pgoff_t done_index = 0;
+ pgoff_t end;
trace_ext4_da_writepages(inode, wbc);
@@ -2982,8 +2968,11 @@ static int ext4_da_writepages(struct address_space *mapping,
wbc->range_start = index << PAGE_CACHE_SHIFT;
wbc->range_end = LLONG_MAX;
wbc->range_cyclic = 0;
- } else
+ end = -1;
+ } else {
index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ }
/*
* This works around two forms of stupidity. The first is in
@@ -3002,9 +2991,12 @@ static int ext4_da_writepages(struct address_space *mapping,
* sbi->max_writeback_mb_bump whichever is smaller.
*/
max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
- if (!range_cyclic && range_whole)
- desired_nr_to_write = wbc->nr_to_write * 8;
- else
+ if (!range_cyclic && range_whole) {
+ if (wbc->nr_to_write == LONG_MAX)
+ desired_nr_to_write = wbc->nr_to_write;
+ else
+ desired_nr_to_write = wbc->nr_to_write * 8;
+ } else
desired_nr_to_write = ext4_num_dirty_pages(inode, index,
max_pages);
if (desired_nr_to_write > max_pages)
@@ -3021,6 +3013,9 @@ static int ext4_da_writepages(struct address_space *mapping,
pages_skipped = wbc->pages_skipped;
retry:
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ tag_pages_for_writeback(mapping, index, end);
+
while (!ret && wbc->nr_to_write > 0) {
/*
@@ -3059,16 +3054,14 @@ retry:
mpd.io_done = 0;
mpd.pages_written = 0;
mpd.retval = 0;
- ret = write_cache_pages_da(mapping, wbc, &mpd);
+ ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
* haven't done the I/O yet, map the blocks and submit
* them for I/O.
*/
if (!mpd.io_done && mpd.next_page != mpd.first_page) {
- if (mpage_da_map_blocks(&mpd) == 0)
- mpage_da_submit_io(&mpd);
- mpd.io_done = 1;
+ mpage_da_map_and_submit(&mpd);
ret = MPAGE_DA_EXTENT_TAIL;
}
trace_ext4_da_write_pages(inode, &mpd);
@@ -3115,14 +3108,13 @@ retry:
__func__, wbc->nr_to_write, ret);
/* Update index */
- index += pages_written;
wbc->range_cyclic = range_cyclic;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
* set the writeback_index so that range_cyclic
* mode will write it back later
*/
- mapping->writeback_index = index;
+ mapping->writeback_index = done_index;
out_writepages:
wbc->nr_to_write -= nr_to_writebump;
@@ -3457,15 +3449,6 @@ ext4_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
-static void ext4_free_io_end(ext4_io_end_t *io)
-{
- BUG_ON(!io);
- if (io->page)
- put_page(io->page);
- iput(io->inode);
- kfree(io);
-}
-
static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh;
@@ -3642,173 +3625,6 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
-static void dump_completed_IO(struct inode * inode)
-{
-#ifdef EXT4_DEBUG
- struct list_head *cur, *before, *after;
- ext4_io_end_t *io, *io0, *io1;
- unsigned long flags;
-
- if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
- ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
- return;
- }
-
- ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
- spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
- list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
- cur = &io->list;
- before = cur->prev;
- io0 = container_of(before, ext4_io_end_t, list);
- after = cur->next;
- io1 = container_of(after, ext4_io_end_t, list);
-
- ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
- io, inode->i_ino, io0, io1);
- }
- spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-#endif
-}
-
-/*
- * check a range of space and convert unwritten extents to written.
- */
-static int ext4_end_io_nolock(ext4_io_end_t *io)
-{
- struct inode *inode = io->inode;
- loff_t offset = io->offset;
- ssize_t size = io->size;
- int ret = 0;
-
- ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
- "list->prev 0x%p\n",
- io, inode->i_ino, io->list.next, io->list.prev);
-
- if (list_empty(&io->list))
- return ret;
-
- if (io->flag != EXT4_IO_UNWRITTEN)
- return ret;
-
- ret = ext4_convert_unwritten_extents(inode, offset, size);
- if (ret < 0) {
- printk(KERN_EMERG "%s: failed to convert unwritten"
- "extents to written extents, error is %d"
- " io is still on inode %lu aio dio list\n",
- __func__, ret, inode->i_ino);
- return ret;
- }
-
- if (io->iocb)
- aio_complete(io->iocb, io->result, 0);
- /* clear the DIO AIO unwritten flag */
- io->flag = 0;
- return ret;
-}
-
-/*
- * work on completed aio dio IO, to convert unwritten extents to extents
- */
-static void ext4_end_io_work(struct work_struct *work)
-{
- ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
- struct inode *inode = io->inode;
- struct ext4_inode_info *ei = EXT4_I(inode);
- unsigned long flags;
- int ret;
-
- mutex_lock(&inode->i_mutex);
- ret = ext4_end_io_nolock(io);
- if (ret < 0) {
- mutex_unlock(&inode->i_mutex);
- return;
- }
-
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (!list_empty(&io->list))
- list_del_init(&io->list);
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- mutex_unlock(&inode->i_mutex);
- ext4_free_io_end(io);
-}
-
-/*
- * This function is called from ext4_sync_file().
- *
- * When IO is completed, the work to convert unwritten extents to
- * written is queued on workqueue but may not get immediately
- * scheduled. When fsync is called, we need to ensure the
- * conversion is complete before fsync returns.
- * The inode keeps track of a list of pending/completed IO that
- * might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents for completed IO
- * to written.
- * The function return the number of pending IOs on success.
- */
-int flush_completed_IO(struct inode *inode)
-{
- ext4_io_end_t *io;
- struct ext4_inode_info *ei = EXT4_I(inode);
- unsigned long flags;
- int ret = 0;
- int ret2 = 0;
-
- if (list_empty(&ei->i_completed_io_list))
- return ret;
-
- dump_completed_IO(inode);
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- while (!list_empty(&ei->i_completed_io_list)){
- io = list_entry(ei->i_completed_io_list.next,
- ext4_io_end_t, list);
- /*
- * Calling ext4_end_io_nolock() to convert completed
- * IO to written.
- *
- * When ext4_sync_file() is called, run_queue() may already
- * about to flush the work corresponding to this io structure.
- * It will be upset if it founds the io structure related
- * to the work-to-be schedule is freed.
- *
- * Thus we need to keep the io structure still valid here after
- * convertion finished. The io structure has a flag to
- * avoid double converting from both fsync and background work
- * queue work.
- */
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- ret = ext4_end_io_nolock(io);
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (ret < 0)
- ret2 = ret;
- else
- list_del_init(&io->list);
- }
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- return (ret2 < 0) ? ret2 : 0;
-}
-
-static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
-{
- ext4_io_end_t *io = NULL;
-
- io = kmalloc(sizeof(*io), flags);
-
- if (io) {
- igrab(inode);
- io->inode = inode;
- io->flag = 0;
- io->offset = 0;
- io->size = 0;
- io->page = NULL;
- io->iocb = NULL;
- io->result = 0;
- INIT_WORK(&io->work, ext4_end_io_work);
- INIT_LIST_HEAD(&io->list);
- }
-
- return io;
-}
-
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private, int ret,
bool is_async)
@@ -3828,7 +3644,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
size);
/* if not aio dio with unwritten extents, just free io and return */
- if (io_end->flag != EXT4_IO_UNWRITTEN){
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
iocb->private = NULL;
out:
@@ -3845,14 +3661,14 @@ out:
}
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
- /* queue the work to convert unwritten extents to written */
- queue_work(wq, &io_end->work);
-
/* Add the io_end to per-inode completed aio dio list*/
ei = EXT4_I(io_end->inode);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
+ /* queue the work to convert unwritten extents to written */
+ queue_work(wq, &io_end->work);
iocb->private = NULL;
}
@@ -3873,7 +3689,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
goto out;
}
- io_end->flag = EXT4_IO_UNWRITTEN;
+ io_end->flag = EXT4_IO_END_UNWRITTEN;
inode = io_end->inode;
/* Add the io_end to per-inode completed io list*/
@@ -5464,6 +5280,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error, rc = 0;
+ int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
error = inode_change_ok(inode, attr);
@@ -5519,8 +5336,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
-
- error = ext4_orphan_add(handle, inode);
+ if (ext4_handle_valid(handle)) {
+ error = ext4_orphan_add(handle, inode);
+ orphan = 1;
+ }
EXT4_I(inode)->i_disksize = attr->ia_size;
rc = ext4_mark_inode_dirty(handle, inode);
if (!error)
@@ -5538,6 +5357,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
goto err_out;
}
ext4_orphan_del(handle, inode);
+ orphan = 0;
ext4_journal_stop(handle);
goto err_out;
}
@@ -5560,7 +5380,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
* If the call to ext4_truncate failed to get a transaction handle at
* all, we need to clean up the in-core orphan list manually.
*/
- if (inode->i_nlink)
+ if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
if (!rc && (ia_valid & ATTR_MODE))
@@ -5643,7 +5463,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
*
* Also account for superblock, inode, quota and xattr blocks
*/
-int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 19aa0d44d822..c58eba34724a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -338,6 +338,14 @@
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_ext_cachep;
+
+/* We create slab caches for groupinfo data structures based on the
+ * superblock block size. A cache is created on demand for each unique
+ * s_blocksize_bits and is shared by all filesystems using that block size. */
+#define NR_GRPINFO_CACHES \
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
+
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -939,6 +947,85 @@ out:
}
/*
+ * lock the group_info alloc_sem of all the groups
+ * belonging to the same buddy cache page. This
+ * makes sure other parallel operations on the buddy
+ * cache don't happen while holding the buddy cache
+ * lock
+ */
+static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
+ ext4_group_t group)
+{
+ int i;
+ int block, pnum;
+ int blocks_per_page;
+ int groups_per_page;
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ ext4_group_t first_group;
+ struct ext4_group_info *grp;
+
+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+ /*
+ * the buddy cache inode stores the block bitmap
+ * and buddy information in consecutive blocks.
+ * So for each group we need two blocks.
+ */
+ block = group * 2;
+ pnum = block / blocks_per_page;
+ first_group = pnum * blocks_per_page / 2;
+
+ groups_per_page = blocks_per_page >> 1;
+ if (groups_per_page == 0)
+ groups_per_page = 1;
+ /* read all groups the page covers into the cache */
+ for (i = 0; i < groups_per_page; i++) {
+
+ if ((first_group + i) >= ngroups)
+ break;
+ grp = ext4_get_group_info(sb, first_group + i);
+ /* take each group's write allocation
+ * semaphore. This makes sure there is
+ * no block allocation going on in any
+ * of those groups
+ */
+ down_write_nested(&grp->alloc_sem, i);
+ }
+ return i;
+}
+
+static void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
+ ext4_group_t group, int locked_group)
+{
+ int i;
+ int block, pnum;
+ int blocks_per_page;
+ ext4_group_t first_group;
+ struct ext4_group_info *grp;
+
+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+ /*
+ * the buddy cache inode stores the block bitmap
+ * and buddy information in consecutive blocks.
+ * So for each group we need two blocks.
+ */
+ block = group * 2;
+ pnum = block / blocks_per_page;
+ first_group = pnum * blocks_per_page / 2;
+ /* release locks on all the groups */
+ for (i = 0; i < locked_group; i++) {
+
+ grp = ext4_get_group_info(sb, first_group + i);
+ /* release each group's write allocation
+ * semaphore, so that block allocation can
+ * resume in those groups
+ */
+ up_write(&grp->alloc_sem);
+ }
+
+}
+
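A minimal user-space sketch (illustrative only, not part of the patch) of the block/pnum/first_group arithmetic the two helpers above share; the page size, block sizes and group number below are made up:

/* Sketch only: which groups share one buddy cache page.  Each group keeps
 * its block bitmap and buddy data in two consecutive blocks of the buddy
 * cache inode, mirroring the logic in the helpers above. */
#include <stdio.h>

static void show(int page_size, int block_size, int group)
{
	int blocks_per_page = page_size / block_size;
	int block = group * 2;                  /* bitmap block of this group */
	int pnum = block / blocks_per_page;     /* page holding that block */
	int first_group = pnum * blocks_per_page / 2;
	int groups_per_page = blocks_per_page / 2;

	if (groups_per_page == 0)
		groups_per_page = 1;
	printf("group %d: page %d covers groups %d..%d\n",
	       group, pnum, first_group, first_group + groups_per_page - 1);
}

int main(void)
{
	show(4096, 1024, 5);    /* 4 blocks per page -> 2 groups per page */
	show(4096, 4096, 5);    /* 1 block per page  -> 1 group per page  */
	return 0;
}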
+/*
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
* block group lock of all groups for this page; do not hold the BG lock when
* calling this routine!
@@ -1915,84 +2002,6 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
return 0;
}
-/*
- * lock the group_info alloc_sem of all the groups
- * belonging to the same buddy cache page. This
- * make sure other parallel operation on the buddy
- * cache doesn't happen whild holding the buddy cache
- * lock
- */
-int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
-{
- int i;
- int block, pnum;
- int blocks_per_page;
- int groups_per_page;
- ext4_group_t ngroups = ext4_get_groups_count(sb);
- ext4_group_t first_group;
- struct ext4_group_info *grp;
-
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
- /*
- * the buddy cache inode stores the block bitmap
- * and buddy information in consecutive blocks.
- * So for each group we need two blocks.
- */
- block = group * 2;
- pnum = block / blocks_per_page;
- first_group = pnum * blocks_per_page / 2;
-
- groups_per_page = blocks_per_page >> 1;
- if (groups_per_page == 0)
- groups_per_page = 1;
- /* read all groups the page covers into the cache */
- for (i = 0; i < groups_per_page; i++) {
-
- if ((first_group + i) >= ngroups)
- break;
- grp = ext4_get_group_info(sb, first_group + i);
- /* take all groups write allocation
- * semaphore. This make sure there is
- * no block allocation going on in any
- * of that groups
- */
- down_write_nested(&grp->alloc_sem, i);
- }
- return i;
-}
-
-void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
- ext4_group_t group, int locked_group)
-{
- int i;
- int block, pnum;
- int blocks_per_page;
- ext4_group_t first_group;
- struct ext4_group_info *grp;
-
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
- /*
- * the buddy cache inode stores the block bitmap
- * and buddy information in consecutive blocks.
- * So for each group we need two blocks.
- */
- block = group * 2;
- pnum = block / blocks_per_page;
- first_group = pnum * blocks_per_page / 2;
- /* release locks on all the groups */
- for (i = 0; i < locked_group; i++) {
-
- grp = ext4_get_group_info(sb, first_group + i);
- /* take all groups write allocation
- * semaphore. This make sure there is
- * no block allocation going on in any
- * of that groups
- */
- up_write(&grp->alloc_sem);
- }
-
-}
-
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
@@ -2233,15 +2242,24 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
.release = seq_release,
};
+static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
+{
+ int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+ struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
+
+ BUG_ON(!cachep);
+ return cachep;
+}
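A quick stand-alone sketch (not part of the patch) of the blocksize-bits to cache-index mapping that get_groupinfo_cache() relies on; the MIN/MAX log-size constants are assumptions matching the usual ext4 limits of 1 KiB to 64 KiB blocks:

/* Sketch only: which ext4_groupinfo_caches[] slot a block size selects. */
#include <stdio.h>

#define MIN_BLOCK_LOG_SIZE 10	/* assumed EXT4_MIN_BLOCK_LOG_SIZE (1 KiB)  */
#define MAX_BLOCK_LOG_SIZE 16	/* assumed EXT4_MAX_BLOCK_LOG_SIZE (64 KiB) */

int main(void)
{
	int bits;

	for (bits = MIN_BLOCK_LOG_SIZE; bits <= MAX_BLOCK_LOG_SIZE; bits++)
		printf("block size %6d -> cache index %d (slab \"ext4_groupinfo_%d\")\n",
		       1 << bits, bits - MIN_BLOCK_LOG_SIZE, bits);
	return 0;
}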
/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *desc)
{
- int i, len;
+ int i;
int metalen = 0;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info **meta_group_info;
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
/*
* First check if this group is the first of a reserved block.
@@ -2261,22 +2279,16 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
meta_group_info;
}
- /*
- * calculate needed size. if change bb_counters size,
- * don't forget about ext4_mb_generate_buddy()
- */
- len = offsetof(typeof(**meta_group_info),
- bb_counters[sb->s_blocksize_bits + 2]);
-
meta_group_info =
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- meta_group_info[i] = kzalloc(len, GFP_KERNEL);
+ meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
if (meta_group_info[i] == NULL) {
printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
goto exit_group_info;
}
+ memset(meta_group_info[i], 0, kmem_cache_size(cachep));
set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
&(meta_group_info[i]->bb_state));
@@ -2331,6 +2343,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
int num_meta_group_infos_max;
int array_size;
struct ext4_group_desc *desc;
+ struct kmem_cache *cachep;
/* This is the number of blocks used by GDT */
num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
@@ -2373,6 +2386,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
printk(KERN_ERR "EXT4-fs: can't get new inode\n");
goto err_freesgi;
}
+ sbi->s_buddy_cache->i_ino = get_next_ino();
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
for (i = 0; i < ngroups; i++) {
desc = ext4_get_group_desc(sb, i, NULL);
@@ -2388,8 +2402,9 @@ static int ext4_mb_init_backend(struct super_block *sb)
return 0;
err_freebuddy:
+ cachep = get_groupinfo_cache(sb->s_blocksize_bits);
while (i-- > 0)
- kfree(ext4_get_group_info(sb, i));
+ kmem_cache_free(cachep, ext4_get_group_info(sb, i));
i = num_meta_group_infos;
while (i-- > 0)
kfree(sbi->s_group_info[i]);
@@ -2406,19 +2421,48 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
unsigned offset;
unsigned max;
int ret;
+ int cache_index;
+ struct kmem_cache *cachep;
+ char *namep = NULL;
i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
if (sbi->s_mb_offsets == NULL) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
if (sbi->s_mb_maxs == NULL) {
- kfree(sbi->s_mb_offsets);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+ cachep = ext4_groupinfo_caches[cache_index];
+ if (!cachep) {
+ char name[32];
+ int len = offsetof(struct ext4_group_info,
+ bb_counters[sb->s_blocksize_bits + 2]);
+
+ sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
+ namep = kstrdup(name, GFP_KERNEL);
+ if (!namep) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Need to free the kmem_cache_name() when we
+ * destroy the slab */
+ cachep = kmem_cache_create(namep, len, 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
+ if (!cachep) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ext4_groupinfo_caches[cache_index] = cachep;
}
/* order 0 is regular bitmap */
@@ -2439,9 +2483,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
/* init file for buddy data */
ret = ext4_mb_init_backend(sb);
if (ret != 0) {
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return ret;
+ goto out;
}
spin_lock_init(&sbi->s_md_lock);
@@ -2456,9 +2498,8 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
if (sbi->s_locality_groups == NULL) {
- kfree(sbi->s_mb_offsets);
- kfree(sbi->s_mb_maxs);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
for_each_possible_cpu(i) {
struct ext4_locality_group *lg;
@@ -2475,7 +2516,13 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
if (sbi->s_journal)
sbi->s_journal->j_commit_callback = release_blocks_on_commit;
- return 0;
+out:
+ if (ret) {
+ kfree(sbi->s_mb_offsets);
+ kfree(sbi->s_mb_maxs);
+ kfree(namep);
+ }
+ return ret;
}
/* need to called with the ext4 group lock held */
@@ -2503,6 +2550,7 @@ int ext4_mb_release(struct super_block *sb)
int num_meta_group_infos;
struct ext4_group_info *grinfo;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
if (sbi->s_group_info) {
for (i = 0; i < ngroups; i++) {
@@ -2513,7 +2561,7 @@ int ext4_mb_release(struct super_block *sb)
ext4_lock_group(sb, i);
ext4_mb_cleanup_pa(grinfo);
ext4_unlock_group(sb, i);
- kfree(grinfo);
+ kmem_cache_free(cachep, grinfo);
}
num_meta_group_infos = (ngroups +
EXT4_DESC_PER_BLOCK(sb) - 1) >>
@@ -2557,7 +2605,7 @@ int ext4_mb_release(struct super_block *sb)
return 0;
}
-static inline void ext4_issue_discard(struct super_block *sb,
+static inline int ext4_issue_discard(struct super_block *sb,
ext4_group_t block_group, ext4_grpblk_t block, int count)
{
int ret;
@@ -2567,10 +2615,11 @@ static inline void ext4_issue_discard(struct super_block *sb,
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
- if (ret == EOPNOTSUPP) {
+ if (ret == -EOPNOTSUPP) {
ext4_warning(sb, "discard not supported, disabling");
clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
}
+ return ret;
}
/*
@@ -2658,28 +2707,22 @@ static void ext4_remove_debugfs_entry(void)
#endif
-int __init init_ext4_mballoc(void)
+int __init ext4_init_mballoc(void)
{
- ext4_pspace_cachep =
- kmem_cache_create("ext4_prealloc_space",
- sizeof(struct ext4_prealloc_space),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
+ ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
+ SLAB_RECLAIM_ACCOUNT);
if (ext4_pspace_cachep == NULL)
return -ENOMEM;
- ext4_ac_cachep =
- kmem_cache_create("ext4_alloc_context",
- sizeof(struct ext4_allocation_context),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
+ ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
+ SLAB_RECLAIM_ACCOUNT);
if (ext4_ac_cachep == NULL) {
kmem_cache_destroy(ext4_pspace_cachep);
return -ENOMEM;
}
- ext4_free_ext_cachep =
- kmem_cache_create("ext4_free_block_extents",
- sizeof(struct ext4_free_data),
- 0, SLAB_RECLAIM_ACCOUNT, NULL);
+ ext4_free_ext_cachep = KMEM_CACHE(ext4_free_data,
+ SLAB_RECLAIM_ACCOUNT);
if (ext4_free_ext_cachep == NULL) {
kmem_cache_destroy(ext4_pspace_cachep);
kmem_cache_destroy(ext4_ac_cachep);
@@ -2689,8 +2732,9 @@ int __init init_ext4_mballoc(void)
return 0;
}
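In case the KMEM_CACHE() shorthand used above is unfamiliar: it derives the cache name, object size and alignment from the struct type itself. A user-space sketch of the same stringify/sizeof trick (the struct layout here is made up):

/* Sketch only: the #type / sizeof(type) trick behind KMEM_CACHE(). */
#include <stdio.h>

struct ext4_prealloc_space { int dummy[12]; };	/* illustrative layout */

#define SHOW_CACHE(type) \
	printf("cache \"%s\": object size %zu, align %zu\n", \
	       #type, sizeof(struct type), (size_t)__alignof__(struct type))

int main(void)
{
	SHOW_CACHE(ext4_prealloc_space);
	return 0;
}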
-void exit_ext4_mballoc(void)
+void ext4_exit_mballoc(void)
{
+ int i;
/*
* Wait for completion of call_rcu()'s on ext4_pspace_cachep
* before destroying the slab cache.
@@ -2699,6 +2743,15 @@ void exit_ext4_mballoc(void)
kmem_cache_destroy(ext4_pspace_cachep);
kmem_cache_destroy(ext4_ac_cachep);
kmem_cache_destroy(ext4_free_ext_cachep);
+
+ for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+ struct kmem_cache *cachep = ext4_groupinfo_caches[i];
+ if (cachep) {
+ char *name = (char *)kmem_cache_name(cachep);
+ kmem_cache_destroy(cachep);
+ kfree(name);
+ }
+ }
ext4_remove_debugfs_entry();
}
@@ -3535,8 +3588,7 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
*/
static noinline_for_stack int
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
- struct ext4_prealloc_space *pa,
- struct ext4_allocation_context *ac)
+ struct ext4_prealloc_space *pa)
{
struct super_block *sb = e4b->bd_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -3554,11 +3606,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
end = bit + pa->pa_len;
- if (ac) {
- ac->ac_sb = sb;
- ac->ac_inode = pa->pa_inode;
- }
-
while (bit < end) {
bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
if (bit >= end)
@@ -3569,16 +3616,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
(unsigned) next - bit, (unsigned) group);
free += next - bit;
- if (ac) {
- ac->ac_b_ex.fe_group = group;
- ac->ac_b_ex.fe_start = bit;
- ac->ac_b_ex.fe_len = next - bit;
- ac->ac_b_ex.fe_logical = 0;
- trace_ext4_mballoc_discard(ac);
- }
-
- trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit,
- next - bit);
+ trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
+ trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
+ grp_blk_start + bit, next - bit);
mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
bit = next + 1;
}
@@ -3601,29 +3641,19 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
static noinline_for_stack int
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
- struct ext4_prealloc_space *pa,
- struct ext4_allocation_context *ac)
+ struct ext4_prealloc_space *pa)
{
struct super_block *sb = e4b->bd_sb;
ext4_group_t group;
ext4_grpblk_t bit;
- trace_ext4_mb_release_group_pa(sb, ac, pa);
+ trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
-
- if (ac) {
- ac->ac_sb = sb;
- ac->ac_inode = NULL;
- ac->ac_b_ex.fe_group = group;
- ac->ac_b_ex.fe_start = bit;
- ac->ac_b_ex.fe_len = pa->pa_len;
- ac->ac_b_ex.fe_logical = 0;
- trace_ext4_mballoc_discard(ac);
- }
+ trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
return 0;
}
@@ -3644,7 +3674,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
- struct ext4_allocation_context *ac;
struct list_head list;
struct ext4_buddy e4b;
int err;
@@ -3673,9 +3702,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
INIT_LIST_HEAD(&list);
- ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
- if (ac)
- ac->ac_sb = sb;
repeat:
ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp,
@@ -3730,9 +3756,9 @@ repeat:
spin_unlock(pa->pa_obj_lock);
if (pa->pa_type == MB_GROUP_PA)
- ext4_mb_release_group_pa(&e4b, pa, ac);
+ ext4_mb_release_group_pa(&e4b, pa);
else
- ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+ ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
@@ -3740,8 +3766,6 @@ repeat:
out:
ext4_unlock_group(sb, group);
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
return free;
@@ -3762,7 +3786,6 @@ void ext4_discard_preallocations(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct buffer_head *bitmap_bh = NULL;
struct ext4_prealloc_space *pa, *tmp;
- struct ext4_allocation_context *ac;
ext4_group_t group = 0;
struct list_head list;
struct ext4_buddy e4b;
@@ -3778,11 +3801,6 @@ void ext4_discard_preallocations(struct inode *inode)
INIT_LIST_HEAD(&list);
- ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
- if (ac) {
- ac->ac_sb = sb;
- ac->ac_inode = inode;
- }
repeat:
/* first, collect all pa's in the inode */
spin_lock(&ei->i_prealloc_lock);
@@ -3852,7 +3870,7 @@ repeat:
ext4_lock_group(sb, group);
list_del(&pa->pa_group_list);
- ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+ ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -3861,8 +3879,6 @@ repeat:
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
}
/*
@@ -4060,14 +4076,10 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
struct ext4_buddy e4b;
struct list_head discard_list;
struct ext4_prealloc_space *pa, *tmp;
- struct ext4_allocation_context *ac;
mb_debug(1, "discard locality group preallocation\n");
INIT_LIST_HEAD(&discard_list);
- ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
- if (ac)
- ac->ac_sb = sb;
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
@@ -4119,15 +4131,13 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
}
ext4_lock_group(sb, group);
list_del(&pa->pa_group_list);
- ext4_mb_release_group_pa(&e4b, pa, ac);
+ ext4_mb_release_group_pa(&e4b, pa);
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
}
/*
@@ -4491,7 +4501,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
{
struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb;
- struct ext4_allocation_context *ac = NULL;
struct ext4_group_desc *gdp;
unsigned long freed = 0;
unsigned int overflow;
@@ -4531,6 +4540,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
if (!bh)
tbh = sb_find_get_block(inode->i_sb,
block + i);
+ if (unlikely(!tbh))
+ continue;
ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
inode, tbh, block + i);
}
@@ -4546,12 +4557,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
if (!ext4_should_writeback_data(inode))
flags |= EXT4_FREE_BLOCKS_METADATA;
- ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
- if (ac) {
- ac->ac_inode = inode;
- ac->ac_sb = sb;
- }
-
do_more:
overflow = 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -4609,12 +4614,7 @@ do_more:
BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
}
#endif
- if (ac) {
- ac->ac_b_ex.fe_group = block_group;
- ac->ac_b_ex.fe_start = bit;
- ac->ac_b_ex.fe_len = count;
- trace_ext4_mballoc_free(ac);
- }
+ trace_ext4_mballoc_free(sb, inode, block_group, bit, count);
err = ext4_mb_load_buddy(sb, block_group, &e4b);
if (err)
@@ -4640,12 +4640,12 @@ do_more:
* with group lock held. generate_buddy look at
* them with group lock_held
*/
+ if (test_opt(sb, DISCARD))
+ ext4_issue_discard(sb, block_group, bit, count);
ext4_lock_group(sb, block_group);
mb_clear_bits(bitmap_bh->b_data, bit, count);
mb_free_blocks(inode, &e4b, bit, count);
ext4_mb_return_to_preallocation(inode, &e4b, block, count);
- if (test_opt(sb, DISCARD))
- ext4_issue_discard(sb, block_group, bit, count);
}
ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4685,7 +4685,190 @@ error_return:
dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
- if (ac)
- kmem_cache_free(ext4_ac_cachep, ac);
return;
}
+
+/**
+ * ext4_trim_extent -- function to TRIM one single free extent in the group
+ * @sb: super block for the file system
+ * @start: starting block of the free extent in the alloc. group
+ * @count: number of blocks to TRIM
+ * @group: alloc. group we are working with
+ * @e4b: ext4 buddy for the group
+ *
+ * Trim "count" blocks starting at "start" in the "group". To assure that no
+ * one will allocate those blocks, mark them as used in the buddy bitmap. This
+ * must be called under the group lock.
+ */
+static int ext4_trim_extent(struct super_block *sb, int start, int count,
+ ext4_group_t group, struct ext4_buddy *e4b)
+{
+ struct ext4_free_extent ex;
+ int ret = 0;
+
+ assert_spin_locked(ext4_group_lock_ptr(sb, group));
+
+ ex.fe_start = start;
+ ex.fe_group = group;
+ ex.fe_len = count;
+
+ /*
+ * Mark blocks used, so no one can reuse them while
+ * being trimmed.
+ */
+ mb_mark_used(e4b, &ex);
+ ext4_unlock_group(sb, group);
+
+ ret = ext4_issue_discard(sb, group, start, count);
+ if (ret)
+ ext4_std_error(sb, ret);
+
+ ext4_lock_group(sb, group);
+ mb_free_blocks(NULL, e4b, start, ex.fe_len);
+ return ret;
+}
+
+/**
+ * ext4_trim_all_free -- function to trim all free space in alloc. group
+ * @sb: super block for file system
+ * @e4b: ext4 buddy
+ * @start: first group block to examine
+ * @max: last group block to examine
+ * @minblocks: minimum extent block count
+ *
+ * ext4_trim_all_free walks through the group's buddy bitmap searching for
+ * free extents. When a free extent of at least @minblocks blocks is found,
+ * it is marked as used in the buddy bitmap, a TRIM command is issued on it
+ * via ext4_trim_extent, and the extent is then freed again in the buddy
+ * bitmap. This is repeated until the whole group has been scanned.
+ */
+ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
+ ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
+{
+ void *bitmap;
+ ext4_grpblk_t next, count = 0;
+ ext4_group_t group;
+ int ret = 0;
+
+ BUG_ON(e4b == NULL);
+
+ bitmap = e4b->bd_bitmap;
+ group = e4b->bd_group;
+ start = (e4b->bd_info->bb_first_free > start) ?
+ e4b->bd_info->bb_first_free : start;
+ ext4_lock_group(sb, group);
+
+ while (start < max) {
+ start = mb_find_next_zero_bit(bitmap, max, start);
+ if (start >= max)
+ break;
+ next = mb_find_next_bit(bitmap, max, start);
+
+ if ((next - start) >= minblocks) {
+ ret = ext4_trim_extent(sb, start,
+ next - start, group, e4b);
+ if (ret < 0)
+ break;
+ count += next - start;
+ }
+ start = next + 1;
+
+ if (fatal_signal_pending(current)) {
+ count = -ERESTARTSYS;
+ break;
+ }
+
+ if (need_resched()) {
+ ext4_unlock_group(sb, group);
+ cond_resched();
+ ext4_lock_group(sb, group);
+ }
+
+ if ((e4b->bd_info->bb_free - count) < minblocks)
+ break;
+ }
+ ext4_unlock_group(sb, group);
+
+ ext4_debug("trimmed %d blocks in the group %d\n",
+ count, group);
+
+ if (ret < 0)
+ count = ret;
+
+ return count;
+}
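A stand-alone sketch (not kernel code) of the scan pattern ext4_trim_all_free() applies to the buddy bitmap: find the next zero bit, find the next set bit after it, and treat any run of at least minblocks free blocks as a trim candidate. The trivial helper below stands in for mb_find_next_zero_bit()/mb_find_next_bit():

/* Sketch only: report every free run (0 bits) of at least 'minblocks'
 * blocks in a small bitmap where 1 = used and 0 = free. */
#include <stdio.h>

static int next_bit(const unsigned char *map, int max, int start, int want)
{
	while (start < max && ((map[start / 8] >> (start % 8)) & 1) != want)
		start++;
	return start;
}

int main(void)
{
	unsigned char map[] = { 0x0f, 0xf0 };	/* blocks 0-3 and 12-15 used */
	int max = 16, minblocks = 3, start = 0, next;

	while (start < max) {
		start = next_bit(map, max, start, 0);	/* next free block */
		if (start >= max)
			break;
		next = next_bit(map, max, start, 1);	/* end of free run */
		if (next - start >= minblocks)
			printf("would trim blocks %d..%d\n", start, next - 1);
		start = next + 1;
	}
	return 0;
}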
+
+/**
+ * ext4_trim_fs() -- trim ioctl handler
+ * @sb: superblock for filesystem
+ * @range: fstrim_range structure
+ *
+ * start: first byte to trim
+ * len: number of bytes to trim from start
+ * minlen: minimum extent length in bytes
+ *
+ * ext4_trim_fs goes through all the allocation groups containing bytes from
+ * start to start+len. For each such group, ext4_trim_all_free is invoked to
+ * trim all free space.
+ */
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+ struct ext4_buddy e4b;
+ ext4_group_t first_group, last_group;
+ ext4_group_t group, ngroups = ext4_get_groups_count(sb);
+ ext4_grpblk_t cnt = 0, first_block, last_block;
+ uint64_t start, len, minlen, trimmed;
+ int ret = 0;
+
+ start = range->start >> sb->s_blocksize_bits;
+ len = range->len >> sb->s_blocksize_bits;
+ minlen = range->minlen >> sb->s_blocksize_bits;
+ trimmed = 0;
+
+ if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
+ return -EINVAL;
+
+ /* Determine first and last group to examine based on start and len */
+ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
+ &first_group, &first_block);
+ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) (start + len),
+ &last_group, &last_block);
+ last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+ last_block = EXT4_BLOCKS_PER_GROUP(sb);
+
+ if (first_group > last_group)
+ return -EINVAL;
+
+ for (group = first_group; group <= last_group; group++) {
+ ret = ext4_mb_load_buddy(sb, group, &e4b);
+ if (ret) {
+ ext4_error(sb, "Error in loading buddy "
+ "information for %u", group);
+ break;
+ }
+
+ if (len >= EXT4_BLOCKS_PER_GROUP(sb))
+ len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
+ else
+ last_block = len;
+
+ if (e4b.bd_info->bb_free >= minlen) {
+ cnt = ext4_trim_all_free(sb, &e4b, first_block,
+ last_block, minlen);
+ if (cnt < 0) {
+ ret = cnt;
+ ext4_mb_unload_buddy(&e4b);
+ break;
+ }
+ }
+ ext4_mb_unload_buddy(&e4b);
+ trimmed += cnt;
+ first_block = 0;
+ }
+ range->len = trimmed * sb->s_blocksize;
+
+ return ret;
+}
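ext4_trim_fs() is the backend for the FITRIM ioctl wired up elsewhere in this series; a user-space sketch of how it would be driven (assuming the FITRIM and struct fstrim_range definitions from linux/fs.h) might look like this:

/* Sketch only: ask the filesystem to trim every free extent of at
 * least 64 KiB; on return range.len holds the number of bytes trimmed. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = (unsigned long long)-1;	/* whole filesystem */
	range.minlen = 64 * 1024;		/* skip runs shorter than 64 KiB */
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}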
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 1765c2c50a9b..25f3a974b725 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -412,7 +412,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
struct buffer_head *bh;
struct ext4_extent_header *eh;
- block = idx_pblock(ix);
+ block = ext4_idx_pblock(ix);
bh = sb_bread(inode->i_sb, block);
if (!bh)
return -EIO;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 5f1ed9fc913c..b9f3e7862f13 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -85,7 +85,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
/* leaf block */
*extent = ++path[ppos].p_ext;
- path[ppos].p_block = ext_pblock(path[ppos].p_ext);
+ path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
return 0;
}
@@ -96,7 +96,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
/* index block */
path[ppos].p_idx++;
- path[ppos].p_block = idx_pblock(path[ppos].p_idx);
+ path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
if (path[ppos+1].p_bh)
brelse(path[ppos+1].p_bh);
path[ppos+1].p_bh =
@@ -111,7 +111,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
path[cur_ppos].p_idx =
EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
path[cur_ppos].p_block =
- idx_pblock(path[cur_ppos].p_idx);
+ ext4_idx_pblock(path[cur_ppos].p_idx);
if (path[cur_ppos+1].p_bh)
brelse(path[cur_ppos+1].p_bh);
path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
@@ -133,7 +133,7 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
path[leaf_ppos].p_ext = *extent =
EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
path[leaf_ppos].p_block =
- ext_pblock(path[leaf_ppos].p_ext);
+ ext4_ext_pblock(path[leaf_ppos].p_ext);
return 0;
}
}
@@ -249,7 +249,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
*/
o_end->ee_block = end_ext->ee_block;
o_end->ee_len = end_ext->ee_len;
- ext4_ext_store_pblock(o_end, ext_pblock(end_ext));
+ ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
}
o_start->ee_len = start_ext->ee_len;
@@ -276,7 +276,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
*/
o_end->ee_block = end_ext->ee_block;
o_end->ee_len = end_ext->ee_len;
- ext4_ext_store_pblock(o_end, ext_pblock(end_ext));
+ ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
/*
* Set 0 to the extent block if new_ext was
@@ -361,7 +361,7 @@ mext_insert_inside_block(struct ext4_extent *o_start,
/* Insert new entry */
if (new_ext->ee_len) {
o_start[i] = *new_ext;
- ext4_ext_store_pblock(&o_start[i++], ext_pblock(new_ext));
+ ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
}
/* Insert end entry */
@@ -488,7 +488,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
start_ext.ee_len = end_ext.ee_len = 0;
new_ext.ee_block = cpu_to_le32(*from);
- ext4_ext_store_pblock(&new_ext, ext_pblock(dext));
+ ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
new_ext.ee_len = dext->ee_len;
new_ext_alen = ext4_ext_get_actual_len(&new_ext);
new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
@@ -553,7 +553,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
copy_extent_status(oext, &end_ext);
end_ext_alen = ext4_ext_get_actual_len(&end_ext);
ext4_ext_store_pblock(&end_ext,
- (ext_pblock(o_end) + oext_alen - end_ext_alen));
+ (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
end_ext.ee_block =
cpu_to_le32(le32_to_cpu(o_end->ee_block) +
oext_alen - end_ext_alen);
@@ -604,7 +604,7 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
/* When tmp_dext is too large, pick up the target range. */
diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
- ext4_ext_store_pblock(tmp_dext, ext_pblock(tmp_dext) + diff);
+ ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
tmp_dext->ee_block =
cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);
@@ -613,7 +613,7 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
tmp_dext->ee_len = cpu_to_le16(max_count);
orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
- ext4_ext_store_pblock(tmp_oext, ext_pblock(tmp_oext) + orig_diff);
+ ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
/* Adjust extent length if donor extent is larger than orig */
if (ext4_ext_get_actual_len(tmp_dext) >
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 314c0d3b3fa9..92203b8a099f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -856,6 +856,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
struct buffer_head *bh_use[NAMEI_RA_SIZE];
struct buffer_head *bh, *ret = NULL;
ext4_lblk_t start, block, b;
+ const u8 *name = d_name->name;
int ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
int ra_ptr = 0; /* Current index into readahead
@@ -870,6 +871,16 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
namelen = d_name->len;
if (namelen > EXT4_NAME_LEN)
return NULL;
+ if ((namelen <= 2) && (name[0] == '.') &&
+ (name[1] == '.' || name[1] == '\0')) {
+ /*
+ * "." or ".." will only be in the first block
+ * NFS may look up ".."; "." should be handled by the VFS
+ */
+ block = start = 0;
+ nblocks = 1;
+ goto restart;
+ }
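A small stand-alone illustration (not part of the patch) of why the name[1] test above is sufficient: the qstr name is NUL-terminated, so "." has name[1] == '\0' and ".." has name[1] == '.', while any other name falls through to the normal lookup path:

/* Sketch only: the "." / ".." fast-path test from ext4_find_entry(),
 * applied to ordinary NUL-terminated strings. */
#include <stdio.h>
#include <string.h>

static int is_dot_or_dotdot(const char *name)
{
	size_t namelen = strlen(name);

	return namelen <= 2 && name[0] == '.' &&
	       (name[1] == '.' || name[1] == '\0');
}

int main(void)
{
	const char *names[] = { ".", "..", ".a", "a", "..." };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-4s -> %s\n", names[i],
		       is_dot_or_dotdot(names[i]) ? "first block only" : "normal lookup");
	return 0;
}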
if (is_dx(dir)) {
bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
/*
@@ -960,55 +971,35 @@ cleanup_and_exit:
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
struct ext4_dir_entry_2 **res_dir, int *err)
{
- struct super_block * sb;
+ struct super_block * sb = dir->i_sb;
struct dx_hash_info hinfo;
- u32 hash;
struct dx_frame frames[2], *frame;
- struct ext4_dir_entry_2 *de, *top;
struct buffer_head *bh;
ext4_lblk_t block;
int retval;
- int namelen = d_name->len;
- const u8 *name = d_name->name;
- sb = dir->i_sb;
- /* NFS may look up ".." - look at dx_root directory block */
- if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
- if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
- return NULL;
- } else {
- frame = frames;
- frame->bh = NULL; /* for dx_release() */
- frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
- dx_set_block(frame->at, 0); /* dx_root block is 0 */
- }
- hash = hinfo.hash;
+ if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
+ return NULL;
do {
block = dx_get_block(frame->at);
- if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
+ if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
goto errout;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
- top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
- EXT4_DIR_REC_LEN(0));
- for (; de < top; de = ext4_next_entry(de, sb->s_blocksize)) {
- int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
- + ((char *) de - bh->b_data);
-
- if (!ext4_check_dir_entry(dir, de, bh, off)) {
- brelse(bh);
- *err = ERR_BAD_DX_DIR;
- goto errout;
- }
- if (ext4_match(namelen, name, de)) {
- *res_dir = de;
- dx_release(frames);
- return bh;
- }
+ retval = search_dirblock(bh, dir, d_name,
+ block << EXT4_BLOCK_SIZE_BITS(sb),
+ res_dir);
+ if (retval == 1) { /* Success! */
+ dx_release(frames);
+ return bh;
}
brelse(bh);
+ if (retval == -1) {
+ *err = ERR_BAD_DX_DIR;
+ goto errout;
+ }
+
/* Check to see if we should continue to search */
- retval = ext4_htree_next_block(dir, hash, frame,
+ retval = ext4_htree_next_block(dir, hinfo.hash, frame,
frames, NULL);
if (retval < 0) {
ext4_warning(sb,
@@ -2312,7 +2303,7 @@ retry:
inode->i_ctime = ext4_current_time(inode);
ext4_inc_count(handle, inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
new file mode 100644
index 000000000000..46a7d6a9d976
--- /dev/null
+++ b/fs/ext4/page-io.c
@@ -0,0 +1,430 @@
+/*
+ * linux/fs/ext4/page-io.c
+ *
+ * This contains the new page_io functions for ext4
+ *
+ * Written by Theodore Ts'o, 2010.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/jbd2.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include <linux/mpage.h>
+#include <linux/namei.h>
+#include <linux/uio.h>
+#include <linux/bio.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "ext4_jbd2.h"
+#include "xattr.h"
+#include "acl.h"
+#include "ext4_extents.h"
+
+static struct kmem_cache *io_page_cachep, *io_end_cachep;
+
+int __init ext4_init_pageio(void)
+{
+ io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
+ if (io_page_cachep == NULL)
+ return -ENOMEM;
+ io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
+ if (io_end_cachep == NULL) {
+ kmem_cache_destroy(io_page_cachep);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ext4_exit_pageio(void)
+{
+ kmem_cache_destroy(io_end_cachep);
+ kmem_cache_destroy(io_page_cachep);
+}
+
+void ext4_free_io_end(ext4_io_end_t *io)
+{
+ int i;
+
+ BUG_ON(!io);
+ if (io->page)
+ put_page(io->page);
+ for (i = 0; i < io->num_io_pages; i++) {
+ if (--io->pages[i]->p_count == 0) {
+ struct page *page = io->pages[i]->p_page;
+
+ end_page_writeback(page);
+ put_page(page);
+ kmem_cache_free(io_page_cachep, io->pages[i]);
+ }
+ }
+ io->num_io_pages = 0;
+ iput(io->inode);
+ kmem_cache_free(io_end_cachep, io);
+}
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+int ext4_end_io_nolock(ext4_io_end_t *io)
+{
+ struct inode *inode = io->inode;
+ loff_t offset = io->offset;
+ ssize_t size = io->size;
+ int ret = 0;
+
+ ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
+ "list->prev 0x%p\n",
+ io, inode->i_ino, io->list.next, io->list.prev);
+
+ if (list_empty(&io->list))
+ return ret;
+
+ if (!(io->flag & EXT4_IO_END_UNWRITTEN))
+ return ret;
+
+ ret = ext4_convert_unwritten_extents(inode, offset, size);
+ if (ret < 0) {
+ printk(KERN_EMERG "%s: failed to convert unwritten "
+ "extents to written extents, error is %d "
+ "io is still on inode %lu aio dio list\n",
+ __func__, ret, inode->i_ino);
+ return ret;
+ }
+
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
+ /* clear the DIO AIO unwritten flag */
+ io->flag &= ~EXT4_IO_END_UNWRITTEN;
+ return ret;
+}
+
+/*
+ * work on completed aio dio IO, to convert unwritten extents to extents
+ */
+static void ext4_end_io_work(struct work_struct *work)
+{
+ ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
+ struct inode *inode = io->inode;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&inode->i_mutex);
+ ret = ext4_end_io_nolock(io);
+ if (ret < 0) {
+ mutex_unlock(&inode->i_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ if (!list_empty(&io->list))
+ list_del_init(&io->list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ mutex_unlock(&inode->i_mutex);
+ ext4_free_io_end(io);
+}
+
+ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
+{
+ ext4_io_end_t *io = NULL;
+
+ io = kmem_cache_alloc(io_end_cachep, flags);
+ if (io) {
+ memset(io, 0, sizeof(*io));
+ io->inode = igrab(inode);
+ BUG_ON(!io->inode);
+ INIT_WORK(&io->work, ext4_end_io_work);
+ INIT_LIST_HEAD(&io->list);
+ }
+ return io;
+}
+
+/*
+ * Print a buffer I/O error message compatible with fs/buffer.c. This
+ * provides compatibility with dmesg scrapers that look for a specific
+ * buffer I/O error message. We really need a unified error reporting
+ * structure to userspace ala Digital Unix's uerf system, but it's
+ * probably not going to happen in my lifetime, due to LKML politics...
+ */
+static void buffer_io_error(struct buffer_head *bh)
+{
+ char b[BDEVNAME_SIZE];
+ printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+ bdevname(bh->b_bdev, b),
+ (unsigned long long)bh->b_blocknr);
+}
+
+static void ext4_end_bio(struct bio *bio, int error)
+{
+ ext4_io_end_t *io_end = bio->bi_private;
+ struct workqueue_struct *wq;
+ struct inode *inode;
+ unsigned long flags;
+ ext4_fsblk_t err_block;
+ int i;
+
+ BUG_ON(!io_end);
+ inode = io_end->inode;
+ bio->bi_private = NULL;
+ bio->bi_end_io = NULL;
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = 0;
+ err_block = bio->bi_sector >> (inode->i_blkbits - 9);
+ bio_put(bio);
+
+ if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
+ pr_err("sb umounted, discard end_io request for inode %lu\n",
+ io_end->inode->i_ino);
+ ext4_free_io_end(io_end);
+ return;
+ }
+
+ if (error) {
+ io_end->flag |= EXT4_IO_END_ERROR;
+ ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+ "(offset %llu size %ld starting block %llu)",
+ inode->i_ino,
+ (unsigned long long) io_end->offset,
+ (long) io_end->size,
+ (unsigned long long) err_block);
+ }
+
+ for (i = 0; i < io_end->num_io_pages; i++) {
+ struct page *page = io_end->pages[i]->p_page;
+ struct buffer_head *bh, *head;
+ int partial_write = 0;
+
+ head = page_buffers(page);
+ if (error)
+ SetPageError(page);
+ BUG_ON(!head);
+ if (head->b_size == PAGE_CACHE_SIZE)
+ clear_buffer_dirty(head);
+ else {
+ loff_t offset;
+ loff_t io_end_offset = io_end->offset + io_end->size;
+
+ offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
+ bh = head;
+ do {
+ if ((offset >= io_end->offset) &&
+ (offset+bh->b_size <= io_end_offset)) {
+ if (error)
+ buffer_io_error(bh);
+
+ clear_buffer_dirty(bh);
+ }
+ if (buffer_delay(bh))
+ partial_write = 1;
+ else if (!buffer_mapped(bh))
+ clear_buffer_dirty(bh);
+ else if (buffer_dirty(bh))
+ partial_write = 1;
+ offset += bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+ }
+
+ if (--io_end->pages[i]->p_count == 0) {
+ struct page *page = io_end->pages[i]->p_page;
+
+ end_page_writeback(page);
+ put_page(page);
+ kmem_cache_free(io_page_cachep, io_end->pages[i]);
+ }
+
+ /*
+ * If this is a partial write which happened to make
+ * all buffers uptodate then we can optimize away a
+ * bogus readpage() for the next read(). Here we
+ * 'discover' whether the page went uptodate as a
+ * result of this (potentially partial) write.
+ */
+ if (!partial_write)
+ SetPageUptodate(page);
+ }
+
+ io_end->num_io_pages = 0;
+
+ /* Add the io_end to per-inode completed io list*/
+ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
+ list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
+ spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
+
+ wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
+ /* queue the work to convert unwritten extents to written */
+ queue_work(wq, &io_end->work);
+}
+
+void ext4_io_submit(struct ext4_io_submit *io)
+{
+ struct bio *bio = io->io_bio;
+
+ if (bio) {
+ bio_get(io->io_bio);
+ submit_bio(io->io_op, io->io_bio);
+ BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
+ bio_put(io->io_bio);
+ }
+ io->io_bio = 0;
+ io->io_op = 0;
+ io->io_end = 0;
+}
+
+static int io_submit_init(struct ext4_io_submit *io,
+ struct inode *inode,
+ struct writeback_control *wbc,
+ struct buffer_head *bh)
+{
+ ext4_io_end_t *io_end;
+ struct page *page = bh->b_page;
+ int nvecs = bio_get_nr_vecs(bh->b_bdev);
+ struct bio *bio;
+
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end)
+ return -ENOMEM;
+ do {
+ bio = bio_alloc(GFP_NOIO, nvecs);
+ nvecs >>= 1;
+ } while (bio == NULL);
+
+ bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+ bio->bi_private = io->io_end = io_end;
+ bio->bi_end_io = ext4_end_bio;
+
+ io_end->inode = inode;
+ io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
+ io->io_bio = bio;
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
+ WRITE_SYNC_PLUG : WRITE);
+ io->io_next_block = bh->b_blocknr;
+ return 0;
+}
+
+static int io_submit_add_bh(struct ext4_io_submit *io,
+ struct ext4_io_page *io_page,
+ struct inode *inode,
+ struct writeback_control *wbc,
+ struct buffer_head *bh)
+{
+ ext4_io_end_t *io_end;
+ int ret;
+
+ if (buffer_new(bh)) {
+ clear_buffer_new(bh);
+ unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+ }
+
+ if (!buffer_mapped(bh) || buffer_delay(bh)) {
+ if (!buffer_mapped(bh))
+ clear_buffer_dirty(bh);
+ if (io->io_bio)
+ ext4_io_submit(io);
+ return 0;
+ }
+
+ if (io->io_bio && bh->b_blocknr != io->io_next_block) {
+submit_and_retry:
+ ext4_io_submit(io);
+ }
+ if (io->io_bio == NULL) {
+ ret = io_submit_init(io, inode, wbc, bh);
+ if (ret)
+ return ret;
+ }
+ io_end = io->io_end;
+ if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
+ (io_end->pages[io_end->num_io_pages-1] != io_page))
+ goto submit_and_retry;
+ if (buffer_uninit(bh))
+ io->io_end->flag |= EXT4_IO_END_UNWRITTEN;
+ io->io_end->size += bh->b_size;
+ io->io_next_block++;
+ ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+ if (ret != bh->b_size)
+ goto submit_and_retry;
+ if ((io_end->num_io_pages == 0) ||
+ (io_end->pages[io_end->num_io_pages-1] != io_page)) {
+ io_end->pages[io_end->num_io_pages++] = io_page;
+ io_page->p_count++;
+ }
+ return 0;
+}
+
+int ext4_bio_write_page(struct ext4_io_submit *io,
+ struct page *page,
+ int len,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ unsigned block_start, block_end, blocksize;
+ struct ext4_io_page *io_page;
+ struct buffer_head *bh, *head;
+ int ret = 0;
+
+ blocksize = 1 << inode->i_blkbits;
+
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+ ClearPageError(page);
+
+ io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
+ if (!io_page) {
+ set_page_dirty(page);
+ unlock_page(page);
+ return -ENOMEM;
+ }
+ io_page->p_page = page;
+ io_page->p_count = 0;
+ get_page(page);
+
+ for (bh = head = page_buffers(page), block_start = 0;
+ bh != head || !block_start;
+ block_start = block_end, bh = bh->b_this_page) {
+ block_end = block_start + blocksize;
+ if (block_start >= len) {
+ clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
+ continue;
+ }
+ ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
+ if (ret) {
+ /*
+ * We only get here on ENOMEM. Not much else
+ * we can do but mark the page as dirty, and
+ * better luck next time.
+ */
+ set_page_dirty(page);
+ break;
+ }
+ }
+ unlock_page(page);
+ /*
+ * If the page was truncated before we could do the writeback,
+ * or we had a memory allocation error while trying to write
+ * the first buffer head, we won't have submitted any pages for
+ * I/O. In that case we need to make sure we've cleared the
+ * PageWriteback bit from the page to prevent the system from
+ * wedging later on.
+ */
+ if (io_page->p_count == 0) {
+ put_page(page);
+ end_page_writeback(page);
+ kmem_cache_free(io_page_cachep, io_page);
+ }
+ return ret;
+}
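For orientation, here is a minimal sketch of how a caller could drive the two entry points above: ext4_bio_write_page() batches the page's buffers into io->io_bio, and ext4_io_submit() flushes whatever has been batched. The function name, the assumption that the page is locked with buffer heads attached, and the PAGE_CACHE_SIZE length are illustrative only, not part of this patch (the real caller is ext4's writepage path):

static int example_write_one_page(struct page *page,
				  struct writeback_control *wbc)
{
	struct ext4_io_submit io;
	int ret;

	memset(&io, 0, sizeof(io));	/* io_bio == NULL: nothing batched yet */
	ret = ext4_bio_write_page(&io, page, PAGE_CACHE_SIZE, wbc);
	ext4_io_submit(&io);		/* submit any bio that was built up */
	return ret;
}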
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ca5c8aa00a2f..dc963929de65 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -226,23 +226,13 @@ static int setup_new_group_blocks(struct super_block *sb,
}
/* Zero out all of the reserved backup group descriptor table blocks */
- for (i = 0, bit = gdblocks + 1, block = start + bit;
- i < reserved_gdb; i++, block++, bit++) {
- struct buffer_head *gdb;
-
- ext4_debug("clear reserved block %#04llx (+%d)\n", block, bit);
-
- if ((err = extend_or_restart_transaction(handle, 1, bh)))
- goto exit_bh;
+ ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ block, sbi->s_itb_per_group);
+ err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
+ GFP_NOFS);
+ if (err)
+ goto exit_bh;
- if (IS_ERR(gdb = bclean(handle, sb, block))) {
- err = PTR_ERR(gdb);
- goto exit_bh;
- }
- ext4_handle_dirty_metadata(handle, NULL, gdb);
- ext4_set_bit(bit, bh->b_data);
- brelse(gdb);
- }
ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
input->block_bitmap - start);
ext4_set_bit(input->block_bitmap - start, bh->b_data);
@@ -251,28 +241,18 @@ static int setup_new_group_blocks(struct super_block *sb,
ext4_set_bit(input->inode_bitmap - start, bh->b_data);
/* Zero out all of the inode table blocks */
- for (i = 0, block = input->inode_table, bit = block - start;
- i < sbi->s_itb_per_group; i++, bit++, block++) {
- struct buffer_head *it;
-
- ext4_debug("clear inode block %#04llx (+%d)\n", block, bit);
-
- if ((err = extend_or_restart_transaction(handle, 1, bh)))
- goto exit_bh;
-
- if (IS_ERR(it = bclean(handle, sb, block))) {
- err = PTR_ERR(it);
- goto exit_bh;
- }
- ext4_handle_dirty_metadata(handle, NULL, it);
- brelse(it);
- ext4_set_bit(bit, bh->b_data);
- }
+ block = input->inode_table;
+ ext4_debug("clear inode table blocks %#04llx -> %#04llx\n",
+ block, sbi->s_itb_per_group);
+ err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
+ if (err)
+ goto exit_bh;
if ((err = extend_or_restart_transaction(handle, 2, bh)))
goto exit_bh;
- mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
+ ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
+ bh->b_data);
ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
/* Mark unused entries in inode bitmap used */
@@ -283,8 +263,8 @@ static int setup_new_group_blocks(struct super_block *sb,
goto exit_journal;
}
- mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
- bh->b_data);
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+ bh->b_data);
ext4_handle_dirty_metadata(handle, NULL, bh);
exit_bh:
brelse(bh);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8ecc1e590303..0348ce066592 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -40,6 +40,9 @@
#include <linux/crc16.h>
#include <asm/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -49,8 +52,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
-struct proc_dir_entry *ext4_proc_root;
+static struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
+struct ext4_lazy_init *ext4_li_info;
+struct mutex ext4_li_mtx;
+struct ext4_features *ext4_feat;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
@@ -69,6 +75,8 @@ static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static int ext4_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt);
+static void ext4_destroy_lazyinit_thread(void);
+static void ext4_unregister_li_request(struct super_block *sb);
#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext3_fs_type = {
@@ -701,6 +709,7 @@ static void ext4_put_super(struct super_block *sb)
struct ext4_super_block *es = sbi->s_es;
int i, err;
+ ext4_unregister_li_request(sb);
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
flush_workqueue(sbi->dio_unwritten_wq);
@@ -717,6 +726,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_abort(sb, "Couldn't clean up the journal");
}
+ del_timer(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
@@ -1042,6 +1052,12 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
!(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
seq_puts(seq, ",block_validity");
+ if (!test_opt(sb, INIT_INODE_TABLE))
+ seq_puts(seq, ",noinit_inode_table");
+ else if (sbi->s_li_wait_mult)
+ seq_printf(seq, ",init_inode_table=%u",
+ (unsigned) sbi->s_li_wait_mult);
+
ext4_show_quota_options(seq, sb);
return 0;
@@ -1170,6 +1186,7 @@ static const struct super_operations ext4_sops = {
.quota_write = ext4_quota_write,
#endif
.bdev_try_to_free_page = bdev_try_to_free_page,
+ .trim_fs = ext4_trim_fs
};
static const struct super_operations ext4_nojournal_sops = {
@@ -1216,6 +1233,7 @@ enum {
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard,
+ Opt_init_inode_table, Opt_noinit_inode_table,
};
static const match_table_t tokens = {
@@ -1286,6 +1304,9 @@ static const match_table_t tokens = {
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
+ {Opt_init_inode_table, "init_itable=%u"},
+ {Opt_init_inode_table, "init_itable"},
+ {Opt_noinit_inode_table, "noinit_itable"},
{Opt_err, NULL},
};
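In practice this means the lazy init behaviour is controlled from the mount command line: init_itable may be given bare or with a numeric wait multiplier (init_itable=%u, e.g. init_itable=10), and noinit_itable switches the background zeroing off; the optional argument is consumed in the Opt_init_inode_table case below.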
@@ -1756,6 +1777,20 @@ set_qf_format:
case Opt_dioread_lock:
clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
break;
+ case Opt_init_inode_table:
+ set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+ if (args[0].from) {
+ if (match_int(&args[0], &option))
+ return 0;
+ } else
+ option = EXT4_DEF_LI_WAIT_MULT;
+ if (option < 0)
+ return 0;
+ sbi->s_li_wait_mult = option;
+ break;
+ case Opt_noinit_inode_table:
+ clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+ break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
@@ -1939,7 +1974,8 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
}
/* Called at mount-time, super-block is locked */
-static int ext4_check_descriptors(struct super_block *sb)
+static int ext4_check_descriptors(struct super_block *sb,
+ ext4_group_t *first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -1948,7 +1984,7 @@ static int ext4_check_descriptors(struct super_block *sb)
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
int flexbg_flag = 0;
- ext4_group_t i;
+ ext4_group_t i, grp = sbi->s_groups_count;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
flexbg_flag = 1;
@@ -1964,6 +2000,10 @@ static int ext4_check_descriptors(struct super_block *sb)
last_block = first_block +
(EXT4_BLOCKS_PER_GROUP(sb) - 1);
+ if ((grp == sbi->s_groups_count) &&
+ !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ grp = i;
+
block_bitmap = ext4_block_bitmap(sb, gdp);
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2001,6 +2041,8 @@ static int ext4_check_descriptors(struct super_block *sb)
if (!flexbg_flag)
first_block += EXT4_BLOCKS_PER_GROUP(sb);
}
+ if (NULL != first_not_zeroed)
+ *first_not_zeroed = grp;
ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
@@ -2373,6 +2415,7 @@ static struct ext4_attr ext4_attr_##_name = { \
#define EXT4_ATTR(name, mode, show, store) \
static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
+#define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL)
#define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
#define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
#define EXT4_RW_ATTR_SBI_UI(name, elname) \
@@ -2409,6 +2452,16 @@ static struct attribute *ext4_attrs[] = {
NULL,
};
+/* Features this copy of ext4 supports */
+EXT4_INFO_ATTR(lazy_itable_init);
+EXT4_INFO_ATTR(batched_discard);
+
+static struct attribute *ext4_feat_attrs[] = {
+ ATTR_LIST(lazy_itable_init),
+ ATTR_LIST(batched_discard),
+ NULL,
+};
+
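Note that EXT4_INFO_ATTR() (defined above) creates attributes with neither a show nor a store method, so their only job is to exist: once ext4_init_feat_adverts() registers the "features" kobject on the ext4 kset, the mere presence of /sys/fs/ext4/features/lazy_itable_init and /sys/fs/ext4/features/batched_discard tells userspace that this kernel supports those features.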
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -2437,7 +2490,6 @@ static void ext4_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister);
}
-
static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show,
.store = ext4_attr_store,
@@ -2449,6 +2501,17 @@ static struct kobj_type ext4_ktype = {
.release = ext4_sb_release,
};
+static void ext4_feat_release(struct kobject *kobj)
+{
+ complete(&ext4_feat->f_kobj_unregister);
+}
+
+static struct kobj_type ext4_feat_ktype = {
+ .default_attrs = ext4_feat_attrs,
+ .sysfs_ops = &ext4_attr_ops,
+ .release = ext4_feat_release,
+};
+
/*
* Check whether this filesystem can be mounted based on
* the features present and the RDONLY/RDWR mount requested.
@@ -2539,6 +2602,372 @@ static void print_daily_error_info(unsigned long arg)
mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
}
+static void ext4_lazyinode_timeout(unsigned long data)
+{
+ struct task_struct *p = (struct task_struct *)data;
+ wake_up_process(p);
+}
+
+/* Find next suitable group and run ext4_init_inode_table */
+static int ext4_run_li_request(struct ext4_li_request *elr)
+{
+ struct ext4_group_desc *gdp = NULL;
+ ext4_group_t group, ngroups;
+ struct super_block *sb;
+ unsigned long timeout = 0;
+ int ret = 0;
+
+ sb = elr->lr_super;
+ ngroups = EXT4_SB(sb)->s_groups_count;
+
+ for (group = elr->lr_next_group; group < ngroups; group++) {
+ gdp = ext4_get_group_desc(sb, group, NULL);
+ if (!gdp) {
+ ret = 1;
+ break;
+ }
+
+ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ break;
+ }
+
+ if (group == ngroups)
+ ret = 1;
+
+ if (!ret) {
+ timeout = jiffies;
+ ret = ext4_init_inode_table(sb, group,
+ elr->lr_timeout ? 0 : 1);
+ if (elr->lr_timeout == 0) {
+ timeout = jiffies - timeout;
+ if (elr->lr_sbi->s_li_wait_mult)
+ timeout *= elr->lr_sbi->s_li_wait_mult;
+ else
+ timeout *= 20;
+ elr->lr_timeout = timeout;
+ }
+ elr->lr_next_sched = jiffies + elr->lr_timeout;
+ elr->lr_next_group = group + 1;
+ }
+
+ return ret;
+}
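In other words the thread self-throttles: the time taken to zero the first group fixes the interval, so if that took, say, 5 jiffies with s_li_wait_mult set to 10, each later group is scheduled 50 jiffies after the previous one and the lazy-init work consumes roughly 1/(mult+1) of wall-clock time; when s_li_wait_mult is zero the multiplier falls back to 20.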
+
+/*
+ * Remove lr_request from the request list and free the request
+ * structure. Should be called with li_list_mtx held.
+ */
+static void ext4_remove_li_request(struct ext4_li_request *elr)
+{
+ struct ext4_sb_info *sbi;
+
+ if (!elr)
+ return;
+
+ sbi = elr->lr_sbi;
+
+ list_del(&elr->lr_request);
+ sbi->s_li_request = NULL;
+ kfree(elr);
+}
+
+static void ext4_unregister_li_request(struct super_block *sb)
+{
+ struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request;
+
+ if (!ext4_li_info)
+ return;
+
+ mutex_lock(&ext4_li_info->li_list_mtx);
+ ext4_remove_li_request(elr);
+ mutex_unlock(&ext4_li_info->li_list_mtx);
+}
+
+/*
+ * This is the function where the ext4lazyinit thread lives. It walks
+ * through the request list searching for the next scheduled filesystem.
+ * When such a fs is found, run the lazy initialization request
+ * (ext4_run_li_request) and keep track of the time spent in this
+ * function. Based on that time we compute the next schedule time of
+ * the request. When walking through the list is complete, compute the
+ * next wakeup time and go to sleep.
+ */
+static int ext4_lazyinit_thread(void *arg)
+{
+ struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
+ struct list_head *pos, *n;
+ struct ext4_li_request *elr;
+ unsigned long next_wakeup;
+ DEFINE_WAIT(wait);
+
+ BUG_ON(NULL == eli);
+
+ eli->li_timer.data = (unsigned long)current;
+ eli->li_timer.function = ext4_lazyinode_timeout;
+
+ eli->li_task = current;
+ wake_up(&eli->li_wait_task);
+
+cont_thread:
+ while (true) {
+ next_wakeup = MAX_JIFFY_OFFSET;
+
+ mutex_lock(&eli->li_list_mtx);
+ if (list_empty(&eli->li_request_list)) {
+ mutex_unlock(&eli->li_list_mtx);
+ goto exit_thread;
+ }
+
+ list_for_each_safe(pos, n, &eli->li_request_list) {
+ elr = list_entry(pos, struct ext4_li_request,
+ lr_request);
+
+ if (time_after_eq(jiffies, elr->lr_next_sched)) {
+ if (ext4_run_li_request(elr) != 0) {
+ /* error, remove the lazy_init job */
+ ext4_remove_li_request(elr);
+ continue;
+ }
+ }
+
+ if (time_before(elr->lr_next_sched, next_wakeup))
+ next_wakeup = elr->lr_next_sched;
+ }
+ mutex_unlock(&eli->li_list_mtx);
+
+ if (freezing(current))
+ refrigerator();
+
+ if (time_after_eq(jiffies, next_wakeup)) {
+ cond_resched();
+ continue;
+ }
+
+ eli->li_timer.expires = next_wakeup;
+ add_timer(&eli->li_timer);
+ prepare_to_wait(&eli->li_wait_daemon, &wait,
+ TASK_INTERRUPTIBLE);
+ if (time_before(jiffies, next_wakeup))
+ schedule();
+ finish_wait(&eli->li_wait_daemon, &wait);
+ }
+
+exit_thread:
+ /*
+ * It looks like the request list is empty, but we need
+ * to check it under the li_list_mtx lock, to prevent any
+ * additions into it, and of course we should lock ext4_li_mtx
+ * to atomically free the list and ext4_li_info, because at
+ * this point another ext4 filesystem could be registering
+ * a new one.
+ */
+ mutex_lock(&ext4_li_mtx);
+ mutex_lock(&eli->li_list_mtx);
+ if (!list_empty(&eli->li_request_list)) {
+ mutex_unlock(&eli->li_list_mtx);
+ mutex_unlock(&ext4_li_mtx);
+ goto cont_thread;
+ }
+ mutex_unlock(&eli->li_list_mtx);
+ del_timer_sync(&ext4_li_info->li_timer);
+ eli->li_task = NULL;
+ wake_up(&eli->li_wait_task);
+
+ kfree(ext4_li_info);
+ ext4_li_info = NULL;
+ mutex_unlock(&ext4_li_mtx);
+
+ return 0;
+}
+
+static void ext4_clear_request_list(void)
+{
+ struct list_head *pos, *n;
+ struct ext4_li_request *elr;
+
+ mutex_lock(&ext4_li_info->li_list_mtx);
+
+ list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
+ elr = list_entry(pos, struct ext4_li_request,
+ lr_request);
+ ext4_remove_li_request(elr);
+ }
+ mutex_unlock(&ext4_li_info->li_list_mtx);
+}
+
+static int ext4_run_lazyinit_thread(void)
+{
+ struct task_struct *t;
+
+ t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
+ if (IS_ERR(t)) {
+ int err = PTR_ERR(t);
+ ext4_clear_request_list();
+ del_timer_sync(&ext4_li_info->li_timer);
+ kfree(ext4_li_info);
+ ext4_li_info = NULL;
+ printk(KERN_CRIT "EXT4: error %d creating inode table "
+ "initialization thread\n",
+ err);
+ return err;
+ }
+ ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
+
+ wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL);
+ return 0;
+}
+
+/*
+ * Check whether it makes sense to run the itable init. thread or not.
+ * If there is at least one uninitialized inode table, return the
+ * corresponding group number; otherwise the loop goes through all
+ * groups and returns the total number of groups.
+ */
+static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
+{
+ ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
+ struct ext4_group_desc *gdp = NULL;
+
+ for (group = 0; group < ngroups; group++) {
+ gdp = ext4_get_group_desc(sb, group, NULL);
+ if (!gdp)
+ continue;
+
+ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+ break;
+ }
+
+ return group;
+}
+
+static int ext4_li_info_new(void)
+{
+ struct ext4_lazy_init *eli = NULL;
+
+ eli = kzalloc(sizeof(*eli), GFP_KERNEL);
+ if (!eli)
+ return -ENOMEM;
+
+ eli->li_task = NULL;
+ INIT_LIST_HEAD(&eli->li_request_list);
+ mutex_init(&eli->li_list_mtx);
+
+ init_waitqueue_head(&eli->li_wait_daemon);
+ init_waitqueue_head(&eli->li_wait_task);
+ init_timer(&eli->li_timer);
+ eli->li_state |= EXT4_LAZYINIT_QUIT;
+
+ ext4_li_info = eli;
+
+ return 0;
+}
+
+static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
+ ext4_group_t start)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_li_request *elr;
+ unsigned long rnd;
+
+ elr = kzalloc(sizeof(*elr), GFP_KERNEL);
+ if (!elr)
+ return NULL;
+
+ elr->lr_super = sb;
+ elr->lr_sbi = sbi;
+ elr->lr_next_group = start;
+
+ /*
+ * Randomize first schedule time of the request to
+ * spread the inode table initialization requests
+ * better.
+ */
+ get_random_bytes(&rnd, sizeof(rnd));
+ elr->lr_next_sched = jiffies + (unsigned long)rnd %
+ (EXT4_DEF_LI_MAX_START_DELAY * HZ);
+
+ return elr;
+}
+
+static int ext4_register_li_request(struct super_block *sb,
+ ext4_group_t first_not_zeroed)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_li_request *elr;
+ ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+ int ret = 0;
+
+ if (sbi->s_li_request != NULL)
+ return 0;
+
+ if (first_not_zeroed == ngroups ||
+ (sb->s_flags & MS_RDONLY) ||
+ !test_opt(sb, INIT_INODE_TABLE)) {
+ sbi->s_li_request = NULL;
+ return 0;
+ }
+
+ if (first_not_zeroed == ngroups) {
+ sbi->s_li_request = NULL;
+ return 0;
+ }
+
+ elr = ext4_li_request_new(sb, first_not_zeroed);
+ if (!elr)
+ return -ENOMEM;
+
+ mutex_lock(&ext4_li_mtx);
+
+ if (NULL == ext4_li_info) {
+ ret = ext4_li_info_new();
+ if (ret)
+ goto out;
+ }
+
+ mutex_lock(&ext4_li_info->li_list_mtx);
+ list_add(&elr->lr_request, &ext4_li_info->li_request_list);
+ mutex_unlock(&ext4_li_info->li_list_mtx);
+
+ sbi->s_li_request = elr;
+
+ if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
+ ret = ext4_run_lazyinit_thread();
+ if (ret)
+ goto out;
+ }
+out:
+ mutex_unlock(&ext4_li_mtx);
+ if (ret)
+ kfree(elr);
+ return ret;
+}
+
+/*
+ * We do not need to lock anything since this is called on
+ * module unload.
+ */
+static void ext4_destroy_lazyinit_thread(void)
+{
+ /*
+ * If the thread exited earlier,
+ * there's nothing to be done.
+ */
+ if (!ext4_li_info)
+ return;
+
+ ext4_clear_request_list();
+
+ while (ext4_li_info->li_task) {
+ wake_up(&ext4_li_info->li_wait_daemon);
+ wait_event(ext4_li_info->li_wait_task,
+ ext4_li_info->li_task == NULL);
+ }
+}
+
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
__releases(kernel_lock)
__acquires(kernel_lock)
@@ -2564,6 +2993,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
__u64 blocks_count;
int err;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
+ ext4_group_t first_not_zeroed;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
@@ -2624,6 +3054,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+ set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
@@ -2901,7 +3332,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
- if (!ext4_check_descriptors(sb)) {
+ if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
goto failed_mount2;
}
@@ -3122,6 +3553,10 @@ no_journal:
goto failed_mount4;
}
+ err = ext4_register_li_request(sb, first_not_zeroed);
+ if (err)
+ goto failed_mount4;
+
sbi->s_kobj.kset = ext4_kset;
init_completion(&sbi->s_kobj_unregister);
err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
@@ -3461,7 +3896,7 @@ static int ext4_load_journal(struct super_block *sb,
EXT4_SB(sb)->s_journal = journal;
ext4_clear_journal_err(sb, es);
- if (journal_devnum &&
+ if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
es->s_journal_dev = cpu_to_le32(journal_devnum);
@@ -3514,9 +3949,12 @@ static int ext4_commit_super(struct super_block *sb, int sync)
else
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
- ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+ if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
+ ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeblocks_counter));
- es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
+ if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+ es->s_free_inodes_count =
+ cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
sb->s_dirt = 0;
BUFFER_TRACE(sbh, "marking dirty");
@@ -3835,6 +4273,19 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
enable_quota = 1;
}
}
+
+ /*
+ * Reinitialize lazy itable initialization thread based on
+ * current settings
+ */
+ if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
+ ext4_unregister_li_request(sb);
+ else {
+ ext4_group_t first_not_zeroed;
+ first_not_zeroed = ext4_has_uninit_itable(sb);
+ ext4_register_li_request(sb, first_not_zeroed);
+ }
+
ext4_setup_system_zone(sb);
if (sbi->s_journal == NULL)
ext4_commit_super(sb, 1);
@@ -4276,23 +4727,53 @@ static struct file_system_type ext4_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
-static int __init init_ext4_fs(void)
+int __init ext4_init_feat_adverts(void)
+{
+ struct ext4_features *ef;
+ int ret = -ENOMEM;
+
+ ef = kzalloc(sizeof(struct ext4_features), GFP_KERNEL);
+ if (!ef)
+ goto out;
+
+ ef->f_kobj.kset = ext4_kset;
+ init_completion(&ef->f_kobj_unregister);
+ ret = kobject_init_and_add(&ef->f_kobj, &ext4_feat_ktype, NULL,
+ "features");
+ if (ret) {
+ kfree(ef);
+ goto out;
+ }
+
+ ext4_feat = ef;
+ ret = 0;
+out:
+ return ret;
+}
+
+static int __init ext4_init_fs(void)
{
int err;
ext4_check_flag_values();
- err = init_ext4_system_zone();
+ err = ext4_init_pageio();
if (err)
return err;
+ err = ext4_init_system_zone();
+ if (err)
+ goto out5;
ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
if (!ext4_kset)
goto out4;
ext4_proc_root = proc_mkdir("fs/ext4", NULL);
- err = init_ext4_mballoc();
+
+ err = ext4_init_feat_adverts();
+
+ err = ext4_init_mballoc();
if (err)
goto out3;
- err = init_ext4_xattr();
+ err = ext4_init_xattr();
if (err)
goto out2;
err = init_inodecache();
@@ -4303,38 +4784,46 @@ static int __init init_ext4_fs(void)
err = register_filesystem(&ext4_fs_type);
if (err)
goto out;
+
+ ext4_li_info = NULL;
+ mutex_init(&ext4_li_mtx);
return 0;
out:
unregister_as_ext2();
unregister_as_ext3();
destroy_inodecache();
out1:
- exit_ext4_xattr();
+ ext4_exit_xattr();
out2:
- exit_ext4_mballoc();
+ ext4_exit_mballoc();
out3:
+ kfree(ext4_feat);
remove_proc_entry("fs/ext4", NULL);
kset_unregister(ext4_kset);
out4:
- exit_ext4_system_zone();
+ ext4_exit_system_zone();
+out5:
+ ext4_exit_pageio();
return err;
}
-static void __exit exit_ext4_fs(void)
+static void __exit ext4_exit_fs(void)
{
+ ext4_destroy_lazyinit_thread();
unregister_as_ext2();
unregister_as_ext3();
unregister_filesystem(&ext4_fs_type);
destroy_inodecache();
- exit_ext4_xattr();
- exit_ext4_mballoc();
+ ext4_exit_xattr();
+ ext4_exit_mballoc();
remove_proc_entry("fs/ext4", NULL);
kset_unregister(ext4_kset);
- exit_ext4_system_zone();
+ ext4_exit_system_zone();
+ ext4_exit_pageio();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
-module_init(init_ext4_fs)
-module_exit(exit_ext4_fs)
+module_init(ext4_init_fs)
+module_exit(ext4_exit_fs)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3a8cd8dff1ad..fa4b899da4b3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1588,7 +1588,7 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
#undef BLOCK_HASH_SHIFT
int __init
-init_ext4_xattr(void)
+ext4_init_xattr(void)
{
ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
if (!ext4_xattr_cache)
@@ -1597,7 +1597,7 @@ init_ext4_xattr(void)
}
void
-exit_ext4_xattr(void)
+ext4_exit_xattr(void)
{
if (ext4_xattr_cache)
mb_cache_destroy(ext4_xattr_cache);
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 518e96e43905..1ef16520b950 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -83,8 +83,8 @@ extern void ext4_xattr_put_super(struct super_block *);
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
-extern int init_ext4_xattr(void);
-extern void exit_ext4_xattr(void);
+extern int __init ext4_init_xattr(void);
+extern void ext4_exit_xattr(void);
extern const struct xattr_handler *ext4_xattr_handlers[];
@@ -121,14 +121,14 @@ ext4_xattr_put_super(struct super_block *sb)
{
}
-static inline int
-init_ext4_xattr(void)
+static __init inline int
+ext4_init_xattr(void)
{
return 0;
}
static inline void
-exit_ext4_xattr(void)
+ext4_exit_xattr(void)
{
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f8cc34f542c3..ecc8b3954ed6 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
* match the state "is the filp on a fasync list".
*
*/
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
int result = 0;
@@ -666,21 +666,31 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
return result;
}
+struct fasync_struct *fasync_alloc(void)
+{
+ return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
/*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
+ */
+void fasync_free(struct fasync_struct *new)
+{
+ kmem_cache_free(fasync_cache, new);
+}
+
+/*
+ * Insert a new entry into the fasync list. Return the pointer to the
+ * old one if we didn't use the new one.
*
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*/
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
- struct fasync_struct *new, *fa, **fp;
- int result = 0;
-
- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
+ struct fasync_struct *fa, **fp;
spin_lock(&filp->f_lock);
spin_lock(&fasync_lock);
@@ -691,8 +701,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
spin_lock_irq(&fa->fa_lock);
fa->fa_fd = fd;
spin_unlock_irq(&fa->fa_lock);
-
- kmem_cache_free(fasync_cache, new);
goto out;
}
@@ -702,13 +710,39 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fa
new->fa_fd = fd;
new->fa_next = *fapp;
rcu_assign_pointer(*fapp, new);
- result = 1;
filp->f_flags |= FASYNC;
out:
spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
- return result;
+ return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+ struct fasync_struct *new;
+
+ new = fasync_alloc();
+ if (!new)
+ return -ENOMEM;
+
+ /*
+ * fasync_insert_entry() returns the old (updated) entry if
+ * it existed.
+ *
+ * So free the (unused) new entry and return 0 to let the
+ * caller know that we didn't add any new fasync entries.
+ */
+ if (fasync_insert_entry(fd, filp, fapp, new)) {
+ fasync_free(new);
+ return 0;
+ }
+
+ return 1;
}
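The point of splitting the old fasync_add_entry() into fasync_alloc(), fasync_insert_entry() and fasync_free() is that the GFP_KERNEL allocation can now happen before a caller takes whatever locks it needs. A minimal sketch of that pattern (the function name and the simplified return convention are assumptions, not part of this patch):

static int example_fasync(int fd, struct file *filp, int on,
			  struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	if (!on)
		return fasync_remove_entry(filp, fapp);

	new = fasync_alloc();		/* may sleep; done before any caller lock */
	if (!new)
		return -ENOMEM;

	/* caller-specific locking could be taken here */
	if (fasync_insert_entry(fd, filp, fapp, new))
		fasync_free(new);	/* an existing entry was updated instead */
	/* ... and released here */

	return 0;
}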
/*
diff --git a/fs/file_table.c b/fs/file_table.c
index a04bdd81c11c..c3dee381f1b4 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -60,7 +60,7 @@ static inline void file_free(struct file *f)
/*
* Return the total number of open files in the system
*/
-static int get_nr_files(void)
+static long get_nr_files(void)
{
return percpu_counter_read_positive(&nr_files);
}
@@ -68,7 +68,7 @@ static int get_nr_files(void)
/*
* Return the maximum number of open files in the system
*/
-int get_max_files(void)
+unsigned long get_max_files(void)
{
return files_stat.max_files;
}
@@ -82,7 +82,7 @@ int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
@@ -105,7 +105,7 @@ int proc_nr_files(ctl_table *table, int write,
struct file *get_empty_filp(void)
{
const struct cred *cred = current_cred();
- static int old_max;
+ static long old_max;
struct file * f;
/*
@@ -140,8 +140,7 @@ struct file *get_empty_filp(void)
over:
/* Ran out of filps - report that */
if (get_nr_files() > old_max) {
- printk(KERN_INFO "VFS: file-max limit %d reached\n",
- get_max_files());
+ pr_info("VFS: file-max limit %lu reached\n", get_max_files());
old_max = get_nr_files();
}
goto fail;
@@ -487,7 +486,7 @@ retry:
void __init files_init(unsigned long mempages)
{
- int n;
+ unsigned long n;
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
@@ -498,9 +497,7 @@ void __init files_init(unsigned long mempages)
*/
n = (mempages * (PAGE_SIZE / 1024)) / 10;
- files_stat.max_files = n;
- if (files_stat.max_files < NR_FILE)
- files_stat.max_files = NR_FILE;
+ files_stat.max_files = max_t(unsigned long, n, NR_FILE);
files_defer_init();
lg_lock_init(files_lglock);
percpu_counter_init(&nr_files, 0);
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 79d1b4ea13e7..8c04eac5079d 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -260,6 +260,7 @@ vxfs_get_fake_inode(struct super_block *sbp, struct vxfs_inode_info *vip)
struct inode *ip = NULL;
if ((ip = new_inode(sbp))) {
+ ip->i_ino = get_next_ino();
vxfs_iinit(ip, vip);
ip->i_mapping->a_ops = &vxfs_aops;
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ab38fef1c9a1..aed881a76b22 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -79,6 +79,11 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
return sb->s_bdi;
}
+static inline struct inode *wb_inode(struct list_head *head)
+{
+ return list_entry(head, struct inode, i_wb_list);
+}
+
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
@@ -172,11 +177,11 @@ static void redirty_tail(struct inode *inode)
if (!list_empty(&wb->b_dirty)) {
struct inode *tail;
- tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+ tail = wb_inode(wb->b_dirty.next);
if (time_before(inode->dirtied_when, tail->dirtied_when))
inode->dirtied_when = jiffies;
}
- list_move(&inode->i_list, &wb->b_dirty);
+ list_move(&inode->i_wb_list, &wb->b_dirty);
}
/*
@@ -186,7 +191,7 @@ static void requeue_io(struct inode *inode)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- list_move(&inode->i_list, &wb->b_more_io);
+ list_move(&inode->i_wb_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
@@ -227,14 +232,14 @@ static void move_expired_inodes(struct list_head *delaying_queue,
int do_sb_sort = 0;
while (!list_empty(delaying_queue)) {
- inode = list_entry(delaying_queue->prev, struct inode, i_list);
+ inode = wb_inode(delaying_queue->prev);
if (older_than_this &&
inode_dirtied_after(inode, *older_than_this))
break;
if (sb && sb != inode->i_sb)
do_sb_sort = 1;
sb = inode->i_sb;
- list_move(&inode->i_list, &tmp);
+ list_move(&inode->i_wb_list, &tmp);
}
/* just one sb in list, splice to dispatch_queue and we're done */
@@ -245,12 +250,11 @@ static void move_expired_inodes(struct list_head *delaying_queue,
/* Move inodes from one superblock together */
while (!list_empty(&tmp)) {
- inode = list_entry(tmp.prev, struct inode, i_list);
- sb = inode->i_sb;
+ sb = wb_inode(tmp.prev)->i_sb;
list_for_each_prev_safe(pos, node, &tmp) {
- inode = list_entry(pos, struct inode, i_list);
+ inode = wb_inode(pos);
if (inode->i_sb == sb)
- list_move(&inode->i_list, dispatch_queue);
+ list_move(&inode->i_wb_list, dispatch_queue);
}
}
}
@@ -408,16 +412,13 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* completion.
*/
redirty_tail(inode);
- } else if (atomic_read(&inode->i_count)) {
- /*
- * The inode is clean, inuse
- */
- list_move(&inode->i_list, &inode_in_use);
} else {
/*
- * The inode is clean, unused
+ * The inode is clean. At this point we either have
+ * a reference to the inode or it's on its way out.
+ * No need to add it back to the LRU.
*/
- list_move(&inode->i_list, &inode_unused);
+ list_del_init(&inode->i_wb_list);
}
}
inode_sync_complete(inode);
@@ -465,8 +466,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
{
while (!list_empty(&wb->b_io)) {
long pages_skipped;
- struct inode *inode = list_entry(wb->b_io.prev,
- struct inode, i_list);
+ struct inode *inode = wb_inode(wb->b_io.prev);
if (inode->i_sb != sb) {
if (only_this_sb) {
@@ -487,10 +487,16 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
return 0;
}
- if (inode->i_state & (I_NEW | I_WILL_FREE)) {
+ /*
+ * Don't bother with new inodes or inodes being freed; the first
+ * kind does not need periodic writeout yet, and for the latter
+ * kind writeout is handled by the freer.
+ */
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
requeue_io(inode);
continue;
}
+
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
@@ -498,7 +504,6 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
if (inode_dirtied_after(inode, wbc->wb_start))
return 1;
- BUG_ON(inode->i_state & I_FREEING);
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
@@ -536,8 +541,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
queue_io(wb, wbc->older_than_this);
while (!list_empty(&wb->b_io)) {
- struct inode *inode = list_entry(wb->b_io.prev,
- struct inode, i_list);
+ struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
if (!pin_sb_for_writeback(sb)) {
@@ -582,7 +586,7 @@ static inline bool over_bground_thresh(void)
global_dirty_limits(&background_thresh, &dirty_thresh);
return (global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+ global_page_state(NR_UNSTABLE_NFS) > background_thresh);
}
/*
@@ -675,8 +679,7 @@ static long wb_writeback(struct bdi_writeback *wb,
*/
spin_lock(&inode_lock);
if (!list_empty(&wb->b_more_io)) {
- inode = list_entry(wb->b_more_io.prev,
- struct inode, i_list);
+ inode = wb_inode(wb->b_more_io.prev);
trace_wbc_writeback_wait(&wbc, wb->bdi);
inode_wait_for_writeback(inode);
}
@@ -721,9 +724,13 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
return 0;
wb->last_old_flush = jiffies;
+ /*
+ * Add in the number of potentially dirty inodes, because each inode
+ * write can dirty pagecache in the underlying blockdev.
+ */
nr_pages = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS) +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+ get_nr_dirty_inodes();
if (nr_pages) {
struct wb_writeback_work work = {
@@ -790,7 +797,7 @@ int bdi_writeback_thread(void *data)
struct backing_dev_info *bdi = wb->bdi;
long pages_written;
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ current->flags |= PF_SWAPWRITE;
set_freezable();
wb->last_active = jiffies;
@@ -962,7 +969,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* dirty list. Add blockdev inodes as well.
*/
if (!S_ISBLK(inode->i_mode)) {
- if (hlist_unhashed(&inode->i_hash))
+ if (inode_unhashed(inode))
goto out;
}
if (inode->i_state & I_FREEING)
@@ -990,7 +997,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
}
inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &bdi->wb.b_dirty);
+ list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
}
}
out:
@@ -1090,8 +1097,7 @@ void writeback_inodes_sb(struct super_block *sb)
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- work.nr_pages = nr_dirty + nr_unstable +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+ work.nr_pages = nr_dirty + nr_unstable + get_nr_dirty_inodes();
bdi_queue_work(sb->s_bdi, &work);
wait_for_completion(&done);
@@ -1198,3 +1204,23 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
EXPORT_SYMBOL(sync_inode);
+
+/**
+ * sync_inode_metadata - write an inode to disk
+ * @inode: the inode to sync
+ * @wait: wait for I/O to complete.
+ *
+ * Write an inode to disk and adjust its dirty state after completion.
+ *
+ * Note: only writes the actual inode, no associated data or other metadata.
+ */
+int sync_inode_metadata(struct inode *inode, int wait)
+{
+ struct writeback_control wbc = {
+ .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .nr_to_write = 0, /* metadata-only */
+ };
+
+ return sync_inode(inode, &wbc);
+}
+EXPORT_SYMBOL(sync_inode_metadata);
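A hedged sketch of the kind of caller this helper targets: a filesystem ->fsync method that only needs the inode itself written back, with no separate data or other metadata to flush. The function name and the i_state checks are illustrative assumptions, not taken from this patch:

static int example_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	if (!(inode->i_state & I_DIRTY))
		return 0;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	return sync_inode_metadata(inode, 1);	/* wait == 1 -> WB_SYNC_ALL */
}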
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 7367e177186f..4eba07661e5c 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -222,6 +222,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
if (!inode)
return NULL;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cde755cca564..6e07696308dc 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -809,11 +809,9 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
int err;
struct page *page = *pagep;
- if (page && zeroing && count < PAGE_SIZE) {
- void *mapaddr = kmap_atomic(page, KM_USER1);
- memset(mapaddr, 0, PAGE_SIZE);
- kunmap_atomic(mapaddr, KM_USER1);
- }
+ if (page && zeroing && count < PAGE_SIZE)
+ clear_highpage(page);
+
while (count) {
if (cs->write && cs->pipebufs && page) {
return fuse_ref_page(cs, page, offset, count);
@@ -830,10 +828,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
}
}
if (page) {
- void *mapaddr = kmap_atomic(page, KM_USER1);
+ void *mapaddr = kmap_atomic(page, KM_USER0);
void *buf = mapaddr + offset;
offset += fuse_copy_do(cs, &buf, &count);
- kunmap_atomic(mapaddr, KM_USER1);
+ kunmap_atomic(mapaddr, KM_USER0);
} else
offset += fuse_copy_do(cs, NULL, &count);
}
@@ -1336,12 +1334,7 @@ out_finish:
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
- int i;
-
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
- page_cache_release(page);
- }
+ release_pages(req->pages, req->num_pages, 0);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 6b24afb96aae..4f36f8832b9b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -618,7 +618,6 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
struct gfs2_alloc *al = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- unsigned to = from + len;
struct page *page;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -691,7 +690,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
}
prepare_write:
- error = block_prepare_write(page, from, to, gfs2_block_map);
+ error = __block_write_begin(page, from, len, gfs2_block_map);
out:
if (error == 0)
return 0;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index f3b071f921aa..939739c7b3f9 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
* activity, but those code paths have their own higher-level
* throttling.
*/
- if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index aeafc233dc89..cade1acbcea9 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1219,7 +1219,6 @@ fail_sb:
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
- invalidate_inodes(sb);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
fail_sys:
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 0534510200d5..12cbea7502c2 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -255,7 +255,7 @@ out_parent:
gfs2_holder_uninit(ghs);
gfs2_holder_uninit(ghs + 1);
if (!error) {
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
}
@@ -1294,7 +1294,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
int error;
if (!page_has_buffers(page)) {
- error = block_prepare_write(page, from, to, gfs2_block_map);
+ error = __block_write_begin(page, from, to - from, gfs2_block_map);
if (unlikely(error))
return error;
@@ -1313,7 +1313,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
next += bh->b_size;
if (buffer_mapped(bh)) {
if (end) {
- error = block_prepare_write(page, start, end,
+ error = __block_write_begin(page, start, end - start,
gfs2_block_map);
if (unlikely(error))
return error;
@@ -1328,7 +1328,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
} while (next < to);
if (end) {
- error = block_prepare_write(page, start, end, gfs2_block_map);
+ error = __block_write_begin(page, start, end - start, gfs2_block_map);
if (unlikely(error))
return error;
empty_write_end(page, start, end);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 047d1176096c..2b2c4997430b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -857,7 +857,6 @@ restart:
gfs2_clear_rgrpd(sdp);
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
- invalidate_inodes(sdp->sd_vfs);
gfs2_gl_hash_clear(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 4f55651aaa51..c8cffb81e849 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -147,8 +147,6 @@ struct hfs_sb_info {
u16 blockoffset;
int fs_div;
-
- struct hlist_head rsrc_inodes;
};
#define HFS_FLG_BITMAP_DIRTY 0
@@ -254,17 +252,6 @@ static inline void hfs_bitmap_dirty(struct super_block *sb)
sb->s_dirt = 1;
}
-static inline void hfs_buffer_sync(struct buffer_head *bh)
-{
- while (buffer_locked(bh)) {
- wait_on_buffer(bh);
- }
- if (buffer_dirty(bh)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
-}
-
#define sb_bread512(sb, sec, data) ({ \
struct buffer_head *__bh; \
sector_t __block; \
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 397b7adc7ce6..dffb4e996643 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -524,7 +524,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
HFS_I(inode)->rsrc_inode = dir;
HFS_I(dir)->rsrc_inode = inode;
igrab(dir);
- hlist_add_head(&inode->i_hash, &HFS_SB(dir->i_sb)->rsrc_inodes);
+ hlist_add_fake(&inode->i_hash);
mark_inode_dirty(inode);
out:
d_add(dentry, inode);
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 86428f5ac991..1563d5ce5764 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -220,7 +220,7 @@ int hfs_mdb_get(struct super_block *sb)
mdb->drLsMod = hfs_mtime();
mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
- hfs_buffer_sync(HFS_SB(sb)->mdb_bh);
+ sync_dirty_buffer(HFS_SB(sb)->mdb_bh);
}
return 0;
@@ -287,7 +287,7 @@ void hfs_mdb_commit(struct super_block *sb)
HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh);
- hfs_buffer_sync(HFS_SB(sb)->alt_mdb_bh);
+ sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh);
}
if (test_and_clear_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags)) {
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 33254160f650..6ee1586f2334 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -382,7 +382,6 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_fs_info = sbi;
- INIT_HLIST_HEAD(&sbi->rsrc_inodes);
res = -EINVAL;
if (!parse_options((char *)data, sbi)) {
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index d236d85ec9d7..9d59c0571f59 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -286,7 +286,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
inc_nlink(inode);
hfsplus_instantiate(dst_dentry, inode, cnid);
- atomic_inc(&inode->i_count);
+ ihold(inode);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
sbi->file_count++;
@@ -317,8 +317,10 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
res = hfsplus_rename_cat(inode->i_ino,
dir, &dentry->d_name,
sbi->hidden_dir, &str);
- if (!res)
+ if (!res) {
inode->i_flags |= S_DEAD;
+ drop_nlink(inode);
+ }
goto out;
}
res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 78449280dae0..8afd7e84f98d 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -211,7 +211,7 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
* appear hashed, but do not put on any lists. hlist_del()
* will work fine and require no locking.
*/
- inode->i_hash.pprev = &inode->i_hash.next;
+ hlist_add_fake(&inode->i_hash);
mark_inode_dirty(inode);
out:
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 5b4667e08ef7..40a85a3ded6e 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -92,7 +92,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
mark_inode_dirty(inode);
out_unlock_inode:
- mutex_lock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
out_drop_write:
mnt_drop_write(file->f_path.mnt);
out:
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 7c232c1487ee..bf15a43016b9 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -91,7 +91,6 @@ extern int rename_file(char *from, char *to);
extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out);
+ void *fsid_out, int fsid_size, long *namelen_out);
#endif
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index f7dc9b5f9ef8..cd7c93917cc7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -217,7 +217,7 @@ int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
err = do_statfs(dentry->d_sb->s_fs_info,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
- &sf->f_namelen, sf->f_spare);
+ &sf->f_namelen);
if (err)
return err;
sf->f_blocks = f_blocks;
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 6777aa06ce2c..d51a98384bc0 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -94,8 +94,7 @@ void *open_dir(char *path, int *err_out)
dir = opendir(path);
*err_out = errno;
- if (dir == NULL)
- return NULL;
+
return dir;
}
@@ -205,7 +204,7 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
if (attrs->ia_valid & HOSTFS_ATTR_MODE) {
if (fd >= 0) {
if (fchmod(fd, attrs->ia_mode) != 0)
- return (-errno);
+ return -errno;
} else if (chmod(file, attrs->ia_mode) != 0) {
return -errno;
}
@@ -364,8 +363,7 @@ int rename_file(char *from, char *to)
int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
- void *fsid_out, int fsid_size, long *namelen_out,
- long *spare_out)
+ void *fsid_out, int fsid_size, long *namelen_out)
{
struct statfs64 buf;
int err;
@@ -384,10 +382,6 @@ int do_statfs(char *root, long *bsize_out, long long *blocks_out,
sizeof(buf.f_fsid) > fsid_size ? fsid_size :
sizeof(buf.f_fsid));
*namelen_out = buf.f_namelen;
- spare_out[0] = buf.f_spare[0];
- spare_out[1] = buf.f_spare[1];
- spare_out[2] = buf.f_spare[2];
- spare_out[3] = buf.f_spare[3];
- spare_out[4] = buf.f_spare[4];
+
return 0;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 113eba3d3c38..b14be3f781c7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -31,6 +31,7 @@
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
+#include <linux/migrate.h>
#include <asm/uaccess.h>
@@ -455,6 +456,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode = new_inode(sb);
if (inode) {
struct hugetlbfs_inode_info *info;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = uid;
inode->i_gid = gid;
@@ -573,6 +575,19 @@ static int hugetlbfs_set_page_dirty(struct page *page)
return 0;
}
+static int hugetlbfs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ int rc;
+
+ rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+ if (rc)
+ return rc;
+ migrate_page_copy(newpage, page);
+
+ return 0;
+}
+
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
@@ -659,6 +674,7 @@ static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end,
.set_page_dirty = hugetlbfs_set_page_dirty,
+ .migratepage = hugetlbfs_migrate_page,
};
diff --git a/fs/inode.c b/fs/inode.c
index 86464332e590..ae2727ab0c3a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -24,11 +24,11 @@
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
+#include <linux/ima.h>
/*
* This is needed for the following functions:
* - inode_has_buffers
- * - invalidate_inode_buffers
* - invalidate_bdev
*
* FIXME: remove all knowledge of the buffer layer from this file
@@ -72,8 +72,7 @@ static unsigned int i_hash_shift __read_mostly;
* allowing for low-overhead inode sync() operations.
*/
-LIST_HEAD(inode_in_use);
-LIST_HEAD(inode_unused);
+static LIST_HEAD(inode_lru);
static struct hlist_head *inode_hashtable __read_mostly;
/*
@@ -103,8 +102,41 @@ static DECLARE_RWSEM(iprune_sem);
*/
struct inodes_stat_t inodes_stat;
+static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
+static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
+
static struct kmem_cache *inode_cachep __read_mostly;
+static inline int get_nr_inodes(void)
+{
+ return percpu_counter_sum_positive(&nr_inodes);
+}
+
+static inline int get_nr_inodes_unused(void)
+{
+ return percpu_counter_sum_positive(&nr_inodes_unused);
+}
+
+int get_nr_dirty_inodes(void)
+{
+ int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
+ return nr_dirty > 0 ? nr_dirty : 0;
+
+}
+
+/*
+ * Handle nr_inode sysctl
+ */
+#ifdef CONFIG_SYSCTL
+int proc_nr_inodes(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ inodes_stat.nr_inodes = get_nr_inodes();
+ inodes_stat.nr_unused = get_nr_inodes_unused();
+ return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
static void wake_up_inode(struct inode *inode)
{
/*
@@ -192,6 +224,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_fsnotify_mask = 0;
#endif
+ percpu_counter_inc(&nr_inodes);
+
return 0;
out:
return -ENOMEM;
@@ -232,11 +266,13 @@ void __destroy_inode(struct inode *inode)
if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
posix_acl_release(inode->i_default_acl);
#endif
+ percpu_counter_dec(&nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);
-void destroy_inode(struct inode *inode)
+static void destroy_inode(struct inode *inode)
{
+ BUG_ON(!list_empty(&inode->i_lru));
__destroy_inode(inode);
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
@@ -255,6 +291,8 @@ void inode_init_once(struct inode *inode)
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_devices);
+ INIT_LIST_HEAD(&inode->i_wb_list);
+ INIT_LIST_HEAD(&inode->i_lru);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
spin_lock_init(&inode->i_data.tree_lock);
spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -281,14 +319,109 @@ static void init_once(void *foo)
*/
void __iget(struct inode *inode)
{
- if (atomic_inc_return(&inode->i_count) != 1)
- return;
+ atomic_inc(&inode->i_count);
+}
+
+/*
+ * get additional reference to inode; caller must already hold one.
+ */
+void ihold(struct inode *inode)
+{
+ WARN_ON(atomic_inc_return(&inode->i_count) < 2);
+}
+EXPORT_SYMBOL(ihold);
+
+static void inode_lru_list_add(struct inode *inode)
+{
+ if (list_empty(&inode->i_lru)) {
+ list_add(&inode->i_lru, &inode_lru);
+ percpu_counter_inc(&nr_inodes_unused);
+ }
+}
- if (!(inode->i_state & (I_DIRTY|I_SYNC)))
- list_move(&inode->i_list, &inode_in_use);
- inodes_stat.nr_unused--;
+static void inode_lru_list_del(struct inode *inode)
+{
+ if (!list_empty(&inode->i_lru)) {
+ list_del_init(&inode->i_lru);
+ percpu_counter_dec(&nr_inodes_unused);
+ }
+}
+
+static inline void __inode_sb_list_add(struct inode *inode)
+{
+ list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
}
+/**
+ * inode_sb_list_add - add inode to the superblock list of inodes
+ * @inode: inode to add
+ */
+void inode_sb_list_add(struct inode *inode)
+{
+ spin_lock(&inode_lock);
+ __inode_sb_list_add(inode);
+ spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_sb_list_add);
+
+static inline void __inode_sb_list_del(struct inode *inode)
+{
+ list_del_init(&inode->i_sb_list);
+}
+
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+ unsigned long tmp;
+
+ tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+ L1_CACHE_BYTES;
+ tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+ return tmp & I_HASHMASK;
+}
+
+/**
+ * __insert_inode_hash - hash an inode
+ * @inode: unhashed inode
+ * @hashval: unsigned long value used to locate this object in the
+ * inode_hashtable.
+ *
+ * Add an inode to the inode hash for this superblock.
+ */
+void __insert_inode_hash(struct inode *inode, unsigned long hashval)
+{
+ struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
+
+ spin_lock(&inode_lock);
+ hlist_add_head(&inode->i_hash, b);
+ spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(__insert_inode_hash);
+
+/**
+ * __remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+ *
+ * Remove an inode from the inode hash table.
+ */
+static void __remove_inode_hash(struct inode *inode)
+{
+ hlist_del_init(&inode->i_hash);
+}
+
+/**
+ * remove_inode_hash - remove an inode from the hash
+ * @inode: inode to unhash
+ *
+ * Remove an inode from the inode hash table.
+ */
+void remove_inode_hash(struct inode *inode)
+{
+ spin_lock(&inode_lock);
+ hlist_del_init(&inode->i_hash);
+ spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(remove_inode_hash);
+
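For context, a hedged sketch of how a filesystem typically pairs these helpers (the myfs_* functions are hypothetical): hash the inode once its number is known so ilookup() can find it, and unhash it again at teardown.

static struct inode *myfs_make_inode(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = ino;
	__insert_inode_hash(inode, ino);	/* now visible to ilookup() */
	return inode;
}

static void myfs_forget_inode(struct inode *inode)
{
	remove_inode_hash(inode);		/* hlist_del_init() is safe to repeat */
}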
void end_writeback(struct inode *inode)
{
might_sleep();
@@ -327,101 +460,113 @@ static void evict(struct inode *inode)
*/
static void dispose_list(struct list_head *head)
{
- int nr_disposed = 0;
-
while (!list_empty(head)) {
struct inode *inode;
- inode = list_first_entry(head, struct inode, i_list);
- list_del(&inode->i_list);
+ inode = list_first_entry(head, struct inode, i_lru);
+ list_del_init(&inode->i_lru);
evict(inode);
spin_lock(&inode_lock);
- hlist_del_init(&inode->i_hash);
- list_del_init(&inode->i_sb_list);
+ __remove_inode_hash(inode);
+ __inode_sb_list_del(inode);
spin_unlock(&inode_lock);
wake_up_inode(inode);
destroy_inode(inode);
- nr_disposed++;
}
- spin_lock(&inode_lock);
- inodes_stat.nr_inodes -= nr_disposed;
- spin_unlock(&inode_lock);
}
-/*
- * Invalidate all inodes for a device.
+/**
+ * evict_inodes - evict all evictable inodes for a superblock
+ * @sb: superblock to operate on
+ *
+ * Make sure that no inodes with zero refcount are retained. This is
+ * called during superblock shutdown, after the MS_ACTIVE flag has been
+ * removed, so any inode reaching zero refcount during or after that call will
+ * be immediately evicted.
*/
-static int invalidate_list(struct list_head *head, struct list_head *dispose)
+void evict_inodes(struct super_block *sb)
{
- struct list_head *next;
- int busy = 0, count = 0;
-
- next = head->next;
- for (;;) {
- struct list_head *tmp = next;
- struct inode *inode;
+ struct inode *inode, *next;
+ LIST_HEAD(dispose);
- /*
- * We can reschedule here without worrying about the list's
- * consistency because the per-sb list of inodes must not
- * change during umount anymore, and because iprune_sem keeps
- * shrink_icache_memory() away.
- */
- cond_resched_lock(&inode_lock);
+ down_write(&iprune_sem);
- next = next->next;
- if (tmp == head)
- break;
- inode = list_entry(tmp, struct inode, i_sb_list);
- if (inode->i_state & I_NEW)
+ spin_lock(&inode_lock);
+ list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+ if (atomic_read(&inode->i_count))
continue;
- invalidate_inode_buffers(inode);
- if (!atomic_read(&inode->i_count)) {
- list_move(&inode->i_list, dispose);
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state |= I_FREEING;
- count++;
+
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ WARN_ON(1);
continue;
}
- busy = 1;
+
+ inode->i_state |= I_FREEING;
+
+ /*
+ * Move the inode off the IO lists and LRU once I_FREEING is
+ * set so that it won't get moved back on there if it is dirty.
+ */
+ list_move(&inode->i_lru, &dispose);
+ list_del_init(&inode->i_wb_list);
+ if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+ percpu_counter_dec(&nr_inodes_unused);
}
- /* only unused inodes may be cached with i_count zero */
- inodes_stat.nr_unused -= count;
- return busy;
+ spin_unlock(&inode_lock);
+
+ dispose_list(&dispose);
+ up_write(&iprune_sem);
}
/**
- * invalidate_inodes - discard the inodes on a device
- * @sb: superblock
+ * invalidate_inodes - attempt to free all inodes on a superblock
+ * @sb: superblock to operate on
*
- * Discard all of the inodes for a given superblock. If the discard
- * fails because there are busy inodes then a non zero value is returned.
- * If the discard is successful all the inodes have been discarded.
+ * Attempts to free all inodes for a given superblock. If there were any
+ * busy inodes, a non-zero value is returned; otherwise zero.
*/
int invalidate_inodes(struct super_block *sb)
{
- int busy;
- LIST_HEAD(throw_away);
+ int busy = 0;
+ struct inode *inode, *next;
+ LIST_HEAD(dispose);
down_write(&iprune_sem);
+
spin_lock(&inode_lock);
- fsnotify_unmount_inodes(&sb->s_inodes);
- busy = invalidate_list(&sb->s_inodes, &throw_away);
+ list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+ continue;
+ if (atomic_read(&inode->i_count)) {
+ busy = 1;
+ continue;
+ }
+
+ inode->i_state |= I_FREEING;
+
+ /*
+ * Move the inode off the IO lists and LRU once I_FREEING is
+ * set so that it won't get moved back on there if it is dirty.
+ */
+ list_move(&inode->i_lru, &dispose);
+ list_del_init(&inode->i_wb_list);
+ if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+ percpu_counter_dec(&nr_inodes_unused);
+ }
spin_unlock(&inode_lock);
- dispose_list(&throw_away);
+ dispose_list(&dispose);
up_write(&iprune_sem);
return busy;
}
-EXPORT_SYMBOL(invalidate_inodes);
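Note that invalidate_inodes() loses its export here and is now declared in fs/internal.h, so the caller below is purely illustrative; it only shows how the return value is meant to be read (non-zero means busy inodes prevented a full flush).

static int demo_flush_sb(struct super_block *sb)
{
	if (invalidate_inodes(sb))
		return -EBUSY;	/* some inodes still carried references */
	return 0;
}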
static int can_unuse(struct inode *inode)
{
- if (inode->i_state)
+ if (inode->i_state & ~I_REFERENCED)
return 0;
if (inode_has_buffers(inode))
return 0;
@@ -433,22 +578,24 @@ static int can_unuse(struct inode *inode)
}
/*
- * Scan `goal' inodes on the unused list for freeable ones. They are moved to
- * a temporary list and then are freed outside inode_lock by dispose_list().
+ * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
+ * temporary list and then are freed outside inode_lock by dispose_list().
*
* Any inodes which are pinned purely because of attached pagecache have their
- * pagecache removed. We expect the final iput() on that inode to add it to
- * the front of the inode_unused list. So look for it there and if the
- * inode is still freeable, proceed. The right inode is found 99.9% of the
- * time in testing on a 4-way.
+ * pagecache removed. If the inode has metadata buffers attached to
+ * mapping->private_list then try to remove them.
*
- * If the inode has metadata buffers attached to mapping->private_list then
- * try to remove them.
+ * If the inode has the I_REFERENCED flag set, then it means that it has been
+ * used recently - the flag is set in iput_final(). When we encounter such an
+ * inode, clear the flag and move it to the back of the LRU so it gets another
+ * pass through the LRU before it gets reclaimed. This is necessary because we
+ * do lazy LRU updates to minimise lock contention, so the LRU does not have
+ * strict ordering. Hence we don't want to reclaim inodes with this flag set,
+ * because they are the ones that are out of order.
*/
static void prune_icache(int nr_to_scan)
{
LIST_HEAD(freeable);
- int nr_pruned = 0;
int nr_scanned;
unsigned long reap = 0;
@@ -457,13 +604,26 @@ static void prune_icache(int nr_to_scan)
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
struct inode *inode;
- if (list_empty(&inode_unused))
+ if (list_empty(&inode_lru))
break;
- inode = list_entry(inode_unused.prev, struct inode, i_list);
+ inode = list_entry(inode_lru.prev, struct inode, i_lru);
- if (inode->i_state || atomic_read(&inode->i_count)) {
- list_move(&inode->i_list, &inode_unused);
+ /*
+ * Referenced or dirty inodes are still in use. Give them
+ * another pass through the LRU as we cannot reclaim them now.
+ */
+ if (atomic_read(&inode->i_count) ||
+ (inode->i_state & ~I_REFERENCED)) {
+ list_del_init(&inode->i_lru);
+ percpu_counter_dec(&nr_inodes_unused);
+ continue;
+ }
+
+ /* recently referenced inodes get one more pass */
+ if (inode->i_state & I_REFERENCED) {
+ list_move(&inode->i_lru, &inode_lru);
+ inode->i_state &= ~I_REFERENCED;
continue;
}
if (inode_has_buffers(inode) || inode->i_data.nrpages) {
@@ -475,18 +635,23 @@ static void prune_icache(int nr_to_scan)
iput(inode);
spin_lock(&inode_lock);
- if (inode != list_entry(inode_unused.next,
- struct inode, i_list))
+ if (inode != list_entry(inode_lru.next,
+ struct inode, i_lru))
continue; /* wrong inode or list_empty */
if (!can_unuse(inode))
continue;
}
- list_move(&inode->i_list, &freeable);
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
- nr_pruned++;
+
+ /*
+ * Move the inode off the IO lists and LRU once I_FREEING is
+ * set so that it won't get moved back on there if it is dirty.
+ */
+ list_move(&inode->i_lru, &freeable);
+ list_del_init(&inode->i_wb_list);
+ percpu_counter_dec(&nr_inodes_unused);
}
- inodes_stat.nr_unused -= nr_pruned;
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
else
@@ -518,7 +683,7 @@ static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
return -1;
prune_icache(nr);
}
- return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+ return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
}
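A stand-alone sketch (plain C, hypothetical helper) of the second-chance decision described in the I_REFERENCED comment above; it mirrors the three branches prune_icache() now takes for the inode found at the LRU tail.

enum demo_action { DEMO_SKIP, DEMO_ROTATE, DEMO_RECLAIM };

static enum demo_action demo_classify(int i_count, unsigned long i_state)
{
	if (i_count || (i_state & ~I_REFERENCED))
		return DEMO_SKIP;	/* in use or dirty: drop it off the LRU */
	if (i_state & I_REFERENCED)
		return DEMO_ROTATE;	/* clear the flag, give one more pass */
	return DEMO_RECLAIM;		/* cold and clean: free it */
}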
static struct shrinker icache_shrinker = {
@@ -529,9 +694,6 @@ static struct shrinker icache_shrinker = {
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
- * NOTE: we are not increasing the inode-refcount, you must call __iget()
- * by hand after calling find_inode now! This simplifies iunique and won't
- * add any additional branch in the common code.
*/
static struct inode *find_inode(struct super_block *sb,
struct hlist_head *head,
@@ -551,9 +713,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
- break;
+ __iget(inode);
+ return inode;
}
- return node ? inode : NULL;
+ return NULL;
}
/*
@@ -576,53 +739,49 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
- break;
+ __iget(inode);
+ return inode;
}
- return node ? inode : NULL;
-}
-
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
- unsigned long tmp;
-
- tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
- L1_CACHE_BYTES;
- tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
- return tmp & I_HASHMASK;
-}
-
-static inline void
-__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
- struct inode *inode)
-{
- inodes_stat.nr_inodes++;
- list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_sb_list, &sb->s_inodes);
- if (head)
- hlist_add_head(&inode->i_hash, head);
+ return NULL;
}
-/**
- * inode_add_to_lists - add a new inode to relevant lists
- * @sb: superblock inode belongs to
- * @inode: inode to mark in use
+/*
+ * Each cpu owns a range of LAST_INO_BATCH numbers.
+ * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
+ * to renew the exhausted range.
*
- * When an inode is allocated it needs to be accounted for, added to the in use
- * list, the owning superblock and the inode hash. This needs to be done under
- * the inode_lock, so export a function to do this rather than the inode lock
- * itself. We calculate the hash list to add to here so it is all internal
- * which requires the caller to have already set up the inode number in the
- * inode to add.
+ * This does not significantly increase overflow rate because every CPU can
+ * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
+ * NR_CPUS*(LAST_INO_BATCH-1) wastage. With NR_CPUS = 4096 and LAST_INO_BATCH =
+ * 1024 this is ~0.1% of the 2^32 range, and is the worst case. Even 50%
+ * wastage would only double the overflow rate, which does not seem significant.
+ *
+ * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
+ * error if st_ino won't fit in target struct field. Use 32bit counter
+ * here to attempt to avoid that.
*/
-void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+#define LAST_INO_BATCH 1024
+static DEFINE_PER_CPU(unsigned int, last_ino);
+
+unsigned int get_next_ino(void)
{
- struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+ unsigned int *p = &get_cpu_var(last_ino);
+ unsigned int res = *p;
- spin_lock(&inode_lock);
- __inode_add_to_lists(sb, head, inode);
- spin_unlock(&inode_lock);
+#ifdef CONFIG_SMP
+ if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+ static atomic_t shared_last_ino;
+ int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+
+ res = next - LAST_INO_BATCH;
+ }
+#endif
+
+ *p = ++res;
+ put_cpu_var(last_ino);
+ return res;
}
-EXPORT_SYMBOL_GPL(inode_add_to_lists);
+EXPORT_SYMBOL(get_next_ino);
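The batching arithmetic is easiest to see in isolation. The stand-alone program below (plain C, not kernel code) mimics the scheme: each "cpu" burns through a private window of LAST_INO_BATCH numbers and only touches the shared counter when that window is exhausted; the kernel version uses a per-cpu variable plus atomic_add_return() instead of the plain shared integer used here.

#include <stdio.h>

#define LAST_INO_BATCH 1024

static unsigned int shared_last_ino;	/* atomic_t in the kernel */

static unsigned int next_ino(unsigned int *last_ino)	/* per-"cpu" slot */
{
	unsigned int res = *last_ino;

	if ((res & (LAST_INO_BATCH - 1)) == 0) {	/* window exhausted */
		shared_last_ino += LAST_INO_BATCH;	/* atomic_add_return() in the kernel */
		res = shared_last_ino - LAST_INO_BATCH;
	}
	return *last_ino = ++res;
}

int main(void)
{
	unsigned int cpu0 = 0, cpu1 = 0;
	unsigned int a = next_ino(&cpu0);
	unsigned int b = next_ino(&cpu0);
	unsigned int c = next_ino(&cpu1);

	printf("%u %u %u\n", a, b, c);	/* prints "1 2 1025": cpu1 opens its own window */
	return 0;
}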
/**
* new_inode - obtain an inode
@@ -638,12 +797,6 @@ EXPORT_SYMBOL_GPL(inode_add_to_lists);
*/
struct inode *new_inode(struct super_block *sb)
{
- /*
- * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
- * error if st_ino won't fit in target struct field. Use 32bit counter
- * here to attempt to avoid that.
- */
- static unsigned int last_ino;
struct inode *inode;
spin_lock_prefetch(&inode_lock);
@@ -651,8 +804,7 @@ struct inode *new_inode(struct super_block *sb)
inode = alloc_inode(sb);
if (inode) {
spin_lock(&inode_lock);
- __inode_add_to_lists(sb, NULL, inode);
- inode->i_ino = ++last_ino;
+ __inode_sb_list_add(inode);
inode->i_state = 0;
spin_unlock(&inode_lock);
}
@@ -663,7 +815,7 @@ EXPORT_SYMBOL(new_inode);
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- if (inode->i_mode & S_IFDIR) {
+ if (S_ISDIR(inode->i_mode)) {
struct file_system_type *type = inode->i_sb->s_type;
/* Set new key only if filesystem hasn't already changed it */
@@ -720,7 +872,8 @@ static struct inode *get_new_inode(struct super_block *sb,
if (set(inode, data))
goto set_failed;
- __inode_add_to_lists(sb, head, inode);
+ hlist_add_head(&inode->i_hash, head);
+ __inode_sb_list_add(inode);
inode->i_state = I_NEW;
spin_unlock(&inode_lock);
@@ -735,7 +888,6 @@ static struct inode *get_new_inode(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- __iget(old);
spin_unlock(&inode_lock);
destroy_inode(inode);
inode = old;
@@ -767,7 +919,8 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
old = find_inode_fast(sb, head, ino);
if (!old) {
inode->i_ino = ino;
- __inode_add_to_lists(sb, head, inode);
+ hlist_add_head(&inode->i_hash, head);
+ __inode_sb_list_add(inode);
inode->i_state = I_NEW;
spin_unlock(&inode_lock);
@@ -782,7 +935,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
- __iget(old);
spin_unlock(&inode_lock);
destroy_inode(inode);
inode = old;
@@ -791,6 +943,27 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
return inode;
}
+/*
+ * search the inode cache for a matching inode number.
+ * If we find one, then the inode number we are trying to
+ * allocate is not unique and so we should not use it.
+ *
+ * Returns 1 if the inode number is unique, 0 if it is not.
+ */
+static int test_inode_iunique(struct super_block *sb, unsigned long ino)
+{
+ struct hlist_head *b = inode_hashtable + hash(sb, ino);
+ struct hlist_node *node;
+ struct inode *inode;
+
+ hlist_for_each_entry(inode, node, b, i_hash) {
+ if (inode->i_ino == ino && inode->i_sb == sb)
+ return 0;
+ }
+
+ return 1;
+}
+
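A hedged example of the caller side (demo_make_inode is hypothetical): filesystems without stable on-disk inode numbers use iunique() to pick a number that is not already present in the inode cache for this superblock.

static struct inode *demo_make_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode->i_ino = iunique(sb, 10);	/* 10 = highest reserved ino here */
	return inode;
}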
/**
* iunique - get a unique inode number
* @sb: superblock
@@ -812,19 +985,18 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
* error if st_ino won't fit in target struct field. Use 32bit counter
* here to attempt to avoid that.
*/
+ static DEFINE_SPINLOCK(iunique_lock);
static unsigned int counter;
- struct inode *inode;
- struct hlist_head *head;
ino_t res;
spin_lock(&inode_lock);
+ spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
counter = max_reserved + 1;
res = counter++;
- head = inode_hashtable + hash(sb, res);
- inode = find_inode_fast(sb, head, res);
- } while (inode != NULL);
+ } while (!test_inode_iunique(sb, res));
+ spin_unlock(&iunique_lock);
spin_unlock(&inode_lock);
return res;
@@ -876,7 +1048,6 @@ static struct inode *ifind(struct super_block *sb,
spin_lock(&inode_lock);
inode = find_inode(sb, head, test, data);
if (inode) {
- __iget(inode);
spin_unlock(&inode_lock);
if (likely(wait))
wait_on_inode(inode);
@@ -909,7 +1080,6 @@ static struct inode *ifind_fast(struct super_block *sb,
spin_lock(&inode_lock);
inode = find_inode_fast(sb, head, ino);
if (inode) {
- __iget(inode);
spin_unlock(&inode_lock);
wait_on_inode(inode);
return inode;
@@ -1095,7 +1265,7 @@ int insert_inode_locked(struct inode *inode)
__iget(old);
spin_unlock(&inode_lock);
wait_on_inode(old);
- if (unlikely(!hlist_unhashed(&old->i_hash))) {
+ if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
}
@@ -1134,7 +1304,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
__iget(old);
spin_unlock(&inode_lock);
wait_on_inode(old);
- if (unlikely(!hlist_unhashed(&old->i_hash))) {
+ if (unlikely(!inode_unhashed(old))) {
iput(old);
return -EBUSY;
}
@@ -1143,36 +1313,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
}
EXPORT_SYMBOL(insert_inode_locked4);
-/**
- * __insert_inode_hash - hash an inode
- * @inode: unhashed inode
- * @hashval: unsigned long value used to locate this object in the
- * inode_hashtable.
- *
- * Add an inode to the inode hash for this superblock.
- */
-void __insert_inode_hash(struct inode *inode, unsigned long hashval)
-{
- struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
- spin_lock(&inode_lock);
- hlist_add_head(&inode->i_hash, head);
- spin_unlock(&inode_lock);
-}
-EXPORT_SYMBOL(__insert_inode_hash);
-
-/**
- * remove_inode_hash - remove an inode from the hash
- * @inode: inode to unhash
- *
- * Remove an inode from the superblock.
- */
-void remove_inode_hash(struct inode *inode)
-{
- spin_lock(&inode_lock);
- hlist_del_init(&inode->i_hash);
- spin_unlock(&inode_lock);
-}
-EXPORT_SYMBOL(remove_inode_hash);
int generic_delete_inode(struct inode *inode)
{
@@ -1187,7 +1327,7 @@ EXPORT_SYMBOL(generic_delete_inode);
*/
int generic_drop_inode(struct inode *inode)
{
- return !inode->i_nlink || hlist_unhashed(&inode->i_hash);
+ return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);
@@ -1213,10 +1353,11 @@ static void iput_final(struct inode *inode)
drop = generic_drop_inode(inode);
if (!drop) {
- if (!(inode->i_state & (I_DIRTY|I_SYNC)))
- list_move(&inode->i_list, &inode_unused);
- inodes_stat.nr_unused++;
if (sb->s_flags & MS_ACTIVE) {
+ inode->i_state |= I_REFERENCED;
+ if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
+ inode_lru_list_add(inode);
+ }
spin_unlock(&inode_lock);
return;
}
@@ -1227,19 +1368,23 @@ static void iput_final(struct inode *inode)
spin_lock(&inode_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
- inodes_stat.nr_unused--;
- hlist_del_init(&inode->i_hash);
+ __remove_inode_hash(inode);
}
- list_del_init(&inode->i_list);
- list_del_init(&inode->i_sb_list);
+
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
- inodes_stat.nr_inodes--;
+
+ /*
+ * Move the inode off the IO lists and LRU once I_FREEING is
+ * set so that it won't get moved back on there if it is dirty.
+ */
+ inode_lru_list_del(inode);
+ list_del_init(&inode->i_wb_list);
+
+ __inode_sb_list_del(inode);
spin_unlock(&inode_lock);
evict(inode);
- spin_lock(&inode_lock);
- hlist_del_init(&inode->i_hash);
- spin_unlock(&inode_lock);
+ remove_inode_hash(inode);
wake_up_inode(inode);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
destroy_inode(inode);
@@ -1503,6 +1648,8 @@ void __init inode_init(void)
SLAB_MEM_SPREAD),
init_once);
register_shrinker(&icache_shrinker);
+ percpu_counter_init(&nr_inodes, 0);
+ percpu_counter_init(&nr_inodes_unused, 0);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
diff --git a/fs/internal.h b/fs/internal.h
index a6910e91cee8..ebad3b90752d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -101,3 +101,10 @@ extern void put_super(struct super_block *sb);
struct nameidata;
extern struct file *nameidata_to_filp(struct nameidata *);
extern void release_open_intent(struct nameidata *);
+
+/*
+ * inode.c
+ */
+extern int get_nr_dirty_inodes(void);
+extern void evict_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index f855ea4fc888..e92fdbb3bc3a 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -530,6 +530,41 @@ static int ioctl_fsthaw(struct file *filp)
return thaw_super(sb);
}
+static int ioctl_fstrim(struct file *filp, void __user *argp)
+{
+ struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+ struct fstrim_range range;
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* If filesystem doesn't support trim feature, return. */
+ if (sb->s_op->trim_fs == NULL)
+ return -EOPNOTSUPP;
+
+ /* If the filesystem is not backed by a block device, return EINVAL. */
+ if (sb->s_bdev == NULL)
+ return -EINVAL;
+
+ if (argp == NULL) {
+ range.start = 0;
+ range.len = ULLONG_MAX;
+ range.minlen = 0;
+ } else if (copy_from_user(&range, argp, sizeof(range)))
+ return -EFAULT;
+
+ ret = sb->s_op->trim_fs(sb, &range);
+ if (ret < 0)
+ return ret;
+
+ if ((argp != NULL) &&
+ (copy_to_user(argp, &range, sizeof(range))))
+ return -EFAULT;
+
+ return 0;
+}
+
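For the userspace side, a hedged sketch of how the new ioctl is meant to be driven (this assumes the FITRIM number and struct fstrim_range added to linux/fs.h elsewhere in this series; error handling is deliberately minimal).

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int trim_whole_fs(const char *mountpoint)
{
	struct fstrim_range range;
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&range, 0, sizeof(range));
	range.len = ULLONG_MAX;		/* whole filesystem, minlen = 0 */
	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);		/* EOPNOTSUPP if ->trim_fs is missing */
		return -1;
	}
	/* the kernel copies the range back; len may hold the bytes trimmed */
	printf("trimmed up to %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}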
/*
* When you add any new common ioctls to the switches above and below
* please update compat_sys_ioctl() too.
@@ -580,6 +615,10 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
error = ioctl_fsthaw(filp);
break;
+ case FITRIM:
+ error = ioctl_fstrim(filp, argp);
+ break;
+
case FS_IOC_FIEMAP:
return ioctl_fiemap(filp, arg);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 09ff41a752a0..79cf7f616bbe 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -544,6 +544,34 @@ static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
}
/*
+ * Check if the root directory is empty (has fewer than 3 entries, i.e.
+ * nothing besides '.' and '..').
+ *
+ * Used to detect broken CDs where the ISO root directory is empty but the
+ * Joliet root directory is OK. If such a CD has Rock Ridge extensions, they
+ * will be disabled (and Joliet used instead), or else no files would be
+ * visible.
+ */
+static bool rootdir_empty(struct super_block *sb, unsigned long block)
+{
+ int offset = 0, files = 0, de_len;
+ struct iso_directory_record *de;
+ struct buffer_head *bh;
+
+ bh = sb_bread(sb, block);
+ if (!bh)
+ return true;
+ while (files < 3) {
+ de = (struct iso_directory_record *) (bh->b_data + offset);
+ de_len = *(unsigned char *) de;
+ if (de_len == 0)
+ break;
+ files++;
+ offset += de_len;
+ }
+ brelse(bh);
+ return files < 3;
+}
+
+/*
* Initialize the superblock and read the root inode.
*
* Note: a check_disk_change() has been done immediately prior
@@ -843,6 +871,18 @@ root_found:
goto out_no_root;
/*
+ * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+ * correct Joliet root directory.
+ */
+ if (sbi->s_rock == 1 && joliet_level &&
+ rootdir_empty(s, sbi->s_firstdatazone)) {
+ printk(KERN_NOTICE
+ "ISOFS: primary root directory is empty. "
+ "Disabling Rock Ridge and switching to Joliet.");
+ sbi->s_rock = 0;
+ }
+
+ /*
* If this disk has both Rock Ridge and Joliet on it, then we
* want to use Rock Ridge by default. This can be overridden
* by using the norock mount option. There is still one other
@@ -962,25 +1002,23 @@ static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
* or getblk() if they are not. Returns the number of blocks inserted
* (-ve == error.)
*/
-int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
+int isofs_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head **bh, unsigned long nblocks)
{
- unsigned long b_off;
+ unsigned long b_off = iblock;
unsigned offset, sect_size;
unsigned int firstext;
unsigned long nextblk, nextoff;
- long iblock = (long)iblock_s;
int section, rv, error;
struct iso_inode_info *ei = ISOFS_I(inode);
error = -EIO;
rv = 0;
- if (iblock < 0 || iblock != iblock_s) {
+ if (iblock != b_off) {
printk(KERN_DEBUG "%s: block number too large\n", __func__);
goto abort;
}
- b_off = iblock;
offset = 0;
firstext = ei->i_first_extent;
@@ -998,8 +1036,9 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
* I/O errors.
*/
if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
- printk(KERN_DEBUG "%s: block >= EOF (%ld, %ld)\n",
- __func__, iblock, (unsigned long) inode->i_size);
+ printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
+ __func__, b_off,
+ (unsigned long long)inode->i_size);
goto abort;
}
@@ -1025,9 +1064,9 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
if (++section > 100) {
printk(KERN_DEBUG "%s: More than 100 file sections ?!?"
" aborting...\n", __func__);
- printk(KERN_DEBUG "%s: block=%ld firstext=%u sect_size=%u "
+ printk(KERN_DEBUG "%s: block=%lu firstext=%u sect_size=%u "
"nextblk=%lu nextoff=%lu\n", __func__,
- iblock, firstext, (unsigned) sect_size,
+ b_off, firstext, (unsigned) sect_size,
nextblk, nextoff);
goto abort;
}
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05a38b9c4c0e..e4b87bc1fa56 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -221,7 +221,7 @@ restart:
goto restart;
}
if (buffer_locked(bh)) {
- atomic_inc(&bh->b_count);
+ get_bh(bh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
@@ -283,7 +283,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
int ret = 0;
if (buffer_locked(bh)) {
- atomic_inc(&bh->b_count);
+ get_bh(bh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 85a6883c0aca..34a4861c14b8 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -587,13 +587,13 @@ void journal_commit_transaction(journal_t *journal)
/* Bump b_count to prevent truncate from stumbling over
the shadowed buffer! @@@ This can go if we ever get
rid of the BJ_IO/BJ_Shadow pairing of buffers. */
- atomic_inc(&jh2bh(jh)->b_count);
+ get_bh(jh2bh(jh));
/* Make a temporary IO buffer with which to write it out
(this will requeue both the metadata buffer and the
temporary IO buffer). new_bh goes on BJ_IO*/
- set_bit(BH_JWrite, &jh2bh(jh)->b_state);
+ set_buffer_jwrite(jh2bh(jh));
/*
* akpm: journal_write_metadata_buffer() sets
* new_bh->b_transaction to commit_transaction.
@@ -603,7 +603,7 @@ void journal_commit_transaction(journal_t *journal)
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = journal_write_metadata_buffer(commit_transaction,
jh, &new_jh, blocknr);
- set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+ set_buffer_jwrite(jh2bh(new_jh));
wbuf[bufs++] = jh2bh(new_jh);
/* Record the new block's tag in the current descriptor
@@ -713,7 +713,7 @@ wait_for_iobuf:
shadowed buffer */
jh = commit_transaction->t_shadow_list->b_tprev;
bh = jh2bh(jh);
- clear_bit(BH_JWrite, &bh->b_state);
+ clear_buffer_jwrite(bh);
J_ASSERT_BH(bh, buffer_jbddirty(bh));
/* The metadata is now released for reuse, but we need
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 2c4b1f109da9..da1b5e4ffce1 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -36,6 +36,7 @@
#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -84,6 +85,7 @@ EXPORT_SYMBOL(journal_force_commit);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
+static const char *journal_dev_name(journal_t *journal, char *buffer);
/*
* Helper function used to manage commit timeouts
@@ -439,7 +441,7 @@ int __log_start_commit(journal_t *journal, tid_t target)
*/
if (!tid_geq(journal->j_commit_request, target)) {
/*
- * We want a new commit: OK, mark the request and wakup the
+ * We want a new commit: OK, mark the request and wakeup the
* commit thread. We do _not_ do the commit ourselves.
* commit thread. We do _not_ do the commit ourselves.
*/
@@ -950,6 +952,8 @@ int journal_create(journal_t *journal)
if (err)
return err;
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ if (unlikely(!bh))
+ return -ENOMEM;
lock_buffer(bh);
memset (bh->b_data, 0, journal->j_blocksize);
BUFFER_TRACE(bh, "marking dirty");
@@ -1010,6 +1014,23 @@ void journal_update_superblock(journal_t *journal, int wait)
goto out;
}
+ if (buffer_write_io_error(bh)) {
+ char b[BDEVNAME_SIZE];
+ /*
+ * Oh, dear. A previous attempt to write the journal
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but retry the
+ * write and hope for the best.
+ */
+ printk(KERN_ERR "JBD: previous I/O error detected "
+ "for journal superblock update for %s.\n",
+ journal_dev_name(journal, b));
+ clear_buffer_write_io_error(bh);
+ set_buffer_uptodate(bh);
+ }
+
spin_lock(&journal->j_state_lock);
jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence, journal->j_errno);
@@ -1021,9 +1042,17 @@ void journal_update_superblock(journal_t *journal, int wait)
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
- if (wait)
+ if (wait) {
sync_dirty_buffer(bh);
- else
+ if (buffer_write_io_error(bh)) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_ERR "JBD: I/O error detected "
+ "when updating journal superblock for %s.\n",
+ journal_dev_name(journal, b));
+ clear_buffer_write_io_error(bh);
+ set_buffer_uptodate(bh);
+ }
+ } else
write_dirty_buffer(bh, WRITE);
out:
@@ -1719,7 +1748,6 @@ static void journal_destroy_journal_head_cache(void)
static struct journal_head *journal_alloc_journal_head(void)
{
struct journal_head *ret;
- static unsigned long last_warning;
#ifdef CONFIG_JBD_DEBUG
atomic_inc(&nr_journal_heads);
@@ -1727,11 +1755,9 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
if (ret == NULL) {
jbd_debug(1, "out of memory for journal_head\n");
- if (time_after(jiffies, last_warning + 5*HZ)) {
- printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
- __func__);
- last_warning = jiffies;
- }
+ printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
+ __func__);
+
while (ret == NULL) {
yield();
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 81051dafebf5..5b43e96788e6 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -296,10 +296,10 @@ int journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD_DEBUG
int dropped = info.end_transaction -
be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
+#endif
journal->j_transaction_sequence = ++info.end_transaction;
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 5ae71e75a491..846a3f314111 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -293,9 +293,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
jbd_free_handle(handle);
current->journal_info = NULL;
handle = ERR_PTR(err);
- goto out;
}
-out:
return handle;
}
@@ -528,7 +526,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
transaction = handle->h_transaction;
journal = transaction->t_journal;
- jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+ jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
JBUFFER_TRACE(jh, "entry");
repeat:
@@ -713,7 +711,7 @@ done:
J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
"Possible IO failure.\n");
page = jh2bh(jh)->b_page;
- offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+ offset = offset_in_page(jh2bh(jh)->b_data);
source = kmap_atomic(page, KM_USER0);
memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
kunmap_atomic(source, KM_USER0);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 6571a056e55d..6a79fd0a1a32 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -299,6 +299,16 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
transaction->t_chp_stats.cs_forced_to_close++;
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
+ if (unlikely(journal->j_flags & JBD2_UNMOUNT))
+ /*
+ * The journal thread is dead, so starting and
+ * waiting for a commit to finish will cause
+ * us to wait for a _very_ long time.
+ */
+ printk(KERN_ERR "JBD2: %s: "
+ "Waiting for Godot: block %llu\n",
+ journal->j_devname,
+ (unsigned long long) bh->b_blocknr);
jbd2_log_start_commit(journal, tid);
jbd2_log_wait_commit(journal, tid);
ret = 1;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index bc6be8bda1cc..f3ad1598b201 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -26,7 +26,9 @@
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/bitops.h>
#include <trace/events/jbd2.h>
+#include <asm/system.h>
/*
* Default IO end handler for temporary BJ_IO buffer_heads.
@@ -201,7 +203,7 @@ static int journal_submit_data_buffers(journal_t *journal,
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
mapping = jinode->i_vfs_inode->i_mapping;
- jinode->i_flags |= JI_COMMIT_RUNNING;
+ set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
spin_unlock(&journal->j_list_lock);
/*
* submit the inode data buffers. We use writepage
@@ -216,7 +218,8 @@ static int journal_submit_data_buffers(journal_t *journal,
spin_lock(&journal->j_list_lock);
J_ASSERT(jinode->i_transaction == commit_transaction);
commit_transaction->t_flushed_data_blocks = 1;
- jinode->i_flags &= ~JI_COMMIT_RUNNING;
+ clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
+ smp_mb__after_clear_bit();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
spin_unlock(&journal->j_list_lock);
@@ -237,7 +240,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
/* For locking, see the comment in journal_submit_data_buffers() */
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
- jinode->i_flags |= JI_COMMIT_RUNNING;
+ set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
spin_unlock(&journal->j_list_lock);
err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
if (err) {
@@ -253,7 +256,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
ret = err;
}
spin_lock(&journal->j_list_lock);
- jinode->i_flags &= ~JI_COMMIT_RUNNING;
+ clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
+ smp_mb__after_clear_bit();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 262419f83d80..538417c1fdbb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -42,12 +42,14 @@
#include <linux/log2.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
+#include <linux/bitops.h>
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>
#include <asm/uaccess.h>
#include <asm/page.h>
+#include <asm/system.h>
EXPORT_SYMBOL(jbd2_journal_extend);
EXPORT_SYMBOL(jbd2_journal_stop);
@@ -478,7 +480,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
*/
if (!tid_geq(journal->j_commit_request, target)) {
/*
- * We want a new commit: OK, mark the request and wakup the
+ * We want a new commit: OK, mark the request and wakeup the
* commit thread. We do _not_ do the commit ourselves.
*/
@@ -2210,7 +2212,7 @@ void jbd2_journal_release_jbd_inode(journal_t *journal,
restart:
spin_lock(&journal->j_list_lock);
/* Is commit writing out inode - we have to wait */
- if (jinode->i_flags & JI_COMMIT_RUNNING) {
+ if (test_bit(__JI_COMMIT_RUNNING, &jinode->i_flags)) {
wait_queue_head_t *wq;
DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
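For reference, a hedged sketch of the set/clear/wake and wait-on-bit pairing these jbd2 hunks switch to; the demo_* helpers are illustrative, not from the patch, and the surrounding list locking is omitted.

static void demo_commit_start(struct jbd2_inode *jinode)
{
	set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
}

static void demo_commit_done(struct jbd2_inode *jinode)
{
	clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}

static void demo_wait_for_commit(struct jbd2_inode *jinode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);

	wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	if (test_bit(__JI_COMMIT_RUNNING, &jinode->i_flags))
		schedule();
	finish_wait(wq, &wait.wait);
}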
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f3479d6e0a83..6bf0a242613e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -156,6 +156,7 @@ alloc_transaction:
*/
repeat:
read_lock(&journal->j_state_lock);
+ BUG_ON(journal->j_flags & JBD2_UNMOUNT);
if (is_journal_aborted(journal) ||
(journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
read_unlock(&journal->j_state_lock);
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index ed78a3cf3cb0..79121aa5858b 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -289,7 +289,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
mutex_unlock(&f->sem);
d_instantiate(dentry, old_dentry->d_inode);
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
- atomic_inc(&old_dentry->d_inode->i_count);
+ ihold(old_dentry->d_inode);
}
return ret;
}
@@ -864,7 +864,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
/* Might as well let the VFS know */
d_instantiate(new_dentry, old_dentry->d_inode);
- atomic_inc(&old_dentry->d_inode->i_count);
+ ihold(old_dentry->d_inode);
new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index f8332dc8eeb2..3a09423b6c22 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -497,7 +497,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
* appear hashed, but do not put on any lists. hlist_del()
* will work fine and require no locking.
*/
- ip->i_hash.pprev = &ip->i_hash.next;
+ hlist_add_fake(&ip->i_hash);
return (ip);
}
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index d945ea76b445..9466957ec841 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1279,7 +1279,7 @@ int txCommit(tid_t tid, /* transaction identifier */
* lazy commit thread finishes processing
*/
if (tblk->xflag & COMMIT_DELETE) {
- atomic_inc(&tblk->u.ip->i_count);
+ ihold(tblk->u.ip);
/*
* Avoid a rare deadlock
*
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a9cf8e8675be..231ca4af9bce 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -839,7 +839,7 @@ static int jfs_link(struct dentry *old_dentry,
ip->i_ctime = CURRENT_TIME;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
mark_inode_dirty(dir);
- atomic_inc(&ip->i_count);
+ ihold(ip);
iplist[0] = ip;
iplist[1] = dir;
diff --git a/fs/libfs.c b/fs/libfs.c
index 62baa0387d6e..304a5132ca27 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -255,7 +255,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
inc_nlink(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
dget(dentry);
d_instantiate(dentry, inode);
return 0;
@@ -892,10 +892,6 @@ EXPORT_SYMBOL_GPL(generic_fh_to_parent);
*/
int generic_file_fsync(struct file *file, int datasync)
{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0, /* metadata-only; caller takes care of data */
- };
struct inode *inode = file->f_mapping->host;
int err;
int ret;
@@ -906,7 +902,7 @@ int generic_file_fsync(struct file *file, int datasync)
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
return ret;
- err = sync_inode(inode, &wbc);
+ err = sync_inode_metadata(inode, 1);
if (ret == 0)
ret = err;
return ret;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index bb464d12104c..25e21e4023b2 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -353,6 +353,7 @@ nlm_bind_host(struct nlm_host *host)
.to_retries = 5U,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = host->h_proto,
.address = nlm_addr(host),
.addrsize = host->h_addrlen,
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e3015464fbab..e0c918949644 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -69,6 +69,7 @@ static struct rpc_clnt *nsm_create(void)
.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = XPRT_TRANSPORT_UDP,
.address = (struct sockaddr *)&sin,
.addrsize = sizeof(sin),
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index f1bacf1a0391..abfff9d7979d 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -22,7 +22,6 @@
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -130,15 +129,6 @@ lockd(void *vrqstp)
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
- /*
- * FIXME: it would be nice if lockd didn't spend its entire life
- * running under the BKL. At the very least, it would be good to
- * have someone clarify what it's intended to protect here. I've
- * seen some handwavy posts about posix locking needing to be
- * done under the BKL, but it's far from clear.
- */
- lock_kernel();
-
if (!nlm_timeout)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
@@ -195,7 +185,6 @@ lockd(void *vrqstp)
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
- unlock_kernel();
return 0;
}
@@ -206,7 +195,7 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
xprt = svc_find_xprt(serv, name, family, 0);
if (xprt == NULL)
- return svc_create_xprt(serv, name, family, port,
+ return svc_create_xprt(serv, name, &init_net, family, port,
SVC_SOCK_DEFAULTS);
svc_xprt_put(xprt);
return 0;
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 031c6569a134..a336e832475d 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -230,9 +230,7 @@ static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
static void nlm4svc_callback_release(void *data)
{
- lock_kernel();
nlm_release_call(data);
- unlock_kernel();
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 84055d31bfc5..c462d346acbd 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -52,12 +52,13 @@ static const struct rpc_call_ops nlmsvc_grant_ops;
* The list of blocked locks to retry
*/
static LIST_HEAD(nlm_blocked);
+static DEFINE_SPINLOCK(nlm_blocked_lock);
/*
* Insert a blocked lock into the global list
*/
static void
-nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
struct nlm_block *b;
struct list_head *pos;
@@ -87,6 +88,13 @@ nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
block->b_when = when;
}
+static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+{
+ spin_lock(&nlm_blocked_lock);
+ nlmsvc_insert_block_locked(block, when);
+ spin_unlock(&nlm_blocked_lock);
+}
+
/*
* Remove a block from the global list
*/
@@ -94,7 +102,9 @@ static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
if (!list_empty(&block->b_list)) {
+ spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
+ spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
}
}
@@ -651,7 +661,7 @@ static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
struct nlm_block *block;
int rc = -ENOENT;
- lock_kernel();
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
@@ -665,13 +675,13 @@ static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
} else if (result == 0)
block->b_granted = 1;
- nlmsvc_insert_block(block, 0);
+ nlmsvc_insert_block_locked(block, 0);
svc_wake_up(block->b_daemon);
rc = 0;
break;
}
}
- unlock_kernel();
+ spin_unlock(&nlm_blocked_lock);
if (rc == -ENOENT)
printk(KERN_WARNING "lockd: grant for unknown block\n");
return rc;
@@ -690,14 +700,16 @@ nlmsvc_notify_blocked(struct file_lock *fl)
struct nlm_block *block;
dprintk("lockd: VFS unblock notification for block %p\n", fl);
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
- nlmsvc_insert_block(block, 0);
+ nlmsvc_insert_block_locked(block, 0);
+ spin_unlock(&nlm_blocked_lock);
svc_wake_up(block->b_daemon);
return;
}
}
-
+ spin_unlock(&nlm_blocked_lock);
printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
@@ -803,7 +815,7 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
dprintk("lockd: GRANT_MSG RPC callback\n");
- lock_kernel();
+ spin_lock(&nlm_blocked_lock);
/* if the block is not on a list at this point then it has
* been invalidated. Don't try to requeue it.
*
@@ -825,19 +837,20 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
/* Call was successful, now wait for client callback */
timeout = 60 * HZ;
}
- nlmsvc_insert_block(block, timeout);
+ nlmsvc_insert_block_locked(block, timeout);
svc_wake_up(block->b_daemon);
out:
- unlock_kernel();
+ spin_unlock(&nlm_blocked_lock);
}
+/*
+ * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
+ * .rpc_release rpc_call_op
+ */
static void nlmsvc_grant_release(void *data)
{
struct nlm_rqst *call = data;
-
- lock_kernel();
nlmsvc_release_block(call->a_block);
- unlock_kernel();
}
static const struct rpc_call_ops nlmsvc_grant_ops = {
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0f2ab741ae7c..c3069f38d602 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -260,9 +260,7 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
static void nlmsvc_callback_release(void *data)
{
- lock_kernel();
nlm_release_call(data);
- unlock_kernel();
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d0ef94cfb3da..1ca0679c80bf 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -170,6 +170,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
again:
file->f_locks = 0;
+ lock_flocks(); /* protects i_flock list */
for (fl = inode->i_flock; fl; fl = fl->fl_next) {
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
@@ -181,6 +182,7 @@ again:
if (match(lockhost, host)) {
struct file_lock lock = *fl;
+ unlock_flocks();
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
@@ -192,6 +194,7 @@ again:
goto again;
}
}
+ unlock_flocks();
return 0;
}
@@ -226,10 +229,14 @@ nlm_file_inuse(struct nlm_file *file)
if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
return 1;
+ lock_flocks();
for (fl = inode->i_flock; fl; fl = fl->fl_next) {
- if (fl->fl_lmops == &nlmsvc_lock_operations)
+ if (fl->fl_lmops == &nlmsvc_lock_operations) {
+ unlock_flocks();
return 1;
+ }
}
+ unlock_flocks();
file->f_locks = 0;
return 0;
}
diff --git a/fs/locks.c b/fs/locks.c
index 8b2b6ad56a09..50ec15927aab 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,6 +142,7 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
+static DEFINE_SPINLOCK(file_lock_lock);
/*
* Protects the two list heads above, plus the inode->i_flock list
@@ -149,23 +150,24 @@ static LIST_HEAD(blocked_list);
*/
void lock_flocks(void)
{
- lock_kernel();
+ spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
void unlock_flocks(void)
{
- unlock_kernel();
+ spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
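With the BKL gone, any walk of inode->i_flock must now take this spinlock explicitly; the lockd hunks earlier in the series follow the same shape. A hedged sketch (demo_count_posix_locks is hypothetical):

static int demo_count_posix_locks(struct inode *inode)
{
	struct file_lock *fl;
	int n = 0;

	lock_flocks();			/* was lock_kernel() */
	for (fl = inode->i_flock; fl; fl = fl->fl_next)
		if (fl->fl_flags & FL_POSIX)
			n++;
	unlock_flocks();
	return n;
}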
static struct kmem_cache *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
{
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
@@ -1365,7 +1367,6 @@ int fcntl_getlease(struct file *filp)
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
- struct file_lock *new_fl = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1386,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
lease = *flp;
if (arg != F_UNLCK) {
- error = -ENOMEM;
- new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
-
error = -EAGAIN;
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
goto out;
@@ -1434,7 +1430,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
}
- error = 0;
if (arg == F_UNLCK)
goto out;
@@ -1442,15 +1437,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
if (!leases_enable)
goto out;
- locks_copy_lock(new_fl, lease);
- locks_insert_lock(before, new_fl);
-
- *flp = new_fl;
+ locks_insert_lock(before, lease);
return 0;
out:
- if (new_fl != NULL)
- locks_free_lock(new_fl);
+ locks_free_lock(lease);
return error;
}
EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1505,38 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
*/
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
- struct file_lock fl, *flp = &fl;
+ struct file_lock *fl;
+ struct fasync_struct *new;
struct inode *inode = filp->f_path.dentry->d_inode;
int error;
- locks_init_lock(&fl);
- error = lease_init(filp, arg, &fl);
- if (error)
- return error;
+ fl = lease_alloc(filp, arg);
+ if (IS_ERR(fl))
+ return PTR_ERR(fl);
+ new = fasync_alloc();
+ if (!new) {
+ locks_free_lock(fl);
+ return -ENOMEM;
+ }
lock_flocks();
-
- error = __vfs_setlease(filp, arg, &flp);
+ error = __vfs_setlease(filp, arg, &fl);
if (error || arg == F_UNLCK)
goto out_unlock;
- error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+ /*
+ * fasync_insert_entry() returns the old entry if any.
+ * If there was no old entry, then it used 'new' and
+ * inserted it into the fasync list. Clear new so that
+ * we don't release it here.
+ */
+ if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+ new = NULL;
+
if (error < 0) {
/* remove lease just inserted by setlease */
- flp->fl_type = F_UNLCK | F_INPROGRESS;
- flp->fl_break_time = jiffies - 10;
+ fl->fl_type = F_UNLCK | F_INPROGRESS;
+ fl->fl_break_time = jiffies - 10;
time_out_leases(inode);
goto out_unlock;
}
@@ -1541,6 +1544,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
unlock_flocks();
+ if (new)
+ fasync_free(new);
return error;
}
@@ -2109,7 +2114,7 @@ EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#include <linux/seq_file.h>
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
- int id, char *pfx)
+ loff_t id, char *pfx)
{
struct inode *inode = NULL;
unsigned int fl_pid;
@@ -2122,7 +2127,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
if (fl->fl_file != NULL)
inode = fl->fl_file->f_path.dentry->d_inode;
- seq_printf(f, "%d:%s ", id, pfx);
+ seq_printf(f, "%lld:%s ", id, pfx);
if (IS_POSIX(fl)) {
seq_printf(f, "%6s %s ",
(fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
@@ -2185,24 +2190,27 @@ static int locks_show(struct seq_file *f, void *v)
fl = list_entry(v, struct file_lock, fl_link);
- lock_get_status(f, fl, (long)f->private, "");
+ lock_get_status(f, fl, *((loff_t *)f->private), "");
list_for_each_entry(bfl, &fl->fl_block, fl_block)
- lock_get_status(f, bfl, (long)f->private, " ->");
+ lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
- f->private++;
return 0;
}
static void *locks_start(struct seq_file *f, loff_t *pos)
{
+ loff_t *p = f->private;
+
lock_flocks();
- f->private = (void *)1;
+ *p = (*pos + 1);
return seq_list_start(&file_lock_list, *pos);
}
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
+ loff_t *p = f->private;
+ ++*p;
return seq_list_next(v, &file_lock_list, pos);
}
@@ -2220,14 +2228,14 @@ static const struct seq_operations locks_seq_operations = {
static int locks_open(struct inode *inode, struct file *filp)
{
- return seq_open(filp, &locks_seq_operations);
+ return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
static const struct file_operations proc_locks_operations = {
.open = locks_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = seq_release_private,
};
static int __init proc_locks_init(void)
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 1eb4e89e045b..409dfd65e9a1 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -569,7 +569,7 @@ static int logfs_link(struct dentry *old_dentry, struct inode *dir,
return -EMLINK;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- atomic_inc(&inode->i_count);
+ ihold(inode);
inode->i_nlink++;
mark_inode_dirty_sync(inode);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index f3f3578393a4..c0d35a3accef 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -101,7 +101,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
return add_nondir(dentry, inode);
}
diff --git a/fs/namei.c b/fs/namei.c
index 24896e833565..f7dbc06857ab 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1121,11 +1121,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
static struct dentry *__lookup_hash(struct qstr *name,
struct dentry *base, struct nameidata *nd)
{
+ struct inode *inode = base->d_inode;
struct dentry *dentry;
- struct inode *inode;
int err;
- inode = base->d_inode;
+ err = exec_permission(inode);
+ if (err)
+ return ERR_PTR(err);
/*
* See if the low-level filesystem might want
@@ -1161,11 +1163,6 @@ out:
*/
static struct dentry *lookup_hash(struct nameidata *nd)
{
- int err;
-
- err = exec_permission(nd->path.dentry->d_inode);
- if (err)
- return ERR_PTR(err);
return __lookup_hash(&nd->last, nd->path.dentry, nd);
}
@@ -1213,9 +1210,6 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
if (err)
return ERR_PTR(err);
- err = exec_permission(base->d_inode);
- if (err)
- return ERR_PTR(err);
return __lookup_hash(&this, base, NULL);
}
@@ -2291,7 +2285,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
goto slashes;
inode = dentry->d_inode;
if (inode)
- atomic_inc(&inode->i_count);
+ ihold(inode);
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7ca5182c0bed..8a415c9c5e55 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -595,7 +595,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt_flags = old->mnt_flags;
+ mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
atomic_inc(&sb->s_active);
mnt->mnt_sb = sb;
mnt->mnt_root = dget(root);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 5c55c26af165..ba306658a6db 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,7 +1,6 @@
config NFS_FS
tristate "NFS client support"
depends on INET && FILE_LOCKING
- depends on BKL # fix as soon as lockd is done
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
@@ -77,13 +76,17 @@ config NFS_V4
config NFS_V4_1
bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
- depends on NFS_V4 && EXPERIMENTAL
+ depends on NFS_FS && NFS_V4 && EXPERIMENTAL
+ select PNFS_FILE_LAYOUT
help
This option enables support for minor version 1 of the NFSv4 protocol
- (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+ (RFC 5661) in the kernel's NFS client.
If unsure, say N.
+config PNFS_FILE_LAYOUT
+ tristate
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index da7fda639eac..4776ff9e3814 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -15,5 +15,9 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
+nfs-$(CONFIG_NFS_V4_1) += pnfs.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+
+obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
+nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index e17b49e2eabd..aeec017fe814 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -109,7 +109,7 @@ nfs4_callback_up(struct svc_serv *serv)
{
int ret;
- ret = svc_create_xprt(serv, "tcp", PF_INET,
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret <= 0)
goto out_err;
@@ -117,7 +117,7 @@ nfs4_callback_up(struct svc_serv *serv)
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport, PF_INET);
- ret = svc_create_xprt(serv, "tcp", PF_INET6,
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret > 0) {
nfs_callback_tcpport6 = ret;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 930d10fecdaf..2950fca0c61b 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -118,11 +118,11 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n
if (delegation == NULL)
return 0;
- /* seqid is 4-bytes long */
- if (((u32 *) &stateid->data)[0] != 0)
+ if (stateid->stateid.seqid != 0)
return 0;
- if (memcmp(&delegation->stateid.data[4], &stateid->data[4],
- sizeof(stateid->data)-4))
+ if (memcmp(&delegation->stateid.stateid.other,
+ &stateid->stateid.other,
+ NFS4_STATEID_OTHER_SIZE))
return 0;
return 1;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a882785eba41..0870d0d4efc0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -48,6 +48,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -155,7 +156,9 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
-
+#if defined(CONFIG_NFS_V4_1)
+ INIT_LIST_HEAD(&clp->cl_layouts);
+#endif
nfs_fscache_get_client_cookie(clp);
return clp;
@@ -252,6 +255,7 @@ void nfs_put_client(struct nfs_client *clp)
nfs_free_client(clp);
}
}
+EXPORT_SYMBOL_GPL(nfs_put_client);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
@@ -601,6 +605,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = clp->cl_proto,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
@@ -900,6 +905,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ set_pnfs_layoutdriver(server, fsinfo->layouttype);
+
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
@@ -939,6 +946,7 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
}
fsinfo.fattr = fattr;
+ fsinfo.layouttype = 0;
error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
if (error < 0)
goto out_error;
@@ -1021,6 +1029,7 @@ void nfs_free_server(struct nfs_server *server)
{
dprintk("--> nfs_free_server()\n");
+ unset_pnfs_layoutdriver(server);
spin_lock(&nfs_client_lock);
list_del(&server->client_link);
list_del(&server->master_link);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 257e4052492e..07ac3847e562 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1801,7 +1801,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
d_drop(dentry);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
if (error == 0) {
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_add(dentry, inode);
}
return error;
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index dba50a5625db..a6e711ad130f 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -167,7 +167,7 @@ static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
return 0;
}
item = container_of(h, struct nfs_dns_ent, h);
- ttl = (long)item->h.expiry_time - (long)get_seconds();
+ ttl = item->h.expiry_time - seconds_since_boot();
if (ttl < 0)
ttl = 0;
@@ -239,7 +239,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
ttl = get_expiry(&buf);
if (ttl == 0)
goto out;
- key.h.expiry_time = ttl + get_seconds();
+ key.h.expiry_time = ttl + seconds_since_boot();
ret = -ENOMEM;
item = nfs_dns_lookup(cd, &key);
@@ -301,7 +301,7 @@ static int do_cache_lookup_nowait(struct cache_detail *cd,
goto out_err;
ret = -ETIMEDOUT;
if (!test_bit(CACHE_VALID, &(*item)->h.flags)
- || (*item)->h.expiry_time < get_seconds()
+ || (*item)->h.expiry_time < seconds_since_boot()
|| cd->flush_time > (*item)->h.last_refresh)
goto out_put;
ret = -ENOENT;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index e18c31e08a28..e756075637b0 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -36,6 +36,7 @@
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -386,6 +387,10 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file->f_path.dentry->d_name.name,
mapping->host->i_ino, len, (long long) pos);
+ pnfs_update_layout(mapping->host,
+ nfs_file_open_context(file),
+ IOMODE_RW);
+
start:
/*
* Prevent starvation issues if someone is doing a consistency
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index a70e446e1605..ac7b814ce162 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -54,8 +54,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
iput(inode);
return -ENOMEM;
}
- /* Circumvent igrab(): we know the inode is not being freed */
- atomic_inc(&inode->i_count);
+ ihold(inode);
/*
* Ensure that this dentry is invisible to d_find_alias().
* Otherwise, it may be spliced into the tree by
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6eec28656415..314f57164602 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -48,6 +48,7 @@
#include "internal.h"
#include "fscache.h"
#include "dns_resolve.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -1410,6 +1411,7 @@ void nfs4_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
+ pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
@@ -1447,6 +1449,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation = NULL;
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
+ nfsi->layout = NULL;
#endif
}
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index d610203d95c6..eceafe74f473 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -153,6 +153,7 @@ int nfs_mount(struct nfs_mount_request *info)
.rpc_resp = &result,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = info->protocol,
.address = info->sap,
.addrsize = info->salen,
@@ -224,6 +225,7 @@ void nfs_umount(const struct nfs_mount_request *info)
.to_retries = 2,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = IPPROTO_UDP,
.address = info->sap,
.addrsize = info->salen,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
new file mode 100644
index 000000000000..2e92f0d8d654
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.c
@@ -0,0 +1,280 @@
+/*
+ * Module for the pnfs nfs4 file layout driver.
+ * Defines all I/O and Policy interface operations, plus code
+ * to register itself with the pNFS client.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+
+#include "internal.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
+MODULE_DESCRIPTION("The NFSv4 file layout driver");
+
+static int
+filelayout_set_layoutdriver(struct nfs_server *nfss)
+{
+ int status = pnfs_alloc_init_deviceid_cache(nfss->nfs_client,
+ nfs4_fl_free_deviceid_callback);
+ if (status) {
+ printk(KERN_WARNING "%s: deviceid cache could not be "
+ "initialized\n", __func__);
+ return status;
+ }
+ dprintk("%s: deviceid cache has been initialized successfully\n",
+ __func__);
+ return 0;
+}
+
+/* Clear out the layout by destroying its device list */
+static int
+filelayout_clear_layoutdriver(struct nfs_server *nfss)
+{
+ dprintk("--> %s\n", __func__);
+
+ if (nfss->nfs_client->cl_devid_cache)
+ pnfs_put_deviceid_cache(nfss->nfs_client);
+ return 0;
+}
+
+/*
+ * filelayout_check_layout()
+ *
+ * Make sure layout segment parameters are sane WRT the device.
+ * At this point no generic layer initialization of the lseg has occurred,
+ * and nothing has been added to the layout_hdr cache.
+ *
+ */
+static int
+filelayout_check_layout(struct pnfs_layout_hdr *lo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ int status = -EINVAL;
+ struct nfs_server *nfss = NFS_SERVER(lo->inode);
+
+ dprintk("--> %s\n", __func__);
+
+ if (fl->pattern_offset > lgr->range.offset) {
+ dprintk("%s pattern_offset %lld to large\n",
+ __func__, fl->pattern_offset);
+ goto out;
+ }
+
+ if (fl->stripe_unit % PAGE_SIZE) {
+ dprintk("%s Stripe unit (%u) not page aligned\n",
+ __func__, fl->stripe_unit);
+ goto out;
+ }
+
+ /* find and reference the deviceid */
+ dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
+ if (dsaddr == NULL) {
+ dsaddr = get_device_info(lo->inode, id);
+ if (dsaddr == NULL)
+ goto out;
+ }
+ fl->dsaddr = dsaddr;
+
+ if (fl->first_stripe_index < 0 ||
+ fl->first_stripe_index >= dsaddr->stripe_count) {
+ dprintk("%s Bad first_stripe_index %d\n",
+ __func__, fl->first_stripe_index);
+ goto out_put;
+ }
+
+ if ((fl->stripe_type == STRIPE_SPARSE &&
+ fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+ (fl->stripe_type == STRIPE_DENSE &&
+ fl->num_fh != dsaddr->stripe_count)) {
+ dprintk("%s num_fh %u not valid for given packing\n",
+ __func__, fl->num_fh);
+ goto out_put;
+ }
+
+ if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
+ dprintk("%s Stripe unit (%u) not aligned with rsize %u "
+ "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
+ nfss->wsize);
+ }
+
+ status = 0;
+out:
+ dprintk("--> %s returns %d\n", __func__, status);
+ return status;
+out_put:
+ pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache, &dsaddr->deviceid);
+ goto out;
+}
+
+static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
+{
+ int i;
+
+ for (i = 0; i < fl->num_fh; i++) {
+ if (!fl->fh_array[i])
+ break;
+ kfree(fl->fh_array[i]);
+ }
+ kfree(fl->fh_array);
+ fl->fh_array = NULL;
+}
+
+static void
+_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
+{
+ filelayout_free_fh_array(fl);
+ kfree(fl);
+}
+
+static int
+filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id)
+{
+ uint32_t *p = (uint32_t *)lgr->layout.buf;
+ uint32_t nfl_util;
+ int i;
+
+ dprintk("%s: set_layout_map Begin\n", __func__);
+
+ memcpy(id, p, sizeof(*id));
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+ print_deviceid(id);
+
+ nfl_util = be32_to_cpup(p++);
+ if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
+ fl->commit_through_mds = 1;
+ if (nfl_util & NFL4_UFLG_DENSE)
+ fl->stripe_type = STRIPE_DENSE;
+ else
+ fl->stripe_type = STRIPE_SPARSE;
+ fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;
+
+ fl->first_stripe_index = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &fl->pattern_offset);
+ fl->num_fh = be32_to_cpup(p++);
+
+ dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
+ __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
+ fl->pattern_offset);
+
+ fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ GFP_KERNEL);
+ if (!fl->fh_array)
+ return -ENOMEM;
+
+ for (i = 0; i < fl->num_fh; i++) {
+ /* Do we want to use a mempool here? */
+ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+ if (!fl->fh_array[i]) {
+ filelayout_free_fh_array(fl);
+ return -ENOMEM;
+ }
+ fl->fh_array[i]->size = be32_to_cpup(p++);
+ if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ printk(KERN_ERR "Too big fh %d received %d\n",
+ i, fl->fh_array[i]->size);
+ filelayout_free_fh_array(fl);
+ return -EIO;
+ }
+ memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
+ p += XDR_QUADLEN(fl->fh_array[i]->size);
+ dprintk("DEBUG: %s: fh len %d\n", __func__,
+ fl->fh_array[i]->size);
+ }
+
+ return 0;
+}
+
+static struct pnfs_layout_segment *
+filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
+ struct nfs4_layoutget_res *lgr)
+{
+ struct nfs4_filelayout_segment *fl;
+ int rc;
+ struct nfs4_deviceid id;
+
+ dprintk("--> %s\n", __func__);
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return NULL;
+
+ rc = filelayout_decode_layout(layoutid, fl, lgr, &id);
+ if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) {
+ _filelayout_free_lseg(fl);
+ return NULL;
+ }
+ return &fl->generic_hdr;
+}
+
+static void
+filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+
+ dprintk("--> %s\n", __func__);
+ pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache,
+ &fl->dsaddr->deviceid);
+ _filelayout_free_lseg(fl);
+}
+
+static struct pnfs_layoutdriver_type filelayout_type = {
+ .id = LAYOUT_NFSV4_1_FILES,
+ .name = "LAYOUT_NFSV4_1_FILES",
+ .owner = THIS_MODULE,
+ .set_layoutdriver = filelayout_set_layoutdriver,
+ .clear_layoutdriver = filelayout_clear_layoutdriver,
+ .alloc_lseg = filelayout_alloc_lseg,
+ .free_lseg = filelayout_free_lseg,
+};
+
+static int __init nfs4filelayout_init(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
+ __func__);
+ return pnfs_register_layoutdriver(&filelayout_type);
+}
+
+static void __exit nfs4filelayout_exit(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
+ __func__);
+ pnfs_unregister_layoutdriver(&filelayout_type);
+}
+
+module_init(nfs4filelayout_init);
+module_exit(nfs4filelayout_exit);
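
For orientation, filelayout_decode_layout() above walks the opaque layout body returned by LAYOUTGET in the field order below. This is a sketch of what the decode consumes, not an authoritative XDR definition (see the nfsv4_1_file_layout4 type in RFC 5661 for that):

	/* order of fields pulled off the wire by filelayout_decode_layout() */
	deviceid            /* 16 bytes, copied into *id                          */
	nfl_util            /* u32: stripe unit in the upper bits, plus the       */
	                    /* NFL4_UFLG_DENSE and NFL4_UFLG_COMMIT_THRU_MDS flags */
	first_stripe_index  /* u32                                                */
	pattern_offset      /* u64                                                */
	num_fh              /* u32, length of the filehandle list                 */
	fh list             /* num_fh opaque filehandles, each kmalloc'ed into    */
	                    /* fl->fh_array[i]                                    */
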
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
new file mode 100644
index 000000000000..bbf60dd2ab9d
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.h
@@ -0,0 +1,94 @@
+/*
+ * NFSv4 file layout driver data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_NFS4FILELAYOUT_H
+#define FS_NFS_NFS4FILELAYOUT_H
+
+#include "pnfs.h"
+
+/*
+ * Field testing shows we need to support up to 4096 stripe indices.
+ * We store each index as a u8 (u32 on the wire) to keep the memory footprint
+ * reasonable. This in turn means we support a maximum of 256
+ * RFC 5661 multipath_list4 structures.
+ */
+#define NFS4_PNFS_MAX_STRIPE_CNT 4096
+#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
+
+enum stripetype4 {
+ STRIPE_SPARSE = 1,
+ STRIPE_DENSE = 2
+};
+
+/* Individual ip address */
+struct nfs4_pnfs_ds {
+ struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ u32 ds_ip_addr;
+ u32 ds_port;
+ struct nfs_client *ds_clp;
+ atomic_t ds_count;
+};
+
+struct nfs4_file_layout_dsaddr {
+ struct pnfs_deviceid_node deviceid;
+ u32 stripe_count;
+ u8 *stripe_indices;
+ u32 ds_num;
+ struct nfs4_pnfs_ds *ds_list[1];
+};
+
+struct nfs4_filelayout_segment {
+ struct pnfs_layout_segment generic_hdr;
+ u32 stripe_type;
+ u32 commit_through_mds;
+ u32 stripe_unit;
+ u32 first_stripe_index;
+ u64 pattern_offset;
+ struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
+ unsigned int num_fh;
+ struct nfs_fh **fh_array;
+};
+
+static inline struct nfs4_filelayout_segment *
+FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg,
+ struct nfs4_filelayout_segment,
+ generic_hdr);
+}
+
+extern void nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *);
+extern void print_ds(struct nfs4_pnfs_ds *ds);
+extern void print_deviceid(struct nfs4_deviceid *dev_id);
+extern struct nfs4_file_layout_dsaddr *
+nfs4_fl_find_get_deviceid(struct nfs_client *, struct nfs4_deviceid *dev_id);
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
+
+#endif /* FS_NFS_NFS4FILELAYOUT_H */
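
The stripe_indices/ds_list split above is what later lets the I/O path pick a data server for a given file offset. The read/write path is not part of this patch, but the usual RFC 5661 mapping, expressed in terms of these fields, is roughly the following (illustrative sketch only, not code from this series):

	static u32 example_ds_index(struct nfs4_filelayout_segment *fl, loff_t offset)
	{
		struct nfs4_file_layout_dsaddr *dsaddr = fl->dsaddr;
		u64 stripe_no;
		u32 idx;

		/* which stripe unit does the offset fall into? */
		stripe_no = div_u64(offset - fl->pattern_offset, fl->stripe_unit);
		/* rotate by first_stripe_index and wrap around the stripe list */
		idx = (stripe_no + fl->first_stripe_index) % dsaddr->stripe_count;
		/* stripe_indices[] then selects the data server */
		return dsaddr->stripe_indices[idx];	/* index into dsaddr->ds_list[] */
	}
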
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
new file mode 100644
index 000000000000..51fe64ace55a
--- /dev/null
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -0,0 +1,448 @@
+/*
+ * Device operations for the pnfs nfs4 file layout driver.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ * Garth Goodson <Garth.Goodson@netapp.com>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/vmalloc.h>
+
+#include "internal.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/*
+ * Data server cache
+ *
+ * Data servers can be mapped to different device ids.
+ * nfs4_pnfs_ds reference counting
+ * - set to 1 on allocation
+ * - incremented when a device id maps a data server already in the cache.
+ * - decremented when deviceid is removed from the cache.
+ */
+DEFINE_SPINLOCK(nfs4_ds_cache_lock);
+static LIST_HEAD(nfs4_data_server_cache);
+
+/* Debug routines */
+void
+print_ds(struct nfs4_pnfs_ds *ds)
+{
+ if (ds == NULL) {
+ printk("%s NULL device\n", __func__);
+ return;
+ }
+ printk(" ip_addr %x port %hu\n"
+ " ref count %d\n"
+ " client %p\n"
+ " cl_exchange_flags %x\n",
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
+ atomic_read(&ds->ds_count), ds->ds_clp,
+ ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
+}
+
+void
+print_ds_list(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ int i;
+
+ ifdebug(FACILITY) {
+ printk("%s dsaddr->ds_num %d\n", __func__,
+ dsaddr->ds_num);
+ for (i = 0; i < dsaddr->ds_num; i++)
+ print_ds(dsaddr->ds_list[i]);
+ }
+}
+
+void print_deviceid(struct nfs4_deviceid *id)
+{
+ u32 *p = (u32 *)id;
+
+ dprintk("%s: device id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+}
+
+/* nfs4_ds_cache_lock is held */
+static struct nfs4_pnfs_ds *
+_data_server_lookup_locked(u32 ip_addr, u32 port)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ dprintk("_data_server_lookup: ip_addr=%x port=%hu\n",
+ ntohl(ip_addr), ntohs(port));
+
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) {
+ if (ds->ds_ip_addr == ip_addr &&
+ ds->ds_port == port) {
+ return ds;
+ }
+ }
+ return NULL;
+}
+
+static void
+destroy_ds(struct nfs4_pnfs_ds *ds)
+{
+ dprintk("--> %s\n", __func__);
+ ifdebug(FACILITY)
+ print_ds(ds);
+
+ if (ds->ds_clp)
+ nfs_put_client(ds->ds_clp);
+ kfree(ds);
+}
+
+static void
+nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ struct nfs4_pnfs_ds *ds;
+ int i;
+
+ print_deviceid(&dsaddr->deviceid.de_id);
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ ds = dsaddr->ds_list[i];
+ if (ds != NULL) {
+ if (atomic_dec_and_lock(&ds->ds_count,
+ &nfs4_ds_cache_lock)) {
+ list_del_init(&ds->ds_node);
+ spin_unlock(&nfs4_ds_cache_lock);
+ destroy_ds(ds);
+ }
+ }
+ }
+ kfree(dsaddr->stripe_indices);
+ kfree(dsaddr);
+}
+
+void
+nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *device)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr =
+ container_of(device, struct nfs4_file_layout_dsaddr, deviceid);
+
+ nfs4_fl_free_deviceid(dsaddr);
+}
+
+static struct nfs4_pnfs_ds *
+nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
+{
+ struct nfs4_pnfs_ds *tmp_ds, *ds;
+
+ ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
+ if (!ds)
+ goto out;
+
+ spin_lock(&nfs4_ds_cache_lock);
+ tmp_ds = _data_server_lookup_locked(ip_addr, port);
+ if (tmp_ds == NULL) {
+ ds->ds_ip_addr = ip_addr;
+ ds->ds_port = port;
+ atomic_set(&ds->ds_count, 1);
+ INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_clp = NULL;
+ list_add(&ds->ds_node, &nfs4_data_server_cache);
+ dprintk("%s add new data server ip 0x%x\n", __func__,
+ ds->ds_ip_addr);
+ } else {
+ kfree(ds);
+ atomic_inc(&tmp_ds->ds_count);
+ dprintk("%s data server found ip 0x%x, inc'ed ds_count to %d\n",
+ __func__, tmp_ds->ds_ip_addr,
+ atomic_read(&tmp_ds->ds_count));
+ ds = tmp_ds;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+out:
+ return ds;
+}
+
+/*
+ * Currently we only support ipv4 and a single multipath address.
+ */
+static struct nfs4_pnfs_ds *
+decode_and_add_ds(__be32 **pp, struct inode *inode)
+{
+ struct nfs4_pnfs_ds *ds = NULL;
+ char *buf;
+ const char *ipend, *pstr;
+ u32 ip_addr, port;
+ int nlen, rlen, i;
+ int tmp[2];
+ __be32 *r_netid, *r_addr, *p = *pp;
+
+ /* r_netid */
+ nlen = be32_to_cpup(p++);
+ r_netid = p;
+ p += XDR_QUADLEN(nlen);
+
+ /* r_addr */
+ rlen = be32_to_cpup(p++);
+ r_addr = p;
+ p += XDR_QUADLEN(rlen);
+ *pp = p;
+
+ /* Check that netid is "tcp" */
+ if (nlen != 3 || memcmp((char *)r_netid, "tcp", 3)) {
+ dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
+ goto out_err;
+ }
+
+ /* ipv6 length plus port is legal */
+ if (rlen > INET6_ADDRSTRLEN + 8) {
+ dprintk("%s Invalid address, length %d\n", __func__,
+ rlen);
+ goto out_err;
+ }
+ buf = kmalloc(rlen + 1, GFP_KERNEL);
+ if (!buf)
+ goto out_err;
+ buf[rlen] = '\0';
+ memcpy(buf, r_addr, rlen);
+
+ /* replace the port dots with dashes for the in4_pton() delimiter */
+ for (i = 0; i < 2; i++) {
+ char *res = strrchr(buf, '.');
+ *res = '-';
+ }
+
+ /* Currently only support ipv4 address */
+ if (in4_pton(buf, rlen, (u8 *)&ip_addr, '-', &ipend) == 0) {
+ dprintk("%s: Only ipv4 addresses supported\n", __func__);
+ goto out_free;
+ }
+
+ /* port */
+ pstr = ipend;
+ sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
+ port = htons((tmp[0] << 8) | (tmp[1]));
+
+ ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
+ dprintk("%s Decoded address and port %s\n", __func__, buf);
+out_free:
+ kfree(buf);
+out_err:
+ return ds;
+}
+
+/* Decode opaque device data and return the result */
+static struct nfs4_file_layout_dsaddr*
+decode_device(struct inode *ino, struct pnfs_device *pdev)
+{
+ int i, dummy;
+ u32 cnt, num;
+ u8 *indexp;
+ __be32 *p = (__be32 *)pdev->area, *indicesp;
+ struct nfs4_file_layout_dsaddr *dsaddr;
+
+ /* Get the stripe count (number of stripe index) */
+ cnt = be32_to_cpup(p++);
+ dprintk("%s stripe count %d\n", __func__, cnt);
+ if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
+ printk(KERN_WARNING "%s: stripe count %d greater than "
+ "supported maximum %d\n", __func__,
+ cnt, NFS4_PNFS_MAX_STRIPE_CNT);
+ goto out_err;
+ }
+
+ /* Check the multipath list count */
+ indicesp = p;
+ p += XDR_QUADLEN(cnt << 2);
+ num = be32_to_cpup(p++);
+ dprintk("%s ds_num %u\n", __func__, num);
+ if (num > NFS4_PNFS_MAX_MULTI_CNT) {
+ printk(KERN_WARNING "%s: multipath count %d greater than "
+ "supported maximum %d\n", __func__,
+ num, NFS4_PNFS_MAX_MULTI_CNT);
+ goto out_err;
+ }
+ dsaddr = kzalloc(sizeof(*dsaddr) +
+ (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
+ GFP_KERNEL);
+ if (!dsaddr)
+ goto out_err;
+
+ dsaddr->stripe_indices = kzalloc(sizeof(u8) * cnt, GFP_KERNEL);
+ if (!dsaddr->stripe_indices)
+ goto out_err_free;
+
+ dsaddr->stripe_count = cnt;
+ dsaddr->ds_num = num;
+
+ memcpy(&dsaddr->deviceid.de_id, &pdev->dev_id, sizeof(pdev->dev_id));
+
+ /* Go back and read stripe indices */
+ p = indicesp;
+ indexp = &dsaddr->stripe_indices[0];
+ for (i = 0; i < dsaddr->stripe_count; i++) {
+ *indexp = be32_to_cpup(p++);
+ if (*indexp >= num)
+ goto out_err_free;
+ indexp++;
+ }
+ /* Skip already read multipath list count */
+ p++;
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ int j;
+
+ dummy = be32_to_cpup(p++); /* multipath count */
+ if (dummy > 1) {
+ printk(KERN_WARNING
+ "%s: Multipath count %d not supported, "
+ "skipping all greater than 1\n", __func__,
+ dummy);
+ }
+ for (j = 0; j < dummy; j++) {
+ if (j == 0) {
+ dsaddr->ds_list[i] = decode_and_add_ds(&p, ino);
+ if (dsaddr->ds_list[i] == NULL)
+ goto out_err_free;
+ } else {
+ u32 len;
+ /* skip extra multipath */
+ len = be32_to_cpup(p++);
+ p += XDR_QUADLEN(len);
+ len = be32_to_cpup(p++);
+ p += XDR_QUADLEN(len);
+ continue;
+ }
+ }
+ }
+ return dsaddr;
+
+out_err_free:
+ nfs4_fl_free_deviceid(dsaddr);
+out_err:
+ dprintk("%s ERROR: returning NULL\n", __func__);
+ return NULL;
+}
+
+/*
+ * Decode the opaque device specified in 'dev'
+ * and add it to the list of available devices.
+ * If the deviceid is already cached, pnfs_add_deviceid will return
+ * a pointer to the cached struct and throw away the new one.
+ */
+static struct nfs4_file_layout_dsaddr*
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ struct pnfs_deviceid_node *d;
+
+ dsaddr = decode_device(inode, dev);
+ if (!dsaddr) {
+ printk(KERN_WARNING "%s: Could not decode or add device\n",
+ __func__);
+ return NULL;
+ }
+
+ d = pnfs_add_deviceid(NFS_SERVER(inode)->nfs_client->cl_devid_cache,
+ &dsaddr->deviceid);
+
+ return container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+}
+
+/*
+ * Retrieve the information for dev_id, add it to the list
+ * of available devices, and return it.
+ */
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
+{
+ struct pnfs_device *pdev = NULL;
+ u32 max_resp_sz;
+ int max_pages;
+ struct page **pages = NULL;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ int rc, i;
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ /*
+ * Use the session max response size as the basis for setting
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = max_resp_sz >> PAGE_SHIFT;
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+ pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
+ if (pdev == NULL)
+ return NULL;
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ if (pages == NULL) {
+ kfree(pdev);
+ return NULL;
+ }
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+
+ /* set pdev->area */
+ pdev->area = vmap(pages, max_pages, VM_MAP, PAGE_KERNEL);
+ if (!pdev->area)
+ goto out_free;
+
+ memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
+ pdev->layout_type = LAYOUT_NFSV4_1_FILES;
+ pdev->pages = pages;
+ pdev->pgbase = 0;
+ pdev->pglen = PAGE_SIZE * max_pages;
+ pdev->mincount = 0;
+
+ rc = nfs4_proc_getdeviceinfo(server, pdev);
+ dprintk("%s getdevice info returns %d\n", __func__, rc);
+ if (rc)
+ goto out_free;
+
+ /*
+ * Found new device, need to decode it and then add it to the
+ * list of known devices for this mountpoint.
+ */
+ dsaddr = decode_and_add_device(inode, pdev);
+out_free:
+ if (pdev->area != NULL)
+ vunmap(pdev->area);
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ kfree(pdev);
+ dprintk("<-- %s dsaddr %p\n", __func__, dsaddr);
+ return dsaddr;
+}
+
+struct nfs4_file_layout_dsaddr *
+nfs4_fl_find_get_deviceid(struct nfs_client *clp, struct nfs4_deviceid *id)
+{
+ struct pnfs_deviceid_node *d;
+
+ d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
+ return (d == NULL) ? NULL :
+ container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+}
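
decode_and_add_ds() above expects the RFC 5661 netaddr4 form: netid "tcp" and an r_addr of "h1.h2.h3.h4.p1.p2". A worked (hypothetical) example of the parsing:

	/* r_addr from the server: "192.168.1.50.8.1"  (hypothetical)          */
	/* after rewriting the last two dots as dashes: "192.168.1.50-8-1"     */
	/* in4_pton() consumes "192.168.1.50" and leaves ipend at "-8-1"       */
	/* sscanf(ipend, "-%d-%d", ...) yields tmp[0] = 8, tmp[1] = 1          */
	/* port = htons((8 << 8) | 1) = htons(2049), the standard NFS port     */
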
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e87fe612ca18..32c8758c99fd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -55,6 +55,7 @@
#include "internal.h"
#include "iostat.h"
#include "callback.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -130,6 +131,7 @@ const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
FATTR4_WORD1_TIME_DELTA
+ | FATTR4_WORD1_FS_LAYOUT_TYPES
};
const u32 nfs4_fs_locations_bitmap[2] = {
@@ -4840,49 +4842,56 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
args->bc_attrs.max_reqs);
}
-static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
- if (rcvd <= sent)
- return 0;
- printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
- "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
- return -EINVAL;
+ struct nfs4_channel_attrs *sent = &args->fc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+
+ if (rcvd->headerpadsz > sent->headerpadsz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
+ return -EINVAL;
+ /*
+ * Our requested max_ops is the minimum we need; we're not
+ * prepared to break up compounds into smaller pieces than that.
+ * So, no point even trying to continue if the server won't
+ * cooperate:
+ */
+ if (rcvd->max_ops < sent->max_ops)
+ return -EINVAL;
+ if (rcvd->max_reqs == 0)
+ return -EINVAL;
+ return 0;
}
-#define _verify_fore_channel_attr(_name_) \
- _verify_channel_attr("fore", #_name_, \
- args->fc_attrs._name_, \
- session->fc_attrs._name_)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+{
+ struct nfs4_channel_attrs *sent = &args->bc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
-#define _verify_back_channel_attr(_name_) \
- _verify_channel_attr("back", #_name_, \
- args->bc_attrs._name_, \
- session->bc_attrs._name_)
+ if (rcvd->max_rqst_sz > sent->max_rqst_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz < sent->max_resp_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
+ return -EINVAL;
+ /* These would render the backchannel useless: */
+ if (rcvd->max_ops == 0)
+ return -EINVAL;
+ if (rcvd->max_reqs == 0)
+ return -EINVAL;
+ return 0;
+}
-/*
- * The server is not allowed to increase the fore channel header pad size,
- * maximum response size, or maximum number of operations.
- *
- * The back channel attributes are only negotiatied down: We send what the
- * (back channel) server insists upon.
- */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
struct nfs4_session *session)
{
- int ret = 0;
-
- ret |= _verify_fore_channel_attr(headerpadsz);
- ret |= _verify_fore_channel_attr(max_resp_sz);
- ret |= _verify_fore_channel_attr(max_ops);
-
- ret |= _verify_back_channel_attr(headerpadsz);
- ret |= _verify_back_channel_attr(max_rqst_sz);
- ret |= _verify_back_channel_attr(max_resp_sz);
- ret |= _verify_back_channel_attr(max_resp_sz_cached);
- ret |= _verify_back_channel_attr(max_ops);
- ret |= _verify_back_channel_attr(max_reqs);
+ int ret;
- return ret;
+ ret = nfs4_verify_fore_channel_attrs(args, session);
+ if (ret)
+ return ret;
+ return nfs4_verify_back_channel_attrs(args, session);
}
static int _nfs4_proc_create_session(struct nfs_client *clp)
@@ -5255,6 +5264,147 @@ out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
+
+static void
+nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct inode *ino = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(ino);
+
+ dprintk("--> %s\n", __func__);
+ if (nfs4_setup_sequence(server, &lgp->args.seq_args,
+ &lgp->res.seq_res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &lgp->res.seq_res))
+ return;
+
+ switch (task->tk_status) {
+ case 0:
+ break;
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
+ task->tk_status = -NFS4ERR_DELAY;
+ /* Fall through */
+ default:
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+ lgp->status = task->tk_status;
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_layoutget_release(void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ put_layout_hdr(lgp->args.inode);
+ if (lgp->res.layout.buf != NULL)
+ free_page((unsigned long) lgp->res.layout.buf);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_call_prepare = nfs4_layoutget_prepare,
+ .rpc_call_done = nfs4_layoutget_done,
+ .rpc_release = nfs4_layoutget_release,
+};
+
+int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+{
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+ .rpc_argp = &lgp->args,
+ .rpc_resp = &lgp->res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutget_call_ops,
+ .callback_data = lgp,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ lgp->res.layout.buf = (void *)__get_free_page(GFP_NOFS);
+ if (lgp->res.layout.buf == NULL) {
+ nfs4_layoutget_release(lgp);
+ return -ENOMEM;
+ }
+
+ lgp->res.seq_res.sr_slot = NULL;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = lgp->status;
+ if (status != 0)
+ goto out;
+ status = pnfs_layout_process(lgp);
+out:
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+static int
+_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_getdeviceinfo_args args = {
+ .pdev = pdev,
+ };
+ struct nfs4_getdeviceinfo_res res = {
+ .pdev = pdev,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ dprintk("<-- %s status=%d\n", __func__, status);
+
+ return status;
+}
+
+int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_getdeviceinfo(server, pdev),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
+
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
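
nfs4_proc_layoutget() above runs the LAYOUTGET compound as an async rpc_task but waits for it, so its caller sees a synchronous call. The caller lives in the generic pNFS layer (fs/nfs/pnfs.c, added later in this patch); the sketch below only illustrates which request fields that layer is expected to fill in, using the names consumed by encode_layoutget() and nfs4_layoutget_release(). The surrounding allocation and the exact values are illustrative assumptions, not code from this series:

	/* lgp assumed to be a freshly kzalloc'ed struct nfs4_layoutget */
	lgp->args.type         = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lgp->args.inode        = ino;
	lgp->args.ctx          = get_nfs_open_context(ctx);  /* dropped in ..._release() */
	lgp->args.range.iomode = IOMODE_RW;
	lgp->args.range.offset = 0;
	lgp->args.range.length = (u64)-1;     /* i.e. the whole file */
	lgp->args.minlength    = (u64)-1;
	lgp->args.maxcount     = PNFS_LAYOUT_MAXSIZE;
	status = nfs4_proc_layoutget(lgp);    /* alloc reply page, run task, pnfs_layout_process() */

Note that nfs4_layoutget_release() drops both an open context reference and a layout header reference (put_layout_hdr()), so the caller is expected to hold both before issuing the call.
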
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index aa0b02a610c4..f575a3126737 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -54,6 +54,7 @@
#include "callback.h"
#include "delegation.h"
#include "internal.h"
+#include "pnfs.h"
#define OPENOWNER_POOL_SIZE 8
@@ -1475,6 +1476,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ pnfs_destroy_all_layouts(clp);
}
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index bd2101d918c8..f313c4cce7e4 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#include "internal.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -310,6 +311,19 @@ static int nfs4_stat_to_errno(int);
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
+#define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \
+ XDR_QUADLEN(NFS4_DEVICEID4_SIZE))
+#define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \
+ 1 /* layout type */ + \
+ 1 /* opaque devaddr4 length */ + \
+ /* devaddr4 payload is read into page */ \
+ 1 /* notification bitmap length */ + \
+ 1 /* notification bitmap */)
+#define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \
+ encode_stateid_maxsz)
+#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
+ decode_stateid_maxsz + \
+ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -699,6 +713,20 @@ static int nfs4_stat_to_errno(int);
#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_reclaim_complete_maxsz)
+#define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_getdeviceinfo_maxsz)
+#define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_getdeviceinfo_maxsz)
+#define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_layoutget_maxsz)
+#define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutget_maxsz)
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
@@ -1737,6 +1765,58 @@ static void encode_sequence(struct xdr_stream *xdr,
#endif /* CONFIG_NFS_V4_1 */
}
+#ifdef CONFIG_NFS_V4_1
+static void
+encode_getdeviceinfo(struct xdr_stream *xdr,
+ const struct nfs4_getdeviceinfo_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(OP_GETDEVICEINFO);
+ p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(args->pdev->layout_type);
+ *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */
+ *p++ = cpu_to_be32(0); /* bitmap length 0 */
+ hdr->nops++;
+ hdr->replen += decode_getdeviceinfo_maxsz;
+}
+
+static void
+encode_layoutget(struct xdr_stream *xdr,
+ const struct nfs4_layoutget_args *args,
+ struct compound_hdr *hdr)
+{
+ nfs4_stateid stateid;
+ __be32 *p;
+
+ p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_LAYOUTGET);
+ *p++ = cpu_to_be32(0); /* Signal layout available */
+ *p++ = cpu_to_be32(args->type);
+ *p++ = cpu_to_be32(args->range.iomode);
+ p = xdr_encode_hyper(p, args->range.offset);
+ p = xdr_encode_hyper(p, args->range.length);
+ p = xdr_encode_hyper(p, args->minlength);
+ pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
+ args->ctx->state);
+ p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(args->maxcount);
+
+ dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
+ __func__,
+ args->type,
+ args->range.iomode,
+ (unsigned long)args->range.offset,
+ (unsigned long)args->range.length,
+ args->maxcount);
+ hdr->nops++;
+ hdr->replen += decode_layoutget_maxsz;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
@@ -2554,6 +2634,51 @@ static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
return 0;
}
+/*
+ * Encode GETDEVICEINFO request
+ */
+static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_getdeviceinfo_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_getdeviceinfo(&xdr, args, &hdr);
+
+ /* set up reply kvec. Subtract notification bitmap max size (2)
+ * so that notification bitmap is put in xdr_buf tail */
+ xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
+ args->pdev->pages, args->pdev->pgbase,
+ args->pdev->pglen);
+
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * Encode LAYOUTGET request
+ */
+static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_layoutget_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutget(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -3978,6 +4103,61 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
}
+/*
+ * Decode potentially multiple layout types. Currently we only support
+ * one layout driver per file system.
+ */
+static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
+ uint32_t *layouttype)
+{
+ uint32_t *p;
+ int num;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num = be32_to_cpup(p);
+
+ /* pNFS is not supported by the underlying file system */
+ if (num == 0) {
+ *layouttype = 0;
+ return 0;
+ }
+ if (num > 1)
+ printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers "
+ "per filesystem not supported\n", __func__);
+
+ /* Decode and set first layout type, move xdr->p past unused types */
+ p = xdr_inline_decode(xdr, num * 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *layouttype = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * The pNFS layout type of the exported file system.
+ * Note we must ensure that layouttype is set in any non-error case.
+ */
+static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
+ uint32_t *layouttype)
+{
+ int status = 0;
+
+ dprintk("%s: bitmap is %x\n", __func__, bitmap[1]);
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
+ return -EIO;
+ if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
+ status = decode_first_pnfs_layout_type(xdr, layouttype);
+ bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
+ } else
+ *layouttype = 0;
+ return status;
+}
+
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
__be32 *savep;
@@ -4006,6 +4186,9 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
if (status != 0)
goto xdr_error;
+ status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
+ if (status != 0)
+ goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
@@ -4772,6 +4955,134 @@ out_overflow:
#endif /* CONFIG_NFS_V4_1 */
}
+#if defined(CONFIG_NFS_V4_1)
+
+static int decode_getdeviceinfo(struct xdr_stream *xdr,
+ struct pnfs_device *pdev)
+{
+ __be32 *p;
+ uint32_t len, type;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_GETDEVICEINFO);
+ if (status) {
+ if (status == -ETOOSMALL) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ pdev->mincount = be32_to_cpup(p);
+ dprintk("%s: Min count too small. mincnt = %u\n",
+ __func__, pdev->mincount);
+ }
+ return status;
+ }
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ type = be32_to_cpup(p++);
+ if (type != pdev->layout_type) {
+ dprintk("%s: layout mismatch req: %u pdev: %u\n",
+ __func__, pdev->layout_type, type);
+ return -EINVAL;
+ }
+ /*
+ * Get the length of the opaque device_addr4. xdr_read_pages places
+ * the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages)
+ * and places the remaining xdr data in xdr_buf->tail
+ */
+ pdev->mincount = be32_to_cpup(p);
+ xdr_read_pages(xdr, pdev->mincount); /* include space for the length */
+
+ /* Parse notification bitmap, verifying that it is zero. */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len) {
+ int i;
+
+ p = xdr_inline_decode(xdr, 4 * len);
+ if (unlikely(!p))
+ goto out_overflow;
+ for (i = 0; i < len; i++, p++) {
+ if (be32_to_cpup(p)) {
+ dprintk("%s: notifications not supported\n",
+ __func__);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ struct nfs4_layoutget_res *res)
+{
+ __be32 *p;
+ int status;
+ u32 layout_count;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTGET);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->return_on_close = be32_to_cpup(p++);
+ p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE);
+ layout_count = be32_to_cpup(p);
+ if (!layout_count) {
+ dprintk("%s: server responded with empty layout array\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ p = xdr_inline_decode(xdr, 24);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &res->range.offset);
+ p = xdr_decode_hyper(p, &res->range.length);
+ res->range.iomode = be32_to_cpup(p++);
+ res->type = be32_to_cpup(p++);
+
+ status = decode_opaque_inline(xdr, &res->layout.len, (char **)&p);
+ if (unlikely(status))
+ return status;
+
+ dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
+ __func__,
+ (unsigned long)res->range.offset,
+ (unsigned long)res->range.length,
+ res->range.iomode,
+ res->type,
+ res->layout.len);
+
+ /* nfs4_proc_layoutget allocated a single page */
+ if (res->layout.len > PAGE_SIZE)
+ return -ENOMEM;
+ memcpy(res->layout.buf, p, res->layout.len);
+
+ if (layout_count > 1) {
+ /* We only handle a length one array at the moment. Any
+ * further entries are just ignored. Note that this means
+ * the client may see a response that is less than the
+ * minimum it requested.
+ */
+ dprintk("%s: server responded with %d layouts, dropping tail\n",
+ __func__, layout_count);
+ }
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
@@ -5799,6 +6110,53 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
status = decode_reclaim_complete(&xdr, (void *)NULL);
return status;
}
+
+/*
+ * Decode GETDEVICEINFO response
+ */
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_getdeviceinfo_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status != 0)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status != 0)
+ goto out;
+ status = decode_getdeviceinfo(&xdr, res->pdev);
+out:
+ return status;
+}
+
+/*
+ * Decode LAYOUTGET response
+ */
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_layoutget_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(&xdr);
+ if (status)
+ goto out;
+ status = decode_layoutget(&xdr, rqstp, res);
+out:
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
@@ -5990,6 +6348,8 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SEQUENCE, enc_sequence, dec_sequence),
PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
+ PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
#endif /* CONFIG_NFS_V4_1 */
};
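
A note on the GETDEVICEINFO buffer handling above: nfs4_xdr_enc_getdeviceinfo() points the reply's page vector at pdev->pages with xdr_inline_pages(), deliberately subtracting two words from the expected reply length so that the notification bitmap is forced into the xdr_buf tail; decode_getdeviceinfo() then calls xdr_read_pages(pdev->mincount) so the opaque device_addr4 body lands in those pages. Sketch of the resulting reply layout (illustrative):

	/*
	 * rq_rcv_buf.head : compound + SEQUENCE headers, GETDEVICEINFO status,
	 *                   da_layout_type, opaque length (becomes pdev->mincount)
	 * rq_rcv_buf.pages: device_addr4 body (pdev->pages; the file layout driver
	 *                   vmap()s these into pdev->area for decode_device())
	 * rq_rcv_buf.tail : gdir_notification bitmap, required here to be all zeroes
	 */
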
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 460df3652889..903908a20023 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -101,6 +101,7 @@ static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = "";
/* server:export path string passed to super.c */
static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = "";
+#ifdef RPC_DEBUG
/*
* When the "nfsrootdebug" kernel command line option is specified,
* enable debugging messages for NFSROOT.
@@ -112,6 +113,7 @@ static int __init nfs_root_debug(char *__unused)
}
__setup("nfsrootdebug", nfs_root_debug);
+#endif
/*
* Parse NFS server and directory information passed on the kernel
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
new file mode 100644
index 000000000000..db773428f95f
--- /dev/null
+++ b/fs/nfs/pnfs.c
@@ -0,0 +1,783 @@
+/*
+ * pNFS functions to call and manage layout drivers.
+ *
+ * Copyright (c) 2002 [year of first publication]
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+/* Locking:
+ *
+ * pnfs_spinlock:
+ * protects pnfs_modules_tbl.
+ */
+static DEFINE_SPINLOCK(pnfs_spinlock);
+
+/*
+ * pnfs_modules_tbl holds all pnfs modules
+ */
+static LIST_HEAD(pnfs_modules_tbl);
+
+/* Return the registered pnfs layout driver module matching given id */
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver_locked(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
+ if (local->id == id)
+ goto out;
+ local = NULL;
+out:
+ dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
+ return local;
+}
+
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ spin_lock(&pnfs_spinlock);
+ local = find_pnfs_driver_locked(id);
+ spin_unlock(&pnfs_spinlock);
+ return local;
+}
+
+void
+unset_pnfs_layoutdriver(struct nfs_server *nfss)
+{
+ if (nfss->pnfs_curr_ld) {
+ nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+ module_put(nfss->pnfs_curr_ld->owner);
+ }
+ nfss->pnfs_curr_ld = NULL;
+}
+
+/*
+ * Try to set the server's pnfs module to the pnfs layout type specified by id.
+ * Currently only one pNFS layout driver per filesystem is supported.
+ *
+ * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
+ */
+void
+set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
+{
+ struct pnfs_layoutdriver_type *ld_type = NULL;
+
+ if (id == 0)
+ goto out_no_driver;
+ if (!(server->nfs_client->cl_exchange_flags &
+ (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
+ printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
+ id, server->nfs_client->cl_exchange_flags);
+ goto out_no_driver;
+ }
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ dprintk("%s: No pNFS module found for %u.\n",
+ __func__, id);
+ goto out_no_driver;
+ }
+ }
+ if (!try_module_get(ld_type->owner)) {
+ dprintk("%s: Could not grab reference on module\n", __func__);
+ goto out_no_driver;
+ }
+ server->pnfs_curr_ld = ld_type;
+ if (ld_type->set_layoutdriver(server)) {
+ printk(KERN_ERR
+ "%s: Error initializing mount point for layout driver %u.\n",
+ __func__, id);
+ module_put(ld_type->owner);
+ goto out_no_driver;
+ }
+ dprintk("%s: pNFS module for %u set\n", __func__, id);
+ return;
+
+out_no_driver:
+ dprintk("%s: Using NFSv4 I/O\n", __func__);
+ server->pnfs_curr_ld = NULL;
+}
+
+int
+pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ int status = -EINVAL;
+ struct pnfs_layoutdriver_type *tmp;
+
+ if (ld_type->id == 0) {
+ printk(KERN_ERR "%s id 0 is reserved\n", __func__);
+ return status;
+ }
+ if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
+ printk(KERN_ERR "%s Layout driver must provide "
+ "alloc_lseg and free_lseg.\n", __func__);
+ return status;
+ }
+
+ spin_lock(&pnfs_spinlock);
+ tmp = find_pnfs_driver_locked(ld_type->id);
+ if (!tmp) {
+ list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
+ status = 0;
+ dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
+ ld_type->name);
+ } else {
+ printk(KERN_ERR "%s Module with id %d already loaded!\n",
+ __func__, ld_type->id);
+ }
+ spin_unlock(&pnfs_spinlock);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
+
+void
+pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
+ spin_lock(&pnfs_spinlock);
+ list_del(&ld_type->pnfs_tblid);
+ spin_unlock(&pnfs_spinlock);
+}
+EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
+
+/*
+ * pNFS client layout cache
+ */
+
+static void
+get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+ assert_spin_locked(&lo->inode->i_lock);
+ lo->refcount++;
+}
+
+static void
+put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+ assert_spin_locked(&lo->inode->i_lock);
+ BUG_ON(lo->refcount == 0);
+
+ lo->refcount--;
+ if (!lo->refcount) {
+ dprintk("%s: freeing layout cache %p\n", __func__, lo);
+ BUG_ON(!list_empty(&lo->layouts));
+ NFS_I(lo->inode)->layout = NULL;
+ kfree(lo);
+ }
+}
+
+void
+put_layout_hdr(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ put_layout_hdr_locked(NFS_I(inode)->layout);
+ spin_unlock(&inode->i_lock);
+}
+
+static void
+init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
+{
+ INIT_LIST_HEAD(&lseg->fi_list);
+ kref_init(&lseg->kref);
+ lseg->layout = lo;
+}
+
+/* Called without i_lock held, as the free_lseg call may sleep */
+static void
+destroy_lseg(struct kref *kref)
+{
+ struct pnfs_layout_segment *lseg =
+ container_of(kref, struct pnfs_layout_segment, kref);
+ struct inode *ino = lseg->layout->inode;
+
+ dprintk("--> %s\n", __func__);
+ NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ /* Matched by get_layout_hdr_locked in pnfs_insert_layout */
+ put_layout_hdr(ino);
+}
+
+static void
+put_lseg(struct pnfs_layout_segment *lseg)
+{
+ if (!lseg)
+ return;
+
+ dprintk("%s: lseg %p ref %d\n", __func__, lseg,
+ atomic_read(&lseg->kref.refcount));
+ kref_put(&lseg->kref, destroy_lseg);
+}
+
+static void
+pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+{
+ struct pnfs_layout_segment *lseg, *next;
+ struct nfs_client *clp;
+
+ dprintk("%s:Begin lo %p\n", __func__, lo);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
+ dprintk("%s: freeing lseg %p\n", __func__, lseg);
+ list_move(&lseg->fi_list, tmp_list);
+ }
+ clp = NFS_SERVER(lo->inode)->nfs_client;
+ spin_lock(&clp->cl_lock);
+ /* List does not take a reference, so no need for put here */
+ list_del_init(&lo->layouts);
+ spin_unlock(&clp->cl_lock);
+ write_seqlock(&lo->seqlock);
+ clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+ write_sequnlock(&lo->seqlock);
+
+ dprintk("%s:Return\n", __func__);
+}
+
+static void
+pnfs_free_lseg_list(struct list_head *tmp_list)
+{
+ struct pnfs_layout_segment *lseg;
+
+ while (!list_empty(tmp_list)) {
+ lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
+ fi_list);
+ dprintk("%s calling put_lseg on %p\n", __func__, lseg);
+ list_del(&lseg->fi_list);
+ put_lseg(lseg);
+ }
+}
+
+void
+pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ lo = nfsi->layout;
+ if (lo) {
+ pnfs_clear_lseg_list(lo, &tmp_list);
+ /* Matched by refcount set to 1 in alloc_init_layout_hdr */
+ put_layout_hdr_locked(lo);
+ }
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+}
+
+/*
+ * Called by the state manager to remove all layouts established under an
+ * expired lease.
+ */
+void
+pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&clp->cl_lock);
+ list_splice_init(&clp->cl_layouts, &tmp_list);
+ spin_unlock(&clp->cl_lock);
+
+ while (!list_empty(&tmp_list)) {
+ lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
+ layouts);
+ dprintk("%s freeing layout for inode %lu\n", __func__,
+ lo->inode->i_ino);
+ pnfs_destroy_layout(NFS_I(lo->inode));
+ }
+}
+
+/* update lo->stateid with new if it is more recent
+ *
+ * lo->stateid could be the open stateid, in which case we just use what is given.
+ */
+static void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *new)
+{
+ nfs4_stateid *old = &lo->stateid;
+ bool overwrite = false;
+
+ write_seqlock(&lo->seqlock);
+ if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
+ memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
+ overwrite = true;
+ else {
+ u32 oldseq, newseq;
+
+ oldseq = be32_to_cpu(old->stateid.seqid);
+ newseq = be32_to_cpu(new->stateid.seqid);
+ if ((int)(newseq - oldseq) > 0)
+ overwrite = true;
+ }
+ if (overwrite)
+ memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
+ write_sequnlock(&lo->seqlock);
+}
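/*
 * Note on the seqid test above (illustration, not part of this patch):
 * when the "other" fields match, casting the u32 difference to int makes
 * the "more recent" check safe across seqid wraparound.  For example,
 * oldseq = 0xffffffff and newseq = 1 gives (int)(1 - 0xffffffff) == 2 > 0,
 * so the freshly wrapped stateid still replaces the old one, while an
 * equal or older seqid yields a value <= 0 and is ignored.
 */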
+
+static void
+pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
+ struct nfs4_state *state)
+{
+ int seq;
+
+ dprintk("--> %s\n", __func__);
+ write_seqlock(&lo->seqlock);
+ do {
+ seq = read_seqbegin(&state->seqlock);
+ memcpy(lo->stateid.data, state->stateid.data,
+ sizeof(state->stateid.data));
+ } while (read_seqretry(&state->seqlock, seq));
+ set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+ write_sequnlock(&lo->seqlock);
+ dprintk("<-- %s\n", __func__);
+}
+
+void
+pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state)
+{
+ int seq;
+
+ dprintk("--> %s\n", __func__);
+ do {
+ seq = read_seqbegin(&lo->seqlock);
+ if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
+ /* This will trigger retry of the read */
+ pnfs_layout_from_open_stateid(lo, open_state);
+ } else
+ memcpy(dst->data, lo->stateid.data,
+ sizeof(lo->stateid.data));
+ } while (read_seqretry(&lo->seqlock, seq));
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Get layout from server.
+ * For now, assume that whole-file layouts are requested.
+ * arg->offset: 0
+ * arg->length: all ones
+ */
+static struct pnfs_layout_segment *
+send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_open_context *ctx,
+ u32 iomode)
+{
+ struct inode *ino = lo->inode;
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+
+ dprintk("--> %s\n", __func__);
+
+ BUG_ON(ctx == NULL);
+ lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+ if (lgp == NULL) {
+ put_layout_hdr(lo->inode);
+ return NULL;
+ }
+ lgp->args.minlength = NFS4_MAX_UINT64;
+ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+ lgp->args.range.iomode = iomode;
+ lgp->args.range.offset = 0;
+ lgp->args.range.length = NFS4_MAX_UINT64;
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+ lgp->lsegpp = &lseg;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+ nfs4_proc_layoutget(lgp);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(iomode), &lo->state);
+ }
+ return lseg;
+}
+
+/*
+ * Compare two layout segments for sorting into layout cache.
+ * We want to preferentially return RW over RO layouts, so ensure those
+ * are seen first.
+ */
+static s64
+cmp_layout(u32 iomode1, u32 iomode2)
+{
+ /* read > read/write */
+ return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
+}
+
+static void
+pnfs_insert_layout(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_segment *lp;
+ int found = 0;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ if (list_empty(&lo->segs)) {
+ struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
+
+ spin_lock(&clp->cl_lock);
+ BUG_ON(!list_empty(&lo->layouts));
+ list_add_tail(&lo->layouts, &clp->cl_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
+ list_for_each_entry(lp, &lo->segs, fi_list) {
+ if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+ continue;
+ list_add_tail(&lseg->fi_list, &lp->fi_list);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu before "
+ "lp %p iomode %d offset %llu length %llu\n",
+ __func__, lseg, lseg->range.iomode,
+ lseg->range.offset, lseg->range.length,
+ lp, lp->range.iomode, lp->range.offset,
+ lp->range.length);
+ found = 1;
+ break;
+ }
+ if (!found) {
+ list_add_tail(&lseg->fi_list, &lo->segs);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu at tail\n",
+ __func__, lseg, lseg->range.iomode,
+ lseg->range.offset, lseg->range.length);
+ }
+ get_layout_hdr_locked(lo);
+
+ dprintk("%s:Return\n", __func__);
+}
+
+static struct pnfs_layout_hdr *
+alloc_init_layout_hdr(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+
+ lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+ if (!lo)
+ return NULL;
+ lo->refcount = 1;
+ INIT_LIST_HEAD(&lo->layouts);
+ INIT_LIST_HEAD(&lo->segs);
+ seqlock_init(&lo->seqlock);
+ lo->inode = ino;
+ return lo;
+}
+
+static struct pnfs_layout_hdr *
+pnfs_find_alloc_layout(struct inode *ino)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *new = NULL;
+
+ dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
+
+ assert_spin_locked(&ino->i_lock);
+ if (nfsi->layout)
+ return nfsi->layout;
+
+ spin_unlock(&ino->i_lock);
+ new = alloc_init_layout_hdr(ino);
+ spin_lock(&ino->i_lock);
+
+ if (likely(nfsi->layout == NULL)) /* Won the race? */
+ nfsi->layout = new;
+ else
+ kfree(new);
+ return nfsi->layout;
+}
+
+/*
+ * iomode matching rules:
+ * iomode lseg match
+ * ----- ----- -----
+ * ANY READ true
+ * ANY RW true
+ * RW READ false
+ * RW RW true
+ * READ READ true
+ * READ RW true
+ */
+static int
+is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
+{
+ return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+}
+
+/*
+ * lookup range in layout
+ */
+static struct pnfs_layout_segment *
+pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+{
+ struct pnfs_layout_segment *lseg, *ret = NULL;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ list_for_each_entry(lseg, &lo->segs, fi_list) {
+ if (is_matching_lseg(lseg, iomode)) {
+ ret = lseg;
+ break;
+ }
+ if (cmp_layout(iomode, lseg->range.iomode) > 0)
+ break;
+ }
+
+ dprintk("%s:Return lseg %p ref %d\n",
+ __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+ return ret;
+}
+
+/*
+ * Layout segment is retrieved from the server if not cached.
+ * The appropriate layout segment is referenced and returned to the caller.
+ */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ enum pnfs_iomode iomode)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_segment *lseg = NULL;
+
+ if (!pnfs_enabled_sb(NFS_SERVER(ino)))
+ return NULL;
+ spin_lock(&ino->i_lock);
+ lo = pnfs_find_alloc_layout(ino);
+ if (lo == NULL) {
+ dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
+ goto out_unlock;
+ }
+
+ /* Check to see if the layout for the given range already exists */
+ lseg = pnfs_has_layout(lo, iomode);
+ if (lseg) {
+ dprintk("%s: Using cached lseg %p for iomode %d)\n",
+ __func__, lseg, iomode);
+ goto out_unlock;
+ }
+
+ /* if LAYOUTGET already failed once we don't try again */
+ if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+ goto out_unlock;
+
+ get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+ spin_unlock(&ino->i_lock);
+
+ lseg = send_layoutget(lo, ctx, iomode);
+out:
+ dprintk("%s end, state 0x%lx lseg %p\n", __func__,
+ nfsi->layout->state, lseg);
+ return lseg;
+out_unlock:
+ spin_unlock(&ino->i_lock);
+ goto out;
+}
+
+int
+pnfs_layout_process(struct nfs4_layoutget *lgp)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
+ struct nfs4_layoutget_res *res = &lgp->res;
+ struct pnfs_layout_segment *lseg;
+ struct inode *ino = lo->inode;
+ int status = 0;
+
+ /* Inject layout blob into I/O device driver */
+ lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+ if (!lseg || IS_ERR(lseg)) {
+ if (!lseg)
+ status = -ENOMEM;
+ else
+ status = PTR_ERR(lseg);
+ dprintk("%s: Could not allocate layout: error %d\n",
+ __func__, status);
+ goto out;
+ }
+
+ spin_lock(&ino->i_lock);
+ init_lseg(lo, lseg);
+ lseg->range = res->range;
+ *lgp->lsegpp = lseg;
+ pnfs_insert_layout(lo, lseg);
+
+ /* Done processing layoutget. Set the layout stateid */
+ pnfs_set_layout_stateid(lo, &res->stateid);
+ spin_unlock(&ino->i_lock);
+out:
+ return status;
+}
+
+/*
+ * Device ID cache. Currently supports one layout type per struct nfs_client.
+ * Add layout type to the lookup key to expand to support multiple types.
+ */
+int
+pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
+ void (*free_callback)(struct pnfs_deviceid_node *))
+{
+ struct pnfs_deviceid_cache *c;
+
+ c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+ spin_lock(&clp->cl_lock);
+ if (clp->cl_devid_cache != NULL) {
+ atomic_inc(&clp->cl_devid_cache->dc_ref);
+ dprintk("%s [kref [%d]]\n", __func__,
+ atomic_read(&clp->cl_devid_cache->dc_ref));
+ kfree(c);
+ } else {
+ /* kzalloc initializes hlists */
+ spin_lock_init(&c->dc_lock);
+ atomic_set(&c->dc_ref, 1);
+ c->dc_free_callback = free_callback;
+ clp->cl_devid_cache = c;
+ dprintk("%s [new]\n", __func__);
+ }
+ spin_unlock(&clp->cl_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);
+
+/*
+ * Called from pnfs_layoutdriver_type->free_lseg
+ * last layout segment reference frees deviceid
+ */
+void
+pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
+ struct pnfs_deviceid_node *devid)
+{
+ struct nfs4_deviceid *id = &devid->de_id;
+ struct pnfs_deviceid_node *d;
+ struct hlist_node *n;
+ long h = nfs4_deviceid_hash(id);
+
+ dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
+ if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
+ return;
+
+ hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
+ if (!memcmp(&d->de_id, id, sizeof(*id))) {
+ hlist_del_rcu(&d->de_node);
+ spin_unlock(&c->dc_lock);
+ synchronize_rcu();
+ c->dc_free_callback(devid);
+ return;
+ }
+ spin_unlock(&c->dc_lock);
+ /* Why wasn't it found in the list? */
+ BUG();
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid);
+
+/* Find and reference a deviceid */
+struct pnfs_deviceid_node *
+pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
+{
+ struct pnfs_deviceid_node *d;
+ struct hlist_node *n;
+ long hash = nfs4_deviceid_hash(id);
+
+ dprintk("--> %s hash %ld\n", __func__, hash);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
+ if (!memcmp(&d->de_id, id, sizeof(*id))) {
+ if (!atomic_inc_not_zero(&d->de_ref)) {
+ goto fail;
+ } else {
+ rcu_read_unlock();
+ return d;
+ }
+ }
+ }
+fail:
+ rcu_read_unlock();
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);
+
+/*
+ * Add a deviceid to the cache.
+ * GETDEVICEINFOs for the same deviceid can race. If the deviceid is found, discard the new one.
+ */
+struct pnfs_deviceid_node *
+pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
+{
+ struct pnfs_deviceid_node *d;
+ long hash = nfs4_deviceid_hash(&new->de_id);
+
+ dprintk("--> %s hash %ld\n", __func__, hash);
+ spin_lock(&c->dc_lock);
+ d = pnfs_find_get_deviceid(c, &new->de_id);
+ if (d) {
+ spin_unlock(&c->dc_lock);
+ dprintk("%s [discard]\n", __func__);
+ c->dc_free_callback(new);
+ return d;
+ }
+ INIT_HLIST_NODE(&new->de_node);
+ atomic_set(&new->de_ref, 1);
+ hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
+ spin_unlock(&c->dc_lock);
+ dprintk("%s [new]\n", __func__);
+ return new;
+}
+EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
+
+void
+pnfs_put_deviceid_cache(struct nfs_client *clp)
+{
+ struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
+
+ dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+ if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
+ int i;
+ /* Verify cache is empty */
+ for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
+ BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
+ clp->cl_devid_cache = NULL;
+ spin_unlock(&clp->cl_lock);
+ kfree(local);
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);
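/*
 * Illustrative sketch, not part of this patch: typical use of the device
 * ID cache exported above by a layout driver.  The example_* names, the
 * embedding structure, and the example_decode_deviceinfo() helper are
 * assumptions; the pnfs_*deviceid* calls are the interface added here.
 */
struct example_deviceid {
	struct pnfs_deviceid_node node;	/* container_of() recovers this */
	/* driver-private device description would follow */
};

static void example_free_deviceid(struct pnfs_deviceid_node *d)
{
	kfree(container_of(d, struct example_deviceid, node));
}

static int example_client_setup(struct nfs_client *clp)
{
	/* one cache per client; just takes a reference if already present */
	return pnfs_alloc_init_deviceid_cache(clp, example_free_deviceid);
}

static struct pnfs_deviceid_node *
example_lookup_deviceid(struct nfs_client *clp, struct nfs4_deviceid *id)
{
	struct pnfs_deviceid_node *d;

	d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
	if (d)
		return d;
	/* Not cached: GETDEVICEINFO, decode into a new node with de_id set */
	d = example_decode_deviceinfo(clp, id);	/* hypothetical helper */
	if (!d)
		return NULL;
	/* either inserts d, or frees it and returns a racing duplicate */
	return pnfs_add_deviceid(clp->cl_devid_cache, d);
}
/*
 * The driver's free_lseg op would drop its reference with
 * pnfs_put_deviceid(), and client teardown would finish with
 * pnfs_put_deviceid_cache().
 */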
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
new file mode 100644
index 000000000000..e12367d50489
--- /dev/null
+++ b/fs/nfs/pnfs.h
@@ -0,0 +1,189 @@
+/*
+ * pNFS client data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_PNFS_H
+#define FS_NFS_PNFS_H
+
+struct pnfs_layout_segment {
+ struct list_head fi_list;
+ struct pnfs_layout_range range;
+ struct kref kref;
+ struct pnfs_layout_hdr *layout;
+};
+
+#ifdef CONFIG_NFS_V4_1
+
+#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
+
+enum {
+ NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
+ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
+ NFS_LAYOUT_STATEID_SET, /* have a valid layout stateid */
+};
+
+/* Per-layout driver specific registration structure */
+struct pnfs_layoutdriver_type {
+ struct list_head pnfs_tblid;
+ const u32 id;
+ const char *name;
+ struct module *owner;
+ int (*set_layoutdriver) (struct nfs_server *);
+ int (*clear_layoutdriver) (struct nfs_server *);
+ struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
+ void (*free_lseg) (struct pnfs_layout_segment *lseg);
+};
+
+struct pnfs_layout_hdr {
+ unsigned long refcount;
+ struct list_head layouts; /* other client layouts */
+ struct list_head segs; /* layout segments list */
+ seqlock_t seqlock; /* Protects the stateid */
+ nfs4_stateid stateid;
+ unsigned long state;
+ struct inode *inode;
+};
+
+struct pnfs_device {
+ struct nfs4_deviceid dev_id;
+ unsigned int layout_type;
+ unsigned int mincount;
+ struct page **pages;
+ void *area;
+ unsigned int pgbase;
+ unsigned int pglen;
+};
+
+/*
+ * Device ID RCU cache. A device ID is unique per client ID and layout type.
+ */
+#define NFS4_DEVICE_ID_HASH_BITS 5
+#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
+#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
+
+static inline u32
+nfs4_deviceid_hash(struct nfs4_deviceid *id)
+{
+ unsigned char *cptr = (unsigned char *)id->data;
+ unsigned int nbytes = NFS4_DEVICEID4_SIZE;
+ u32 x = 0;
+
+ while (nbytes--) {
+ x *= 37;
+ x += *cptr++;
+ }
+ return x & NFS4_DEVICE_ID_HASH_MASK;
+}
+
+struct pnfs_deviceid_node {
+ struct hlist_node de_node;
+ struct nfs4_deviceid de_id;
+ atomic_t de_ref;
+};
+
+struct pnfs_deviceid_cache {
+ spinlock_t dc_lock;
+ atomic_t dc_ref;
+ void (*dc_free_callback)(struct pnfs_deviceid_node *);
+ struct hlist_head dc_deviceids[NFS4_DEVICE_ID_HASH_SIZE];
+};
+
+extern int pnfs_alloc_init_deviceid_cache(struct nfs_client *,
+ void (*free_callback)(struct pnfs_deviceid_node *));
+extern void pnfs_put_deviceid_cache(struct nfs_client *);
+extern struct pnfs_deviceid_node *pnfs_find_get_deviceid(
+ struct pnfs_deviceid_cache *,
+ struct nfs4_deviceid *);
+extern struct pnfs_deviceid_node *pnfs_add_deviceid(
+ struct pnfs_deviceid_cache *,
+ struct pnfs_deviceid_node *);
+extern void pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
+ struct pnfs_deviceid_node *devid);
+
+extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
+extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
+
+/* nfs4proc.c */
+extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+
+/* pnfs.c */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
+ enum pnfs_iomode access_type);
+void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
+void unset_pnfs_layoutdriver(struct nfs_server *);
+int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_all_layouts(struct nfs_client *);
+void put_layout_hdr(struct inode *inode);
+void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state);
+
+
+static inline int lo_fail_bit(u32 iomode)
+{
+ return iomode == IOMODE_RW ?
+ NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
+}
+
+/* Return true if a layout driver is being used for this mountpoint */
+static inline int pnfs_enabled_sb(struct nfs_server *nfss)
+{
+ return nfss->pnfs_curr_ld != NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+}
+
+static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+}
+
+static inline struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
+ enum pnfs_iomode access_type)
+{
+ return NULL;
+}
+
+static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
+{
+}
+
+static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
+{
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 79859c81a943..e4b62c6f5a6e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -25,6 +25,7 @@
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -120,6 +121,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
+ pnfs_update_layout(inode, ctx, IOMODE_READ);
new = nfs_create_request(ctx, inode, page, 0, len);
if (IS_ERR(new)) {
unlock_page(page);
@@ -624,6 +626,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
+ pnfs_update_layout(inode, desc.ctx, IOMODE_READ);
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 605e292501f4..4c14c17a5276 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -290,9 +290,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
nfs_pageio_cond_complete(pgio, page->index);
- ret = nfs_page_async_flush(pgio, page,
- wbc->sync_mode == WB_SYNC_NONE ||
- wbc->nonblocking != 0);
+ ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
ret = 0;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 7cf4ddafb4ab..18b3e8975fe0 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -2,7 +2,6 @@ config NFSD
tristate "NFS server support"
depends on INET
depends on FILE_LOCKING
- depends on BKL # fix as soon as lockd is done
select LOCKD
select SUNRPC
select EXPORTFS
@@ -29,6 +28,18 @@ config NFSD
If unsure, say N.
+config NFSD_DEPRECATED
+ bool "Include support for deprecated syscall interface to NFSD"
+ depends on NFSD
+ default y
+ help
+ The syscall interface to nfsd was obsoleted in 2.6.0 by a new
+ filesystem based interface. The old interface is due for removal
+ in 2.6.40. If you wish to remove the interface before then,
+ say N.
+
+ If unsure, say Y.
+
config NFSD_V2_ACL
bool
depends on NFSD
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c2a4f71d87dd..c0fcb7ab7f6d 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -28,9 +28,6 @@
typedef struct auth_domain svc_client;
typedef struct svc_export svc_export;
-static void exp_do_unexport(svc_export *unexp);
-static int exp_verify_string(char *cp, int max);
-
/*
* We have two caches.
* One maps client+vfsmnt+dentry to export options - the export map
@@ -802,6 +799,7 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
return ek;
}
+#ifdef CONFIG_NFSD_DEPRECATED
static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
struct svc_export *exp)
{
@@ -852,6 +850,7 @@ exp_get_fsid_key(svc_client *clp, int fsid)
return exp_find_key(clp, FSID_NUM, fsidv, NULL);
}
+#endif
static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
struct cache_req *reqp)
@@ -893,6 +892,7 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
return exp;
}
+#ifdef CONFIG_NFSD_DEPRECATED
/*
* Hashtable locking. Write locks are placed only by user processes
* wanting to modify export information.
@@ -925,6 +925,19 @@ exp_writeunlock(void)
{
up_write(&hash_sem);
}
+#else
+
+/* hash_sem not needed once deprecated interface is removed */
+void exp_readlock(void) {}
+static inline void exp_writelock(void){}
+void exp_readunlock(void) {}
+static inline void exp_writeunlock(void){}
+
+#endif
+
+#ifdef CONFIG_NFSD_DEPRECATED
+static void exp_do_unexport(svc_export *unexp);
+static int exp_verify_string(char *cp, int max);
static void exp_fsid_unhash(struct svc_export *exp)
{
@@ -935,10 +948,9 @@ static void exp_fsid_unhash(struct svc_export *exp)
ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
if (!IS_ERR(ek)) {
- ek->h.expiry_time = get_seconds()-1;
+ sunrpc_invalidate(&ek->h, &svc_expkey_cache);
cache_put(&ek->h, &svc_expkey_cache);
}
- svc_expkey_cache.nextcheck = get_seconds();
}
static int exp_fsid_hash(svc_client *clp, struct svc_export *exp)
@@ -973,10 +985,9 @@ static void exp_unhash(struct svc_export *exp)
ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
if (!IS_ERR(ek)) {
- ek->h.expiry_time = get_seconds()-1;
+ sunrpc_invalidate(&ek->h, &svc_expkey_cache);
cache_put(&ek->h, &svc_expkey_cache);
}
- svc_expkey_cache.nextcheck = get_seconds();
}
/*
@@ -1097,8 +1108,7 @@ out:
static void
exp_do_unexport(svc_export *unexp)
{
- unexp->h.expiry_time = get_seconds()-1;
- svc_export_cache.nextcheck = get_seconds();
+ sunrpc_invalidate(&unexp->h, &svc_export_cache);
exp_unhash(unexp);
exp_fsid_unhash(unexp);
}
@@ -1150,6 +1160,7 @@ out_unlock:
exp_writeunlock();
return err;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/*
* Obtain the root fh on behalf of a client.
@@ -1459,25 +1470,43 @@ static void show_secinfo_flags(struct seq_file *m, int flags)
show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
}
+static bool secinfo_flags_equal(int f, int g)
+{
+ f &= NFSEXP_SECINFO_FLAGS;
+ g &= NFSEXP_SECINFO_FLAGS;
+ return f == g;
+}
+
+static int show_secinfo_run(struct seq_file *m, struct exp_flavor_info **fp, struct exp_flavor_info *end)
+{
+ int flags;
+
+ flags = (*fp)->flags;
+ seq_printf(m, ",sec=%d", (*fp)->pseudoflavor);
+ (*fp)++;
+ while (*fp != end && secinfo_flags_equal(flags, (*fp)->flags)) {
+ seq_printf(m, ":%d", (*fp)->pseudoflavor);
+ (*fp)++;
+ }
+ return flags;
+}
+
static void show_secinfo(struct seq_file *m, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
- int lastflags = 0, first = 0;
+ int flags;
if (exp->ex_nflavors == 0)
return;
- for (f = exp->ex_flavors; f < end; f++) {
- if (first || f->flags != lastflags) {
- if (!first)
- show_secinfo_flags(m, lastflags);
- seq_printf(m, ",sec=%d", f->pseudoflavor);
- lastflags = f->flags;
- } else {
- seq_printf(m, ":%d", f->pseudoflavor);
- }
+ f = exp->ex_flavors;
+ flags = show_secinfo_run(m, &f, end);
+ if (!secinfo_flags_equal(flags, exp->ex_flags))
+ show_secinfo_flags(m, flags);
+ while (f != end) {
+ flags = show_secinfo_run(m, &f, end);
+ show_secinfo_flags(m, flags);
}
- show_secinfo_flags(m, lastflags);
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
@@ -1532,6 +1561,7 @@ const struct seq_operations nfs_exports_op = {
.show = e_show,
};
+#ifdef CONFIG_NFSD_DEPRECATED
/*
* Add or modify a client.
* Change requests may involve the list of host addresses. The list of
@@ -1563,7 +1593,7 @@ exp_addclient(struct nfsctl_client *ncp)
/* Insert client into hashtable. */
for (i = 0; i < ncp->cl_naddr; i++) {
ipv6_addr_set_v4mapped(ncp->cl_addrlist[i].s_addr, &addr6);
- auth_unix_add_addr(&addr6, dom);
+ auth_unix_add_addr(&init_net, &addr6, dom);
}
auth_unix_forget_old(dom);
auth_domain_put(dom);
@@ -1621,6 +1651,7 @@ exp_verify_string(char *cp, int max)
printk(KERN_NOTICE "nfsd: couldn't validate string %s\n", cp);
return 0;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/*
* Initialize the exports module.
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 988cbb3a19b6..143da2eecd7b 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -41,7 +41,6 @@
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
-#define NFS4_STATEID_SIZE 16
/* Index of predefined Linux callback client operations */
@@ -248,10 +247,11 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
}
static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
+encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
+ struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
if (hdr->minorversion == 0)
return;
@@ -259,8 +259,8 @@ encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
WRITE32(OP_CB_SEQUENCE);
- WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
- WRITE32(args->cbs_clp->cl_cb_seq_nr);
+ WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(ses->se_cb_seq_nr);
WRITE32(0); /* slotid, always 0 */
WRITE32(0); /* highest slotid always 0 */
WRITE32(0); /* cachethis always 0 */
@@ -280,18 +280,18 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
- struct nfs4_rpc_args *rpc_args)
+ struct nfsd4_callback *cb)
{
struct xdr_stream xdr;
- struct nfs4_delegation *args = rpc_args->args_op;
+ struct nfs4_delegation *args = cb->cb_op;
struct nfs4_cb_compound_hdr hdr = {
- .ident = args->dl_ident,
- .minorversion = rpc_args->args_seq.cbs_minorversion,
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_minorversion,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
+ encode_cb_sequence(&xdr, cb, &hdr);
encode_cb_recall(&xdr, args, &hdr);
encode_cb_nops(&hdr);
return 0;
@@ -339,15 +339,16 @@ decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
* with a single slot.
*/
static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
+decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
struct rpc_rqst *rqstp)
{
+ struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
struct nfs4_sessionid id;
int status;
u32 dummy;
__be32 *p;
- if (res->cbs_minorversion == 0)
+ if (cb->cb_minorversion == 0)
return 0;
status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
@@ -363,13 +364,12 @@ decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
- if (memcmp(id.data, res->cbs_clp->cl_sessionid.data,
- NFS4_MAX_SESSIONID_LEN)) {
+ if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out;
}
READ32(dummy);
- if (dummy != res->cbs_clp->cl_cb_seq_nr) {
+ if (dummy != ses->se_cb_seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out;
}
@@ -393,7 +393,7 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
- struct nfsd4_cb_sequence *seq)
+ struct nfsd4_callback *cb)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr;
@@ -403,8 +403,8 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
status = decode_cb_compound_hdr(&xdr, &hdr);
if (status)
goto out;
- if (seq) {
- status = decode_cb_sequence(&xdr, seq, rqstp);
+ if (cb) {
+ status = decode_cb_sequence(&xdr, cb, rqstp);
if (status)
goto out;
}
@@ -473,30 +473,34 @@ static int max_cb_time(void)
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cl_cb_set an atomic? */
-int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
+int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
.to_retries = 0,
};
struct rpc_create_args args = {
- .protocol = XPRT_TRANSPORT_TCP,
- .address = (struct sockaddr *) &cb->cb_addr,
- .addrsize = cb->cb_addrlen,
+ .net = &init_net,
+ .address = (struct sockaddr *) &conn->cb_addr,
+ .addrsize = conn->cb_addrlen,
.timeout = &timeparms,
.program = &cb_program,
- .prognumber = cb->cb_prog,
.version = 0,
.authflavor = clp->cl_flavor,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
- .client_name = clp->cl_principal,
};
struct rpc_clnt *client;
- if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
- return -EINVAL;
- if (cb->cb_minorversion) {
- args.bc_xprt = cb->cb_xprt;
+ if (clp->cl_minorversion == 0) {
+ if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+ return -EINVAL;
+ args.client_name = clp->cl_principal;
+ args.prognumber = conn->cb_prog,
+ args.protocol = XPRT_TRANSPORT_TCP;
+ clp->cl_cb_ident = conn->cb_ident;
+ } else {
+ args.bc_xprt = conn->cb_xprt;
+ args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP;
}
/* Create RPC client */
@@ -506,7 +510,7 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
PTR_ERR(client));
return PTR_ERR(client);
}
- nfsd4_set_callback_client(clp, client);
+ clp->cl_cb_client = client;
return 0;
}
@@ -519,7 +523,7 @@ static void warn_no_callback_path(struct nfs4_client *clp, int reason)
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_client *clp = calldata;
+ struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
if (task->tk_status)
warn_no_callback_path(clp, task->tk_status);
@@ -528,6 +532,8 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
+ /* XXX: release method to ensure we set the cb channel down if
+ * necessary on early failure? */
.rpc_call_done = nfsd4_cb_probe_done,
};
@@ -543,38 +549,42 @@ int set_callback_cred(void)
return 0;
}
+static struct workqueue_struct *callback_wq;
-void do_probe_callback(struct nfs4_client *clp)
+static void do_probe_callback(struct nfs4_client *clp)
{
- struct rpc_message msg = {
- .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
- .rpc_argp = clp,
- .rpc_cred = callback_cred
- };
- int status;
+ struct nfsd4_callback *cb = &clp->cl_cb_null;
- status = rpc_call_async(clp->cl_cb_client, &msg,
- RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
- &nfsd4_cb_probe_ops, (void *)clp);
- if (status)
- warn_no_callback_path(clp, status);
+ cb->cb_op = NULL;
+ cb->cb_clp = clp;
+
+ cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
+ cb->cb_msg.rpc_argp = NULL;
+ cb->cb_msg.rpc_resp = NULL;
+ cb->cb_msg.rpc_cred = callback_cred;
+
+ cb->cb_ops = &nfsd4_cb_probe_ops;
+
+ queue_work(callback_wq, &cb->cb_work);
}
/*
- * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
+ * Poke the callback thread to process any updates to the callback
+ * parameters, and send a null probe.
*/
-void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
+void nfsd4_probe_callback(struct nfs4_client *clp)
{
- int status;
+ set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ do_probe_callback(clp);
+}
+void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
+{
BUG_ON(atomic_read(&clp->cl_cb_set));
- status = setup_callback_client(clp, cb);
- if (status) {
- warn_no_callback_path(clp, status);
- return;
- }
- do_probe_callback(clp);
+ spin_lock(&clp->cl_lock);
+ memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
+ spin_unlock(&clp->cl_lock);
}
/*
@@ -585,8 +595,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
struct rpc_task *task)
{
- struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
- u32 *ptr = (u32 *)clp->cl_sessionid.data;
+ u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
int status = 0;
dprintk("%s: %u:%u:%u:%u\n", __func__,
@@ -598,14 +607,6 @@ static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
status = -EAGAIN;
goto out;
}
-
- /*
- * We'll need the clp during XDR encoding and decoding,
- * and the sequence during decoding to verify the reply
- */
- args->args_seq.cbs_clp = clp;
- task->tk_msg.rpc_resp = &args->args_seq;
-
out:
dprintk("%s status=%d\n", __func__, status);
return status;
@@ -617,13 +618,13 @@ out:
*/
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
- struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
- u32 minorversion = clp->cl_cb_conn.cb_minorversion;
+ u32 minorversion = clp->cl_minorversion;
int status = 0;
- args->args_seq.cbs_minorversion = minorversion;
+ cb->cb_minorversion = minorversion;
if (minorversion) {
status = nfsd41_cb_setup_sequence(clp, task);
if (status) {
@@ -640,19 +641,20 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
dprintk("%s: minorversion=%d\n", __func__,
- clp->cl_cb_conn.cb_minorversion);
+ clp->cl_minorversion);
- if (clp->cl_cb_conn.cb_minorversion) {
+ if (clp->cl_minorversion) {
/* No need for lock, access serialized in nfsd4_cb_prepare */
- ++clp->cl_cb_seq_nr;
+ ++clp->cl_cb_session->se_cb_seq_nr;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
- clp->cl_cb_seq_nr);
+ clp->cl_cb_session->se_cb_seq_nr);
/* We're done looking into the sequence information */
task->tk_msg.rpc_resp = NULL;
@@ -662,7 +664,8 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
@@ -707,7 +710,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
static void nfsd4_cb_recall_release(void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
nfs4_put_delegation(dp);
}
@@ -718,8 +722,6 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
.rpc_release = nfsd4_cb_recall_release,
};
-static struct workqueue_struct *callback_wq;
-
int nfsd4_create_callback_queue(void)
{
callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
@@ -734,57 +736,88 @@ void nfsd4_destroy_callback_queue(void)
}
/* must be called under the state lock */
-void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
+void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
- struct rpc_clnt *old = clp->cl_cb_client;
-
- clp->cl_cb_client = new;
+ set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
/*
- * After this, any work that saw the old value of cl_cb_client will
- * be gone:
+ * Note this won't actually result in a null callback;
+ * instead, nfsd4_do_callback_rpc() will detect the killed
+ * client, destroy the rpc client, and stop:
*/
+ do_probe_callback(clp);
flush_workqueue(callback_wq);
- /* So we can safely shut it down: */
- if (old)
- rpc_shutdown_client(old);
}
-/*
- * called with dp->dl_count inc'ed.
- */
-static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
+void nfsd4_release_cb(struct nfsd4_callback *cb)
{
- struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_cb_client;
- struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
- struct rpc_message msg = {
- .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_cred = callback_cred
- };
+ if (cb->cb_ops->rpc_release)
+ cb->cb_ops->rpc_release(cb);
+}
- if (clnt == NULL) {
- nfs4_put_delegation(dp);
- return; /* Client is shutting down; give up. */
+void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_conn conn;
+ struct nfs4_client *clp = cb->cb_clp;
+ int err;
+
+ /*
+ * This is either an update, or the client dying; in either case,
+ * kill the old client:
+ */
+ if (clp->cl_cb_client) {
+ rpc_shutdown_client(clp->cl_cb_client);
+ clp->cl_cb_client = NULL;
}
+ if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
+ return;
+ spin_lock(&clp->cl_lock);
+ /*
+ * Only serialized callback code is allowed to clear these
+ * flags; main nfsd code can only set them:
+ */
+ BUG_ON(!clp->cl_cb_flags);
+ clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
+ spin_unlock(&clp->cl_lock);
- args->args_op = dp;
- msg.rpc_argp = args;
- dp->dl_retries = 1;
- rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &nfsd4_cb_recall_ops, dp);
+ err = setup_callback_client(clp, &conn);
+ if (err)
+ warn_no_callback_path(clp, err);
}
void nfsd4_do_callback_rpc(struct work_struct *w)
{
- /* XXX: for now, just send off delegation recall. */
- /* In future, generalize to handle any sort of callback. */
- struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
- struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+ struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
+ struct nfs4_client *clp = cb->cb_clp;
+ struct rpc_clnt *clnt;
- _nfsd4_cb_recall(dp);
-}
+ if (clp->cl_cb_flags)
+ nfsd4_process_cb_update(cb);
+ clnt = clp->cl_cb_client;
+ if (!clnt) {
+ /* Callback channel broken, or client killed; give up: */
+ nfsd4_release_cb(cb);
+ return;
+ }
+ rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+ cb->cb_ops, cb);
+}
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
+ struct nfsd4_callback *cb = &dp->dl_recall;
+
+ dp->dl_retries = 1;
+ cb->cb_op = dp;
+ cb->cb_clp = dp->dl_client;
+ cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
+ cb->cb_msg.rpc_argp = cb;
+ cb->cb_msg.rpc_resp = cb;
+ cb->cb_msg.rpc_cred = callback_cred;
+
+ cb->cb_ops = &nfsd4_cb_recall_ops;
+ dp->dl_retries = 1;
+
queue_work(callback_wq, &dp->dl_recall.cb_work);
}
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index c78dbf493424..f0695e815f0e 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
cache_unregister(&nametoid_cache);
}
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
- struct cache_req req;
- struct cache_deferred_req deferred_req;
- wait_queue_head_t waitq;
- atomic_t count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
- if (atomic_dec_and_test(&mdr->count))
- kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
- atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
- struct idmap_defer_req *mdr =
- container_of(dreq, struct idmap_defer_req, deferred_req);
-
- wake_up(&mdr->waitq);
- put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
- struct idmap_defer_req *mdr =
- container_of(req, struct idmap_defer_req, req);
-
- mdr->deferred_req.revisit = idmap_revisit;
- get_mdr(mdr);
- return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
- struct cache_detail *detail, struct ent **item,
- struct idmap_defer_req *mdr)
-{
- *item = lookup_fn(key);
- if (!*item)
- return -ENOMEM;
- return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
- struct ent *key, struct cache_detail *detail,
- struct ent **item)
-{
- int ret = -ENOMEM;
-
- *item = lookup_fn(key);
- if (!*item)
- goto out_err;
- ret = -ETIMEDOUT;
- if (!test_bit(CACHE_VALID, &(*item)->h.flags)
- || (*item)->h.expiry_time < get_seconds()
- || detail->flush_time > (*item)->h.last_refresh)
- goto out_put;
- ret = -ENOENT;
- if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
- goto out_put;
- return 0;
-out_put:
- cache_put(&(*item)->h, detail);
-out_err:
- *item = NULL;
- return ret;
-}
-
static int
idmap_lookup(struct svc_rqst *rqstp,
struct ent *(*lookup_fn)(struct ent *), struct ent *key,
struct cache_detail *detail, struct ent **item)
{
- struct idmap_defer_req *mdr;
int ret;
- mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
- if (!mdr)
+ *item = lookup_fn(key);
+ if (!*item)
return -ENOMEM;
- atomic_set(&mdr->count, 1);
- init_waitqueue_head(&mdr->waitq);
- mdr->req.defer = idmap_defer;
- ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
- if (ret == -EAGAIN) {
- wait_event_interruptible_timeout(mdr->waitq,
- test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
- ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+ ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+ if (ret == -ETIMEDOUT) {
+ struct ent *prev_item = *item;
+ *item = lookup_fn(key);
+ if (*item != prev_item)
+ goto retry;
+ cache_put(&(*item)->h, detail);
}
- put_mdr(mdr);
return ret;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 59ec449b0c7f..0cdfd022bb7b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1031,8 +1031,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
resp->cstate.session = NULL;
fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
- /* Use the deferral mechanism only for NFSv4.0 compounds */
- rqstp->rq_usedeferral = (args->minorversion == 0);
+ /*
+ * Don't use the deferral mechanism for NFSv4; compounds make it
+ * too hard to avoid non-idempotency problems.
+ */
+ rqstp->rq_usedeferral = 0;
/*
* According to RFC3010, this takes precedence over all other errors.
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a7292fcf7718..56347e0ac88d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -207,7 +207,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
- struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn;
dprintk("NFSD alloc_init_deleg\n");
/*
@@ -234,7 +233,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
nfs4_file_get_access(fp, O_RDONLY);
dp->dl_flock = NULL;
dp->dl_type = type;
- dp->dl_ident = cb->cb_ident;
dp->dl_stateid.si_boot = boot_time;
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
@@ -535,171 +533,258 @@ gen_sessionid(struct nfsd4_session *ses)
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+static void
+free_session_slots(struct nfsd4_session *ses)
+{
+ int i;
+
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+ kfree(ses->se_slots[i]);
+}
+
/*
- * Give the client the number of ca_maxresponsesize_cached slots it
- * requests, of size bounded by NFSD_SLOT_CACHE_SIZE,
- * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more
- * than NFSD_MAX_SLOTS_PER_SESSION.
- *
- * If we run out of reserved DRC memory we should (up to a point)
+ * We don't actually need to cache the rpc and session headers, so we
+ * can allocate a little less for each slot:
+ */
+static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
+{
+ return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+}
+
+static int nfsd4_sanitize_slot_size(u32 size)
+{
+ size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
+ size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
+
+ return size;
+}
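/*
 * Worked example (illustration, not part of this patch): with
 * NFSD_MIN_HDR_SEQ_SZ = 24 + 12 + 44 = 80 bytes, a client asking for
 * ca_maxresponsesize_cached of 1104 bytes is sanitized above to
 * 1104 - 80 = 1024 bytes of cache per slot, still subject to the
 * NFSD_SLOT_CACHE_SIZE cap (defined elsewhere in nfsd).  The number of
 * slots is then bounded separately against available DRC memory below.
 */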
+
+/*
+ * XXX: If we run out of reserved DRC memory we could (up to a point)
* re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
*/
-static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan)
+static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
- int mem, size = fchan->maxresp_cached;
+ int avail;
- if (fchan->maxreqs < 1)
- return nfserr_inval;
+ num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
- if (size < NFSD_MIN_HDR_SEQ_SZ)
- size = NFSD_MIN_HDR_SEQ_SZ;
- size -= NFSD_MIN_HDR_SEQ_SZ;
- if (size > NFSD_SLOT_CACHE_SIZE)
- size = NFSD_SLOT_CACHE_SIZE;
-
- /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */
- mem = fchan->maxreqs * size;
- if (mem > NFSD_MAX_MEM_PER_SESSION) {
- fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size;
- if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
- fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
- mem = fchan->maxreqs * size;
- }
+ spin_lock(&nfsd_drc_lock);
+ avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
+ nfsd_drc_max_mem - nfsd_drc_mem_used);
+ num = min_t(int, num, avail / slotsize);
+ nfsd_drc_mem_used += num * slotsize;
+ spin_unlock(&nfsd_drc_lock);
+ return num;
+}
+
+static void nfsd4_put_drc_mem(int slotsize, int num)
+{
spin_lock(&nfsd_drc_lock);
- /* bound the total session drc memory ussage */
- if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) {
- fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size;
- mem = fchan->maxreqs * size;
- }
- nfsd_drc_mem_used += mem;
+ nfsd_drc_mem_used -= slotsize * num;
spin_unlock(&nfsd_drc_lock);
+}
- if (fchan->maxreqs == 0)
- return nfserr_jukebox;
+static struct nfsd4_session *alloc_session(int slotsize, int numslots)
+{
+ struct nfsd4_session *new;
+ int mem, i;
- fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ;
- return 0;
+ BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
+ + sizeof(struct nfsd4_session) > PAGE_SIZE);
+ mem = numslots * sizeof(struct nfsd4_slot *);
+
+ new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
+ if (!new)
+ return NULL;
+ /* allocate each struct nfsd4_slot and data cache in one piece */
+ for (i = 0; i < numslots; i++) {
+ mem = sizeof(struct nfsd4_slot) + slotsize;
+ new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
+ if (!new->se_slots[i])
+ goto out_free;
+ }
+ return new;
+out_free:
+ while (i--)
+ kfree(new->se_slots[i]);
+ kfree(new);
+ return NULL;
}
-/*
- * fchan holds the client values on input, and the server values on output
- * sv_max_mesg is the maximum payload plus one page for overhead.
- */
-static int init_forechannel_attrs(struct svc_rqst *rqstp,
- struct nfsd4_channel_attrs *session_fchan,
- struct nfsd4_channel_attrs *fchan)
+static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
- int status = 0;
- __u32 maxcount = nfsd_serv->sv_max_mesg;
+ u32 maxrpc = nfsd_serv->sv_max_mesg;
- /* headerpadsz set to zero in encode routine */
+ new->maxreqs = numslots;
+ new->maxresp_cached = slotsize + NFSD_MIN_HDR_SEQ_SZ;
+ new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
+ new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
+ new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
+}
- /* Use the client's max request and max response size if possible */
- if (fchan->maxreq_sz > maxcount)
- fchan->maxreq_sz = maxcount;
- session_fchan->maxreq_sz = fchan->maxreq_sz;
+static void free_conn(struct nfsd4_conn *c)
+{
+ svc_xprt_put(c->cn_xprt);
+ kfree(c);
+}
- if (fchan->maxresp_sz > maxcount)
- fchan->maxresp_sz = maxcount;
- session_fchan->maxresp_sz = fchan->maxresp_sz;
+static void nfsd4_conn_lost(struct svc_xpt_user *u)
+{
+ struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
+ struct nfs4_client *clp = c->cn_session->se_client;
- /* Use the client's maxops if possible */
- if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
- fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
- session_fchan->maxops = fchan->maxops;
+ spin_lock(&clp->cl_lock);
+ if (!list_empty(&c->cn_persession)) {
+ list_del(&c->cn_persession);
+ free_conn(c);
+ }
+ spin_unlock(&clp->cl_lock);
+}
- /* FIXME: Error means no more DRC pages so the server should
- * recover pages from existing sessions. For now fail session
- * creation.
- */
- status = set_forechannel_drc_size(fchan);
+static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
+{
+ struct nfsd4_conn *conn;
- session_fchan->maxresp_cached = fchan->maxresp_cached;
- session_fchan->maxreqs = fchan->maxreqs;
+ conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
+ if (!conn)
+ return NULL;
+ svc_xprt_get(rqstp->rq_xprt);
+ conn->cn_xprt = rqstp->rq_xprt;
+ conn->cn_flags = flags;
+ INIT_LIST_HEAD(&conn->cn_xpt_user.list);
+ return conn;
+}
- dprintk("%s status %d\n", __func__, status);
- return status;
+static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
+{
+ conn->cn_session = ses;
+ list_add(&conn->cn_persession, &ses->se_conns);
}
-static void
-free_session_slots(struct nfsd4_session *ses)
+static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
- int i;
+ struct nfs4_client *clp = ses->se_client;
- for (i = 0; i < ses->se_fchannel.maxreqs; i++)
- kfree(ses->se_slots[i]);
+ spin_lock(&clp->cl_lock);
+ __nfsd4_hash_conn(conn, ses);
+ spin_unlock(&clp->cl_lock);
}
-/*
- * We don't actually need to cache the rpc and session headers, so we
- * can allocate a little less for each slot:
- */
-static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
+static void nfsd4_register_conn(struct nfsd4_conn *conn)
{
- return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+ conn->cn_xpt_user.callback = nfsd4_conn_lost;
+ register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
-static int
-alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
- struct nfsd4_create_session *cses)
+static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
{
- struct nfsd4_session *new, tmp;
- struct nfsd4_slot *sp;
- int idx, slotsize, cachesize, i;
- int status;
+ struct nfsd4_conn *conn;
+ u32 flags = NFS4_CDFC4_FORE;
- memset(&tmp, 0, sizeof(tmp));
+ if (ses->se_flags & SESSION4_BACK_CHAN)
+ flags |= NFS4_CDFC4_BACK;
+ conn = alloc_conn(rqstp, flags);
+ if (!conn)
+ return nfserr_jukebox;
+ nfsd4_hash_conn(conn, ses);
+ nfsd4_register_conn(conn);
+ return nfs_ok;
+}
- /* FIXME: For now, we just accept the client back channel attributes. */
- tmp.se_bchannel = cses->back_channel;
- status = init_forechannel_attrs(rqstp, &tmp.se_fchannel,
- &cses->fore_channel);
- if (status)
- goto out;
+static void nfsd4_del_conns(struct nfsd4_session *s)
+{
+ struct nfs4_client *clp = s->se_client;
+ struct nfsd4_conn *c;
- BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot)
- + sizeof(struct nfsd4_session) > PAGE_SIZE);
+ spin_lock(&clp->cl_lock);
+ while (!list_empty(&s->se_conns)) {
+ c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
+ list_del_init(&c->cn_persession);
+ spin_unlock(&clp->cl_lock);
- status = nfserr_jukebox;
- /* allocate struct nfsd4_session and slot table pointers in one piece */
- slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *);
- new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
- if (!new)
- goto out;
+ unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
+ free_conn(c);
- memcpy(new, &tmp, sizeof(*new));
+ spin_lock(&clp->cl_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+}
- /* allocate each struct nfsd4_slot and data cache in one piece */
- cachesize = slot_bytes(&new->se_fchannel);
- for (i = 0; i < new->se_fchannel.maxreqs; i++) {
- sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
- if (!sp)
- goto out_free;
- new->se_slots[i] = sp;
+void free_session(struct kref *kref)
+{
+ struct nfsd4_session *ses;
+ int mem;
+
+ ses = container_of(kref, struct nfsd4_session, se_ref);
+ nfsd4_del_conns(ses);
+ spin_lock(&nfsd_drc_lock);
+ mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
+ nfsd_drc_mem_used -= mem;
+ spin_unlock(&nfsd_drc_lock);
+ free_session_slots(ses);
+ kfree(ses);
+}
+
+static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
+{
+ struct nfsd4_session *new;
+ struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
+ int numslots, slotsize;
+ int status;
+ int idx;
+
+ /*
+ * Note decreasing slot size below client's request may
+ * make it difficult for client to function correctly, whereas
+ * decreasing the number of slots will (just?) affect
+ * performance. When short on memory we therefore prefer to
+ * decrease number of slots instead of their size.
+ */
+ slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
+ numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
+
+ new = alloc_session(slotsize, numslots);
+ if (!new) {
+ nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
+ return NULL;
}
+ init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
new->se_client = clp;
gen_sessionid(new);
- idx = hash_sessionid(&new->se_sessionid);
- memcpy(clp->cl_sessionid.data, new->se_sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
+ INIT_LIST_HEAD(&new->se_conns);
+
+ new->se_cb_seq_nr = 1;
new->se_flags = cses->flags;
+ new->se_cb_prog = cses->callback_prog;
kref_init(&new->se_ref);
+ idx = hash_sessionid(&new->se_sessionid);
spin_lock(&client_lock);
list_add(&new->se_hash, &sessionid_hashtbl[idx]);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&client_lock);
- status = nfs_ok;
-out:
- return status;
-out_free:
- free_session_slots(new);
- kfree(new);
- goto out;
+ status = nfsd4_new_conn(rqstp, new);
+ /* whoops: benny points out, status is ignored! (err, or bogus) */
+ if (status) {
+ free_session(&new->se_ref);
+ return NULL;
+ }
+ if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) {
+ struct sockaddr *sa = svc_addr(rqstp);
+
+ clp->cl_cb_session = new;
+ clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(rqstp->rq_xprt);
+ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
+ clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
+ nfsd4_probe_callback(clp);
+ }
+ return new;
}
/* caller must hold client_lock */
@@ -731,21 +816,6 @@ unhash_session(struct nfsd4_session *ses)
list_del(&ses->se_perclnt);
}
-void
-free_session(struct kref *kref)
-{
- struct nfsd4_session *ses;
- int mem;
-
- ses = container_of(kref, struct nfsd4_session, se_ref);
- spin_lock(&nfsd_drc_lock);
- mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
- nfsd_drc_mem_used -= mem;
- spin_unlock(&nfsd_drc_lock);
- free_session_slots(ses);
- kfree(ses);
-}
-
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
@@ -812,6 +882,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static inline void
free_client(struct nfs4_client *clp)
{
+ while (!list_empty(&clp->cl_sessions)) {
+ struct nfsd4_session *ses;
+ ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
+ se_perclnt);
+ list_del(&ses->se_perclnt);
+ nfsd4_put_session(ses);
+ }
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -838,15 +915,12 @@ release_session_client(struct nfsd4_session *session)
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
+ struct nfsd4_session *ses;
+
mark_client_expired(clp);
list_del(&clp->cl_lru);
- while (!list_empty(&clp->cl_sessions)) {
- struct nfsd4_session *ses;
- ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
- se_perclnt);
- unhash_session(ses);
- nfsd4_put_session(ses);
- }
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ list_del_init(&ses->se_hash);
}
static void
@@ -875,7 +949,7 @@ expire_client(struct nfs4_client *clp)
sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
release_openowner(sop);
}
- nfsd4_set_callback_client(clp, NULL);
+ nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
list_del(&clp->cl_idhash);
@@ -960,6 +1034,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
if (clp == NULL)
return NULL;
+ INIT_LIST_HEAD(&clp->cl_sessions);
+
princ = svc_gss_principal(rqstp);
if (princ) {
clp->cl_principal = kstrdup(princ, GFP_KERNEL);
@@ -976,8 +1052,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_sessions);
INIT_LIST_HEAD(&clp->cl_lru);
+ spin_lock_init(&clp->cl_lock);
+ INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
@@ -986,7 +1063,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
clp->cl_flavor = rqstp->rq_flavor;
copy_cred(&clp->cl_cred, &rqstp->rq_cred);
gen_confirm(clp);
-
+ clp->cl_cb_session = NULL;
return clp;
}
@@ -1098,7 +1175,7 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
+ struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
unsigned short expected_family;
/* Currently, we only support tcp and tcp6 for the callback channel */
@@ -1111,24 +1188,23 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
else
goto out_err;
- cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
+ conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
se->se_callback_addr_len,
- (struct sockaddr *) &cb->cb_addr,
- sizeof(cb->cb_addr));
+ (struct sockaddr *)&conn->cb_addr,
+ sizeof(conn->cb_addr));
- if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family)
+ if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
goto out_err;
- if (cb->cb_addr.ss_family == AF_INET6)
- ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid;
+ if (conn->cb_addr.ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
- cb->cb_minorversion = 0;
- cb->cb_prog = se->se_callback_prog;
- cb->cb_ident = se->se_callback_ident;
+ conn->cb_prog = se->se_callback_prog;
+ conn->cb_ident = se->se_callback_ident;
return;
out_err:
- cb->cb_addr.ss_family = AF_UNSPEC;
- cb->cb_addrlen = 0;
+ conn->cb_addr.ss_family = AF_UNSPEC;
+ conn->cb_addrlen = 0;
dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
"will not receive delegations\n",
clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
@@ -1415,7 +1491,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
{
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
+ struct nfsd4_session *new;
struct nfsd4_clid_slot *cs_slot = NULL;
+ bool confirm_me = false;
int status = 0;
nfs4_lock_state();
@@ -1438,7 +1516,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cs_slot->sl_seqid, cr_ses->seqid);
goto out;
}
- cs_slot->sl_seqid++;
} else if (unconf) {
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
@@ -1451,25 +1528,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
- goto out_cache;
+ goto out;
}
- cs_slot->sl_seqid++; /* from 0 to 1 */
- move_to_confirmed(unconf);
-
- if (cr_ses->flags & SESSION4_BACK_CHAN) {
- unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
- svc_xprt_get(rqstp->rq_xprt);
- rpc_copy_addr(
- (struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
- sa);
- unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
- unconf->cl_cb_conn.cb_minorversion =
- cstate->minorversion;
- unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
- unconf->cl_cb_seq_nr = 1;
- nfsd4_probe_callback(unconf, &unconf->cl_cb_conn);
- }
+ confirm_me = true;
conf = unconf;
} else {
status = nfserr_stale_clientid;
@@ -1477,22 +1539,30 @@ nfsd4_create_session(struct svc_rqst *rqstp,
}
/*
+ * XXX: we should probably set this at creation time, and check
+ * for consistent minorversion use throughout:
+ */
+ conf->cl_minorversion = 1;
+ /*
* We do not support RDMA or persistent sessions
*/
cr_ses->flags &= ~SESSION4_PERSIST;
cr_ses->flags &= ~SESSION4_RDMA;
- status = alloc_init_session(rqstp, conf, cr_ses);
- if (status)
+ status = nfserr_jukebox;
+ new = alloc_init_session(rqstp, conf, cr_ses);
+ if (!new)
goto out;
-
- memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data,
+ status = nfs_ok;
+ memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
+ cs_slot->sl_seqid++;
cr_ses->seqid = cs_slot->sl_seqid;
-out_cache:
/* cache solo and embedded create sessions under the state lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
+ if (confirm_me)
+ move_to_confirmed(conf);
out:
nfs4_unlock_state();
dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1546,8 +1616,11 @@ nfsd4_destroy_session(struct svc_rqst *r,
nfs4_lock_state();
/* wait for callbacks */
- nfsd4_set_callback_client(ses->se_client, NULL);
+ nfsd4_shutdown_callback(ses->se_client);
nfs4_unlock_state();
+
+ nfsd4_del_conns(ses);
+
nfsd4_put_session(ses);
status = nfs_ok;
out:
@@ -1555,6 +1628,36 @@ out:
return status;
}
+static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
+{
+ struct nfsd4_conn *c;
+
+ list_for_each_entry(c, &s->se_conns, cn_persession) {
+ if (c->cn_xprt == xpt) {
+ return c;
+ }
+ }
+ return NULL;
+}
+
+static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
+{
+ struct nfs4_client *clp = ses->se_client;
+ struct nfsd4_conn *c;
+
+ spin_lock(&clp->cl_lock);
+ c = __nfsd4_find_conn(new->cn_xprt, ses);
+ if (c) {
+ spin_unlock(&clp->cl_lock);
+ free_conn(new);
+ return;
+ }
+ __nfsd4_hash_conn(new, ses);
+ spin_unlock(&clp->cl_lock);
+ nfsd4_register_conn(new);
+ return;
+}
+
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -1563,11 +1666,20 @@ nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_session *session;
struct nfsd4_slot *slot;
+ struct nfsd4_conn *conn;
int status;
if (resp->opcnt != 1)
return nfserr_sequence_pos;
+ /*
+ * Will be either used or freed by nfsd4_sequence_check_conn
+ * below.
+ */
+ conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
+ if (!conn)
+ return nfserr_jukebox;
+
spin_lock(&client_lock);
status = nfserr_badsession;
session = find_in_sessionid_hashtbl(&seq->sessionid);
@@ -1599,6 +1711,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (status)
goto out;
+ nfsd4_sequence_check_conn(conn, session);
+ conn = NULL;
+
/* Success! bump slot seqid */
slot->sl_inuse = true;
slot->sl_seqid = seq->seqid;
@@ -1613,6 +1728,7 @@ out:
nfsd4_get_session(cstate->session);
atomic_inc(&session->se_client->cl_refcount);
}
+ kfree(conn);
spin_unlock(&client_lock);
dprintk("%s: return %d\n", __func__, ntohl(status));
return status;
@@ -1747,6 +1863,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
gen_clid(new);
}
+ /*
+ * XXX: we should probably set this at creation time, and check
+ * for consistent minorversion use throughout:
+ */
+ new->cl_minorversion = 0;
gen_callback(new, setclid, rpc_get_scope_id(sa));
add_to_unconfirmed(new, strhashval);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
@@ -1807,7 +1928,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
status = nfserr_clid_inuse;
else {
atomic_set(&conf->cl_cb_set, 0);
- nfsd4_probe_callback(conf, &unconf->cl_cb_conn);
+ nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+ nfsd4_probe_callback(conf);
expire_client(unconf);
status = nfs_ok;
@@ -1841,7 +1963,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
move_to_confirmed(unconf);
conf = unconf;
- nfsd4_probe_callback(conf, &conf->cl_cb_conn);
+ nfsd4_probe_callback(conf);
status = nfs_ok;
}
} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
@@ -2492,7 +2614,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
int cb_up = atomic_read(&sop->so_client->cl_cb_set);
- struct file_lock fl, *flp = &fl;
+ struct file_lock *fl;
int status, flag = 0;
flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2526,20 +2648,24 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
flag = NFS4_OPEN_DELEGATE_NONE;
goto out;
}
- locks_init_lock(&fl);
- fl.fl_lmops = &nfsd_lease_mng_ops;
- fl.fl_flags = FL_LEASE;
- fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
- fl.fl_end = OFFSET_MAX;
- fl.fl_owner = (fl_owner_t)dp;
- fl.fl_file = find_readable_file(stp->st_file);
- BUG_ON(!fl.fl_file);
- fl.fl_pid = current->tgid;
+ status = -ENOMEM;
+ fl = locks_alloc_lock();
+ if (!fl)
+ goto out;
+ locks_init_lock(fl);
+ fl->fl_lmops = &nfsd_lease_mng_ops;
+ fl->fl_flags = FL_LEASE;
+ fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_owner = (fl_owner_t)dp;
+ fl->fl_file = find_readable_file(stp->st_file);
+ BUG_ON(!fl->fl_file);
+ fl->fl_pid = current->tgid;
/* vfs_setlease checks to see if delegation should be handed out.
* the lock_manager callbacks fl_mylease and fl_change are used
*/
- if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) {
+ if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
dprintk("NFSD: setlease failed [%d], no delegation\n", status);
unhash_delegation(dp);
flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2944,7 +3070,11 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
if (STALE_STATEID(stateid))
goto out;
- status = nfserr_bad_stateid;
+ /*
+ * We assume that any stateid that has the current boot time,
+ * but that we can't find, is expired:
+ */
+ status = nfserr_expired;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
if (!dp)
@@ -2964,6 +3094,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
stp = find_stateid(stateid, flags);
if (!stp)
goto out;
+ status = nfserr_bad_stateid;
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -3038,8 +3169,9 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
* a replayed close:
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
+ /* It's not stale; let's assume it's expired: */
if (sop == NULL)
- return nfserr_bad_stateid;
+ return nfserr_expired;
*sopp = sop;
goto check_replay;
}
@@ -3304,6 +3436,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_bad_stateid;
if (!is_delegation_stateid(stateid))
goto out;
+ status = nfserr_expired;
dp = find_delegation_stateid(inode, stateid);
if (!dp)
goto out;
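
The teardown in nfsd4_del_conns() above follows a pattern worth spelling out: the per-client lock is dropped while each connection is unregistered and freed, so the loop re-evaluates list_first_entry() under the lock on every pass instead of using list_for_each_entry_safe(). A minimal sketch of the same idiom, with hypothetical struct and helper names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	/* payload */
};

/* stands in for per-element work that may sleep, so it cannot run under the lock */
static void item_teardown(struct item *it)
{
	kfree(it);
}

static void drain_items(struct list_head *head, spinlock_t *lock)
{
	struct item *it;

	spin_lock(lock);
	while (!list_empty(head)) {
		it = list_first_entry(head, struct item, link);
		list_del_init(&it->link);
		spin_unlock(lock);

		item_teardown(it);	/* safe to sleep here */

		spin_lock(lock);	/* retake before looking at the list again */
	}
	spin_unlock(lock);
}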
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1a468bbd330f..f35a94a04026 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1805,19 +1805,23 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out_nfserr;
}
}
- if ((buflen -= 16) < 0)
- goto out_resource;
- if (unlikely(bmval2)) {
+ if (bmval2) {
+ if ((buflen -= 16) < 0)
+ goto out_resource;
WRITE32(3);
WRITE32(bmval0);
WRITE32(bmval1);
WRITE32(bmval2);
- } else if (likely(bmval1)) {
+ } else if (bmval1) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
WRITE32(2);
WRITE32(bmval0);
WRITE32(bmval1);
} else {
+ if ((buflen -= 8) < 0)
+ goto out_resource;
WRITE32(1);
WRITE32(bmval0);
}
@@ -1828,15 +1832,17 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
u32 word1 = nfsd_suppattrs1(minorversion);
u32 word2 = nfsd_suppattrs2(minorversion);
- if ((buflen -= 12) < 0)
- goto out_resource;
if (!aclsupport)
word0 &= ~FATTR4_WORD0_ACL;
if (!word2) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
WRITE32(2);
WRITE32(word0);
WRITE32(word1);
} else {
+ if ((buflen -= 16) < 0)
+ goto out_resource;
WRITE32(3);
WRITE32(word0);
WRITE32(word1);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 06fa87e52e82..d6dc3f61f8ba 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -22,6 +22,7 @@
*/
enum {
NFSD_Root = 1,
+#ifdef CONFIG_NFSD_DEPRECATED
NFSD_Svc,
NFSD_Add,
NFSD_Del,
@@ -29,6 +30,7 @@ enum {
NFSD_Unexport,
NFSD_Getfd,
NFSD_Getfs,
+#endif
NFSD_List,
NFSD_Export_features,
NFSD_Fh,
@@ -54,6 +56,7 @@ enum {
/*
* write() for these nodes.
*/
+#ifdef CONFIG_NFSD_DEPRECATED
static ssize_t write_svc(struct file *file, char *buf, size_t size);
static ssize_t write_add(struct file *file, char *buf, size_t size);
static ssize_t write_del(struct file *file, char *buf, size_t size);
@@ -61,6 +64,7 @@ static ssize_t write_export(struct file *file, char *buf, size_t size);
static ssize_t write_unexport(struct file *file, char *buf, size_t size);
static ssize_t write_getfd(struct file *file, char *buf, size_t size);
static ssize_t write_getfs(struct file *file, char *buf, size_t size);
+#endif
static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size);
static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size);
@@ -76,6 +80,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
#endif
static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+#ifdef CONFIG_NFSD_DEPRECATED
[NFSD_Svc] = write_svc,
[NFSD_Add] = write_add,
[NFSD_Del] = write_del,
@@ -83,6 +88,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Unexport] = write_unexport,
[NFSD_Getfd] = write_getfd,
[NFSD_Getfs] = write_getfs,
+#endif
[NFSD_Fh] = write_filehandle,
[NFSD_FO_UnlockIP] = write_unlock_ip,
[NFSD_FO_UnlockFS] = write_unlock_fs,
@@ -121,6 +127,14 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu
static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
+ static int warned;
+ if (file->f_dentry->d_name.name[0] == '.' && !warned) {
+ printk(KERN_INFO
+ "Warning: \"%s\" uses deprecated NFSD interface: %s."
+ " This will be removed in 2.6.40\n",
+ current->comm, file->f_dentry->d_name.name);
+ warned = 1;
+ }
if (! file->private_data) {
/* An attempt to read a transaction file without writing
* causes a 0-byte write so that the file can return
@@ -187,6 +201,7 @@ static const struct file_operations pool_stats_operations = {
* payload - write methods
*/
+#ifdef CONFIG_NFSD_DEPRECATED
/**
* write_svc - Start kernel's NFSD server
*
@@ -402,7 +417,7 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
- clp = auth_unix_lookup(&in6);
+ clp = auth_unix_lookup(&init_net, &in6);
if (!clp)
err = -EPERM;
else {
@@ -465,7 +480,7 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
- clp = auth_unix_lookup(&in6);
+ clp = auth_unix_lookup(&init_net, &in6);
if (!clp)
err = -EPERM;
else {
@@ -482,6 +497,7 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
out:
return err;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/**
* write_unlock_ip - Release all locks used by a client
@@ -1000,12 +1016,12 @@ static ssize_t __write_ports_addxprt(char *buf)
if (err != 0)
return err;
- err = svc_create_xprt(nfsd_serv, transport,
+ err = svc_create_xprt(nfsd_serv, transport, &init_net,
PF_INET, port, SVC_SOCK_ANONYMOUS);
if (err < 0)
goto out_err;
- err = svc_create_xprt(nfsd_serv, transport,
+ err = svc_create_xprt(nfsd_serv, transport, &init_net,
PF_INET6, port, SVC_SOCK_ANONYMOUS);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
@@ -1356,6 +1372,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
{
static struct tree_descr nfsd_files[] = {
+#ifdef CONFIG_NFSD_DEPRECATED
[NFSD_Svc] = {".svc", &transaction_ops, S_IWUSR},
[NFSD_Add] = {".add", &transaction_ops, S_IWUSR},
[NFSD_Del] = {".del", &transaction_ops, S_IWUSR},
@@ -1363,6 +1380,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Unexport] = {".unexport", &transaction_ops, S_IWUSR},
[NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
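
The CONFIG_NFSD_DEPRECATED guards above have to appear in three places because the same enum values index both the write_op[] dispatch table and the nfsd_files[] tree descriptors. A reduced sketch of that constraint, with made-up config and file names:

#include <linux/fs.h>

enum {
	EX_Root = 1,
#ifdef CONFIG_EXAMPLE_LEGACY
	EX_Legacy,			/* legacy control file */
#endif
	EX_Exports,
};

#ifdef CONFIG_EXAMPLE_LEGACY
static ssize_t write_legacy(struct file *file, char *buf, size_t size)
{
	return size;
}
#endif

static ssize_t write_exports(struct file *file, char *buf, size_t size)
{
	return size;
}

/*
 * Sized and filled by designated initializers keyed on the enum above,
 * so an entry has to disappear from the enum, this table, and the file
 * table together for the indices to keep matching.
 */
static ssize_t (*ex_write_op[])(struct file *, char *, size_t) = {
#ifdef CONFIG_EXAMPLE_LEGACY
	[EX_Legacy]  = write_legacy,
#endif
	[EX_Exports] = write_exports,
};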
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index b76ac3a82e39..6b641cf2c19a 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -249,7 +249,7 @@ extern time_t nfsd4_grace;
#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
-#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
+#define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
/*
* The following attributes are currently not supported by the NFSv4 server:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index e2c43464f237..2bae1d86f5f2 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -16,6 +16,7 @@
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
+#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
@@ -186,12 +187,12 @@ static int nfsd_init_socks(int port)
if (!list_empty(&nfsd_serv->sv_permsocks))
return 0;
- error = svc_create_xprt(nfsd_serv, "udp", PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, port,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
- error = svc_create_xprt(nfsd_serv, "tcp", PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, port,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
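
svc_create_xprt() now takes the network namespace explicitly; nfsd passes &init_net everywhere for the moment. A minimal sketch of listener setup with the new signature (the function and serv names are hypothetical):

#include <net/net_namespace.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

static int example_init_socks(struct svc_serv *serv, int port)
{
	int err;

	err = svc_create_xprt(serv, "udp", &init_net, PF_INET, port,
			      SVC_SOCK_DEFAULTS);
	if (err < 0)
		return err;
	return svc_create_xprt(serv, "tcp", &init_net, PF_INET, port,
			       SVC_SOCK_DEFAULTS);
}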
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 322518c88e4b..39adc27b0685 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/nfsd/nfsfh.h>
#include "nfsfh.h"
@@ -64,19 +65,12 @@ typedef struct {
(s)->si_fileid, \
(s)->si_generation
-struct nfsd4_cb_sequence {
- /* args/res */
- u32 cbs_minorversion;
- struct nfs4_client *cbs_clp;
-};
-
-struct nfs4_rpc_args {
- void *args_op;
- struct nfsd4_cb_sequence args_seq;
-};
-
struct nfsd4_callback {
- struct nfs4_rpc_args cb_args;
+ void *cb_op;
+ struct nfs4_client *cb_clp;
+ u32 cb_minorversion;
+ struct rpc_message cb_msg;
+ const struct rpc_call_ops *cb_ops;
struct work_struct cb_work;
};
@@ -91,7 +85,6 @@ struct nfs4_delegation {
u32 dl_type;
time_t dl_time;
/* For recall: */
- u32 dl_ident;
stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
@@ -103,8 +96,8 @@ struct nfs4_cb_conn {
/* SETCLIENTID info */
struct sockaddr_storage cb_addr;
size_t cb_addrlen;
- u32 cb_prog;
- u32 cb_minorversion;
+ u32 cb_prog; /* used only in 4.0 case;
+ per-session otherwise */
u32 cb_ident; /* minorversion 0 only */
struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
@@ -160,6 +153,15 @@ struct nfsd4_clid_slot {
struct nfsd4_create_session sl_cr_ses;
};
+struct nfsd4_conn {
+ struct list_head cn_persession;
+ struct svc_xprt *cn_xprt;
+ struct svc_xpt_user cn_xpt_user;
+ struct nfsd4_session *cn_session;
+/* CDFC4_FORE, CDFC4_BACK: */
+ unsigned char cn_flags;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -169,6 +171,9 @@ struct nfsd4_session {
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_channel_attrs se_bchannel;
+ struct list_head se_conns;
+ u32 se_cb_prog;
+ u32 se_cb_seq_nr;
struct nfsd4_slot *se_slots[]; /* forward channel slots */
};
@@ -221,24 +226,32 @@ struct nfs4_client {
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
u32 cl_firststate; /* recovery dir creation */
+ u32 cl_minorversion;
/* for v4.0 and v4.1 callbacks: */
struct nfs4_cb_conn cl_cb_conn;
+#define NFSD4_CLIENT_CB_UPDATE 1
+#define NFSD4_CLIENT_KILL 2
+ unsigned long cl_cb_flags;
struct rpc_clnt *cl_cb_client;
+ u32 cl_cb_ident;
atomic_t cl_cb_set;
+ struct nfsd4_callback cl_cb_null;
+ struct nfsd4_session *cl_cb_session;
+
+ /* for all client information that callback code might need: */
+ spinlock_t cl_lock;
/* for nfs41 */
struct list_head cl_sessions;
struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
u32 cl_exchange_flags;
- struct nfs4_sessionid cl_sessionid;
/* number of rpc's in progress over an associated session: */
atomic_t cl_refcount;
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
unsigned long cl_cb_slot_busy;
- u32 cl_cb_seq_nr;
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
};
@@ -440,12 +453,13 @@ extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void nfs4_free_stateowner(struct kref *kref);
extern int set_callback_cred(void);
-extern void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
extern void nfsd4_do_callback_rpc(struct work_struct *);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
-extern void nfsd4_set_callback_client(struct nfs4_client *, struct rpc_clnt *);
+extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
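
struct nfsd4_conn above embeds a struct svc_xpt_user so the transport layer can notify nfsd when a connection goes away; the embedding object is recovered in the callback with container_of(). A minimal sketch of that wiring, assuming (as the hunks above suggest) that the callback receives the svc_xpt_user pointer; the my_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/sunrpc/svc_xprt.h>

struct my_conn {
	struct svc_xprt		*xprt;
	struct svc_xpt_user	xpt_user;	/* embedded notifier */
};

/* invoked by the transport when the connection is closed */
static void my_conn_lost(struct svc_xpt_user *u)
{
	struct my_conn *c = container_of(u, struct my_conn, xpt_user);

	pr_info("connection on %p lost\n", c->xprt);
	/* queue the real teardown from here; the caller holds no relevant locks */
}

static void my_conn_watch(struct my_conn *c)
{
	c->xpt_user.callback = my_conn_lost;
	register_xpt_user(c->xprt, &c->xpt_user);
}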
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 661a6cf8e826..184938fcff04 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -281,23 +281,13 @@ commit_metadata(struct svc_fh *fhp)
{
struct inode *inode = fhp->fh_dentry->d_inode;
const struct export_operations *export_ops = inode->i_sb->s_export_op;
- int error = 0;
if (!EX_ISSYNC(fhp->fh_export))
return 0;
- if (export_ops->commit_metadata) {
- error = export_ops->commit_metadata(inode);
- } else {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0, /* metadata only */
- };
-
- error = sync_inode(inode, &wbc);
- }
-
- return error;
+ if (export_ops->commit_metadata)
+ return export_ops->commit_metadata(inode);
+ return sync_inode_metadata(inode, 1);
}
/*
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 185d1607cb00..6e9557ecf161 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -207,7 +207,7 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
inode->i_ctime = CURRENT_TIME;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = nilfs_add_nondir(dentry, inode);
if (!err)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index d926af626177..687d090cea34 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1609,7 +1609,7 @@ nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
kunmap_atomic(kaddr, KM_USER0);
if (!TestSetPageWriteback(clone_page))
- inc_zone_page_state(clone_page, NR_WRITEBACK);
+ account_page_writeback(clone_page);
unlock_page(clone_page);
return 0;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 36802420d69a..4498a208df94 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -88,8 +88,6 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
- bool send = false;
- bool should_update_children = false;
if (!dentry)
dentry = path->dentry;
@@ -97,29 +95,12 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
- spin_lock(&dentry->d_lock);
- parent = dentry->d_parent;
+ parent = dget_parent(dentry);
p_inode = parent->d_inode;
- if (fsnotify_inode_watches_children(p_inode)) {
- if (p_inode->i_fsnotify_mask & mask) {
- dget(parent);
- send = true;
- }
- } else {
- /*
- * The parent doesn't care about events on it's children but
- * at least one child thought it did. We need to run all the
- * children and update their d_flags to let them know p_inode
- * doesn't care about them any more.
- */
- dget(parent);
- should_update_children = true;
- }
-
- spin_unlock(&dentry->d_lock);
-
- if (send) {
+ if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+ __fsnotify_update_child_dentry_flags(p_inode);
+ else if (p_inode->i_fsnotify_mask & mask) {
/* we are notifying a parent so come up with the new mask which
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
@@ -130,13 +111,9 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
else
fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
dentry->d_name.name, 0);
- dput(parent);
}
- if (unlikely(should_update_children)) {
- __fsnotify_update_child_dentry_flags(p_inode);
- dput(parent);
- }
+ dput(parent);
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 33297c005060..21ed10660b80 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -240,6 +240,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
+ spin_lock(&inode_lock);
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inode *need_iput_tmp;
@@ -297,4 +298,5 @@ void fsnotify_unmount_inodes(struct list_head *list)
spin_lock(&inode_lock);
}
+ spin_unlock(&inode_lock);
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 19c5180f8a28..d3fbe5730bfc 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2911,8 +2911,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
goto unl_upcase_iput_tmp_ino_err_out_now;
}
if ((sb->s_root = d_alloc_root(vol->root_ino))) {
- /* We increment i_count simulating an ntfs_iget(). */
- atomic_inc(&vol->root_ino->i_count);
+ /* We grab a reference, simulating an ntfs_iget(). */
+ ihold(vol->root_ino);
ntfs_debug("Exiting, status successful.");
/* Release the default upcase if it has no users. */
mutex_lock(&ntfs_lock);
@@ -3021,21 +3021,6 @@ iput_tmp_ino_err_out_now:
if (vol->mft_ino && vol->mft_ino != tmp_ino)
iput(vol->mft_ino);
vol->mft_ino = NULL;
- /*
- * This is needed to get ntfs_clear_extent_inode() called for each
- * inode we have ever called ntfs_iget()/iput() on, otherwise we A)
- * leak resources and B) a subsequent mount fails automatically due to
- * ntfs_iget() never calling down into our ntfs_read_locked_inode()
- * method again... FIXME: Do we need to do this twice now because of
- * attribute inodes? I think not, so leave as is for now... (AIA)
- */
- if (invalidate_inodes(sb)) {
- ntfs_error(sb, "Busy inodes left. This is most likely a NTFS "
- "driver bug.");
- /* Copied from fs/super.c. I just love this message. (-; */
- printk("NTFS: Busy inodes after umount. Self-destruct in 5 "
- "seconds. Have a nice day...\n");
- }
/* Errors at this stage are irrelevant. */
err_out_now:
sb->s_fs_info = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 5cfeee118158..f1e962cb3b73 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -165,7 +165,7 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
* ocfs2 never allocates in this function - the only time we
* need to use BH_New is when we're extending i_size on a file
* system which doesn't support holes, in which case BH_New
- * allows block_prepare_write() to zero.
+ * allows __block_write_begin() to zero.
*
* If we see this on a sparse file system, then a truncate has
* raced us and removed the cluster. In this case, we clear
@@ -407,21 +407,6 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
return ret;
}
-/*
- * This is called from ocfs2_write_zero_page() which has handled it's
- * own cluster locking and has ensured allocation exists for those
- * blocks to be written.
- */
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
- unsigned from, unsigned to)
-{
- int ret;
-
- ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
- return ret;
-}
-
/* Taken from ext3. We don't necessarily need the full blown
* functionality yet, but IMHO it's better to cut and paste the whole
* thing so we can avoid introducing our own bugs (and easily pick up
@@ -732,7 +717,7 @@ static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
}
/*
- * Some of this taken from block_prepare_write(). We already have our
+ * Some of this taken from __block_write_begin(). We already have our
* mapping by now though, and the entire write will be allocating or
* it won't, so not much need to use BH_New.
*
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 7606f663da6d..76bfdfda691a 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,9 +22,6 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
- unsigned from, unsigned to);
-
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
unsigned from,
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index a7ebd9d42dc8..75e115f1bd73 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -400,6 +400,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
ip = DLMFS_I(inode);
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -425,6 +426,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
if (!inode)
return NULL;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 1ca6867935bb..77b4c04a2809 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -796,13 +796,12 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
block_end = block_start + (1 << inode->i_blkbits);
/*
- * block_start is block-aligned. Bump it by one to
- * force ocfs2_{prepare,commit}_write() to zero the
+ * block_start is block-aligned. Bump it by one to force
+ * __block_write_begin and block_commit_write to zero the
* whole block.
*/
- ret = ocfs2_prepare_write_nolock(inode, page,
- block_start + 1,
- block_start + 1);
+ ret = __block_write_begin(page, block_start + 1, 0,
+ ocfs2_get_block);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index e7bde21149ae..ff5744e1e36f 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -742,7 +742,7 @@ static int ocfs2_link(struct dentry *old_dentry,
goto out_commit;
}
- atomic_inc(&inode->i_count);
+ ihold(inode);
dentry->d_op = &ocfs2_dentry_ops;
d_instantiate(dentry, inode);
diff --git a/fs/pipe.c b/fs/pipe.c
index 37eb1ebeaa90..d2d7566ce68e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -954,6 +954,8 @@ static struct inode * get_pipe_inode(void)
if (!inode)
goto fail_inode;
+ inode->i_ino = get_next_ino();
+
pipe = alloc_pipe_info(inode);
if (!pipe)
goto fail_iput;
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 50f8f0600f06..6a0068841d96 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -33,8 +33,8 @@ config PROC_KCORE
depends on PROC_FS && MMU
config PROC_VMCORE
- bool "/proc/vmcore support (EXPERIMENTAL)"
- depends on PROC_FS && CRASH_DUMP
+ bool "/proc/vmcore support"
+ depends on PROC_FS && CRASH_DUMP
default y
help
Exports the dump image of crashed kernel in ELF format.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dc5d5f51f3fe..f3d02ca461ec 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -226,7 +226,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
{
struct mm_struct *mm;
- if (mutex_lock_killable(&task->cred_guard_mutex))
+ if (mutex_lock_killable(&task->signal->cred_guard_mutex))
return NULL;
mm = get_task_mm(task);
@@ -235,7 +235,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
mmput(mm);
mm = NULL;
}
- mutex_unlock(&task->cred_guard_mutex);
+ mutex_unlock(&task->signal->cred_guard_mutex);
return mm;
}
@@ -771,6 +771,8 @@ static const struct file_operations proc_single_file_operations = {
static int mem_open(struct inode* inode, struct file* file)
{
file->private_data = (void*)((long)current->self_exec_id);
+ /* OK to pass negative loff_t, we can catch out-of-range */
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
return 0;
}
@@ -1023,28 +1025,47 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
- if (copy_from_user(buffer, buf, count))
- return -EFAULT;
+ if (copy_from_user(buffer, buf, count)) {
+ err = -EFAULT;
+ goto out;
+ }
err = strict_strtol(strstrip(buffer), 0, &oom_adjust);
if (err)
- return -EINVAL;
+ goto out;
if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
- oom_adjust != OOM_DISABLE)
- return -EINVAL;
+ oom_adjust != OOM_DISABLE) {
+ err = -EINVAL;
+ goto out;
+ }
task = get_proc_task(file->f_path.dentry->d_inode);
- if (!task)
- return -ESRCH;
+ if (!task) {
+ err = -ESRCH;
+ goto out;
+ }
+
+ task_lock(task);
+ if (!task->mm) {
+ err = -EINVAL;
+ goto err_task_lock;
+ }
+
if (!lock_task_sighand(task, &flags)) {
- put_task_struct(task);
- return -ESRCH;
+ err = -ESRCH;
+ goto err_task_lock;
}
if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
- unlock_task_sighand(task, &flags);
- put_task_struct(task);
- return -EACCES;
+ err = -EACCES;
+ goto err_sighand;
+ }
+
+ if (oom_adjust != task->signal->oom_adj) {
+ if (oom_adjust == OOM_DISABLE)
+ atomic_inc(&task->mm->oom_disable_count);
+ if (task->signal->oom_adj == OOM_DISABLE)
+ atomic_dec(&task->mm->oom_disable_count);
}
/*
@@ -1065,10 +1086,13 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
else
task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
-OOM_DISABLE;
+err_sighand:
unlock_task_sighand(task, &flags);
+err_task_lock:
+ task_unlock(task);
put_task_struct(task);
-
- return count;
+out:
+ return err < 0 ? err : count;
}
static const struct file_operations proc_oom_adjust_operations = {
@@ -1109,30 +1133,49 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
- if (copy_from_user(buffer, buf, count))
- return -EFAULT;
+ if (copy_from_user(buffer, buf, count)) {
+ err = -EFAULT;
+ goto out;
+ }
err = strict_strtol(strstrip(buffer), 0, &oom_score_adj);
if (err)
- return -EINVAL;
+ goto out;
if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
- oom_score_adj > OOM_SCORE_ADJ_MAX)
- return -EINVAL;
+ oom_score_adj > OOM_SCORE_ADJ_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
task = get_proc_task(file->f_path.dentry->d_inode);
- if (!task)
- return -ESRCH;
+ if (!task) {
+ err = -ESRCH;
+ goto out;
+ }
+
+ task_lock(task);
+ if (!task->mm) {
+ err = -EINVAL;
+ goto err_task_lock;
+ }
+
if (!lock_task_sighand(task, &flags)) {
- put_task_struct(task);
- return -ESRCH;
+ err = -ESRCH;
+ goto err_task_lock;
}
+
if (oom_score_adj < task->signal->oom_score_adj &&
!capable(CAP_SYS_RESOURCE)) {
- unlock_task_sighand(task, &flags);
- put_task_struct(task);
- return -EACCES;
+ err = -EACCES;
+ goto err_sighand;
}
+ if (oom_score_adj != task->signal->oom_score_adj) {
+ if (oom_score_adj == OOM_SCORE_ADJ_MIN)
+ atomic_inc(&task->mm->oom_disable_count);
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ atomic_dec(&task->mm->oom_disable_count);
+ }
task->signal->oom_score_adj = oom_score_adj;
/*
* Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
@@ -1143,9 +1186,13 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
else
task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
OOM_SCORE_ADJ_MAX;
+err_sighand:
unlock_task_sighand(task, &flags);
+err_task_lock:
+ task_unlock(task);
put_task_struct(task);
- return count;
+out:
+ return err < 0 ? err : count;
}
static const struct file_operations proc_oom_score_adj_operations = {
@@ -1601,6 +1648,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
/* Common stuff */
ei = PROC_I(inode);
+ inode->i_ino = get_next_ino();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_op = &proc_def_inode_operations;
@@ -2306,14 +2354,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
goto out_free;
/* Guard against adverse ptrace interaction */
- length = mutex_lock_interruptible(&task->cred_guard_mutex);
+ length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
if (length < 0)
goto out_free;
length = security_setprocattr(task,
(char*)file->f_path.dentry->d_name.name,
(void*)page, count);
- mutex_unlock(&task->cred_guard_mutex);
+ mutex_unlock(&task->signal->cred_guard_mutex);
out_free:
free_page((unsigned long) page);
out:
@@ -2547,6 +2595,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
/* Initialize the inode */
ei = PROC_I(inode);
+ inode->i_ino = get_next_ino();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
/*
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 2fc52552271d..b652cb00906b 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -23,6 +23,8 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (!inode)
goto out;
+ inode->i_ino = get_next_ino();
+
sysctl_head_get(head);
ei = PROC_I(inode);
ei->sysctl = head;
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 1807c2419f17..37994737c983 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -10,13 +10,13 @@ static int show_softirqs(struct seq_file *p, void *v)
{
int i, j;
- seq_printf(p, " ");
+ seq_printf(p, " ");
for_each_possible_cpu(i)
seq_printf(p, "CPU%-8d", i);
seq_printf(p, "\n");
for (i = 0; i < NR_SOFTIRQS; i++) {
- seq_printf(p, "%8s:", softirq_to_name[i]);
+ seq_printf(p, "%12s:", softirq_to_name[i]);
for_each_possible_cpu(j)
seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
seq_printf(p, "\n");
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index bf31b03fc275..e15a19c93bae 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -31,7 +31,6 @@ static int show_stat(struct seq_file *p, void *v)
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
struct timespec boottime;
- unsigned int per_irq_sum;
user = nice = system = idle = iowait =
irq = softirq = steal = cputime64_zero;
@@ -52,9 +51,7 @@ static int show_stat(struct seq_file *p, void *v)
guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
guest_nice = cputime64_add(guest_nice,
kstat_cpu(i).cpustat.guest_nice);
- for_each_irq_nr(j) {
- sum += kstat_irqs_cpu(j, i);
- }
+ sum += kstat_cpu_irqs_sum(i);
sum += arch_irq_stat_cpu(i);
for (j = 0; j < NR_SOFTIRQS; j++) {
@@ -110,13 +107,8 @@ static int show_stat(struct seq_file *p, void *v)
seq_printf(p, "intr %llu", (unsigned long long)sum);
/* sum again ? it could be updated? */
- for_each_irq_nr(j) {
- per_irq_sum = 0;
- for_each_possible_cpu(i)
- per_irq_sum += kstat_irqs_cpu(j, i);
-
- seq_printf(p, " %u", per_irq_sum);
- }
+ for_each_irq_nr(j)
+ seq_printf(p, " %u", kstat_irqs(j));
seq_printf(p,
"\nctxt %llu\n"
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 871e25ed0069..da6b01d70f01 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -327,6 +327,7 @@ struct mem_size_stats {
unsigned long private_clean;
unsigned long private_dirty;
unsigned long referenced;
+ unsigned long anonymous;
unsigned long swap;
u64 pss;
};
@@ -357,6 +358,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!page)
continue;
+ if (PageAnon(page))
+ mss->anonymous += PAGE_SIZE;
+
mss->resident += PAGE_SIZE;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
@@ -410,6 +414,7 @@ static int show_smap(struct seq_file *m, void *v)
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
+ "Anonymous: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n",
@@ -421,6 +426,7 @@ static int show_smap(struct seq_file *m, void *v)
mss.private_clean >> 10,
mss.private_dirty >> 10,
mss.referenced >> 10,
+ mss.anonymous >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10);
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 3e21b1e2ad3a..880fd9884366 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -4,6 +4,7 @@
config QUOTA
bool "Quota support"
+ select QUOTACTL
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
@@ -65,8 +66,7 @@ config QFMT_V2
config QUOTACTL
bool
- depends on XFS_QUOTA || QUOTA
- default y
+ default n
config QUOTACTL_COMPAT
bool
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index aad1316a977f..0fed41e6efcd 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1386,6 +1386,9 @@ static void __dquot_initialize(struct inode *inode, int type)
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(sb, cnt))
continue;
+ /* We could race with quotaon or dqget() could have failed */
+ if (!got[cnt])
+ continue;
if (!inode->i_dquot[cnt]) {
inode->i_dquot[cnt] = got[cnt];
got[cnt] = NULL;
@@ -1736,6 +1739,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
qsize_t rsv_space = 0;
struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, ret = 0;
+ char is_valid[MAXQUOTAS] = {};
char warntype_to[MAXQUOTAS];
char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
@@ -1757,8 +1761,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ /*
+ * Skip changes for same uid or gid or for turned off quota-type.
+ */
if (!transfer_to[cnt])
continue;
+ /* Avoid races with quotaoff() */
+ if (!sb_has_quota_active(inode->i_sb, cnt))
+ continue;
+ is_valid[cnt] = 1;
transfer_from[cnt] = inode->i_dquot[cnt];
ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
if (ret)
@@ -1772,12 +1783,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
* Finally perform the needed transfer from transfer_from to transfer_to
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- /*
- * Skip changes for same uid or gid or for turned off quota-type.
- */
- if (!transfer_to[cnt])
+ if (!is_valid[cnt])
continue;
-
/* Due to IO error we might not have transfer_from[] structure */
if (transfer_from[cnt]) {
warntype_from_inodes[cnt] =
@@ -1801,18 +1808,19 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
- /* Pass back references to put */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- transfer_to[cnt] = transfer_from[cnt];
-warn:
flush_warnings(transfer_to, warntype_to);
flush_warnings(transfer_from, warntype_from_inodes);
flush_warnings(transfer_from, warntype_from_space);
- return ret;
+ /* Pass back references to put */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (is_valid[cnt])
+ transfer_to[cnt] = transfer_from[cnt];
+ return 0;
over_quota:
spin_unlock(&dq_data_lock);
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto warn;
+ flush_warnings(transfer_to, warntype_to);
+ return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
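
The is_valid[] array introduced above exists so that the charge pass and the later transfer/hand-back passes skip exactly the same quota types, even if quotaoff changes the picture in between. A stripped-down sketch of that two-pass shape, with hypothetical names:

#define NSLOTS 3

/*
 * The first pass records its decision in is_valid[] instead of re-testing
 * the conditions later, because the conditions (slot_active()) can change
 * between the passes.
 */
static void two_pass(void *to[NSLOTS], void *from[NSLOTS],
		     int (*slot_active)(int))
{
	char is_valid[NSLOTS] = {};
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (!to[i] || !slot_active(i))
			continue;
		is_valid[i] = 1;
		/* first pass: charge the new owner for slot i */
	}

	for (i = 0; i < NSLOTS; i++) {
		if (!is_valid[i])
			continue;
		/* second pass: finish the move and hand the old reference back */
		to[i] = from[i];
	}
}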
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index a5ebae70dc6d..67fadb1ad2c1 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -58,6 +58,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
struct inode * inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &ramfs_aops;
inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
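
ramfs, like the other in-memory filesystems touched in this series (dlmfs, pipefs, procfs), now assigns the inode number itself: new_inode() no longer hands one out, so each pseudo filesystem calls get_next_ino() right after allocation. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/fs.h>

static struct inode *example_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();	/* new_inode() no longer sets this */
	inode->i_mode = mode;
	return inode;
}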
diff --git a/fs/read_write.c b/fs/read_write.c
index e757ef26e4ce..9cd9d148105d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -31,6 +31,20 @@ const struct file_operations generic_ro_fops = {
EXPORT_SYMBOL(generic_ro_fops);
+static int
+__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+{
+ /*
+ * pos or pos+count is negative here, check overflow.
+ * too big "count" will be caught in rw_verify_area().
+ */
+ if ((pos < 0) && (pos + count < pos))
+ return -EOVERFLOW;
+ if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+ return 0;
+ return -EINVAL;
+}
+
/**
* generic_file_llseek_unlocked - lockless generic llseek implementation
* @file: file structure to seek on
@@ -62,7 +76,9 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
break;
}
- if (offset < 0 || offset > inode->i_sb->s_maxbytes)
+ if (offset < 0 && __negative_fpos_check(file, offset, 0))
+ return -EINVAL;
+ if (offset > inode->i_sb->s_maxbytes)
return -EINVAL;
/* Special lock needed here? */
@@ -137,7 +153,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
offset += file->f_pos;
}
retval = -EINVAL;
- if (offset >= 0) {
+ if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
@@ -221,6 +237,7 @@ bad:
}
#endif
+
/*
* rw_verify_area doesn't like huge counts. We limit
* them to something that fits in "int" so that others
@@ -238,8 +255,11 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
if (unlikely((ssize_t) count < 0))
return retval;
pos = *ppos;
- if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
- return retval;
+ if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
+ retval = __negative_fpos_check(file, pos, count);
+ if (retval)
+ return retval;
+ }
if (unlikely(inode->i_flock && mandatory_lock(inode))) {
retval = locks_mandatory_area(
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index caa758377d66..41656d40dc5c 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -22,8 +22,6 @@
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
- unsigned from, unsigned to);
void reiserfs_evict_inode(struct inode *inode)
{
@@ -165,7 +163,7 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
** but tail is still sitting in a direct item, and we can't write to
** it. So, look through this page, and check all the mapped buffers
** to make sure they have valid block numbers. Any that don't need
-** to be unmapped, so that block_prepare_write will correctly call
+** to be unmapped, so that __block_write_begin will correctly call
** reiserfs_get_block to convert the tail into an unformatted node
*/
static inline void fix_tail_page_for_writing(struct page *page)
@@ -439,13 +437,13 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
}
/* special version of get_block that is only used by grab_tail_page right
-** now. It is sent to block_prepare_write, and when you try to get a
+** now. It is sent to __block_write_begin, and when you try to get a
** block past the end of the file (or a block from a hole) it returns
-** -ENOENT instead of a valid buffer. block_prepare_write expects to
+** -ENOENT instead of a valid buffer. __block_write_begin expects to
** be able to do i/o on the buffers returned, unless an error value
** is also returned.
**
-** So, this allows block_prepare_write to be used for reading a single block
+** So, this allows __block_write_begin to be used for reading a single block
** in a page. Where it does not produce a valid page for holes, or past the
** end of the file. This turns out to be exactly what we need for reading
** tails for conversion.
@@ -558,11 +556,12 @@ static int convert_tail_for_hole(struct inode *inode,
**
** We must fix the tail page for writing because it might have buffers
** that are mapped, but have a block number of 0. This indicates tail
- ** data that has been read directly into the page, and block_prepare_write
- ** won't trigger a get_block in this case.
+ ** data that has been read directly into the page, and
+ ** __block_write_begin won't trigger a get_block in this case.
*/
fix_tail_page_for_writing(tail_page);
- retval = reiserfs_prepare_write(NULL, tail_page, tail_start, tail_end);
+ retval = __reiserfs_write_begin(tail_page, tail_start,
+ tail_end - tail_start);
if (retval)
goto unlock;
@@ -2033,7 +2032,7 @@ static int grab_tail_page(struct inode *inode,
/* start within the page of the last block in the file */
start = (offset / blocksize) * blocksize;
- error = block_prepare_write(page, start, offset,
+ error = __block_write_begin(page, start, offset - start,
reiserfs_get_block_create_0);
if (error)
goto unlock;
@@ -2438,7 +2437,7 @@ static int reiserfs_write_full_page(struct page *page,
/* from this point on, we know the buffer is mapped to a
* real block and not a direct item
*/
- if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else {
if (!trylock_buffer(bh)) {
@@ -2628,8 +2627,7 @@ static int reiserfs_write_begin(struct file *file,
return ret;
}
-int reiserfs_prepare_write(struct file *f, struct page *page,
- unsigned from, unsigned to)
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
{
struct inode *inode = page->mapping->host;
int ret;
@@ -2650,7 +2648,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
th->t_refcount++;
}
- ret = block_prepare_write(page, from, to, reiserfs_get_block);
+ ret = __block_write_begin(page, from, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
/* this gets a little ugly. If reiserfs_get_block returned an
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 5cbb81e134ac..adf22b485cea 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -160,8 +160,6 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
- unsigned from, unsigned to);
/*
** reiserfs_unpack
** Function try to convert tail from direct item into indirect.
@@ -200,7 +198,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
}
/* we unpack by finding the page with the tail, and calling
- ** reiserfs_prepare_write on that page. This will force a
+ ** __reiserfs_write_begin on that page. This will force a
** reiserfs_get_block to unpack the tail for us.
*/
index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -210,7 +208,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
if (!page) {
goto out;
}
- retval = reiserfs_prepare_write(NULL, page, write_from, write_from);
+ retval = __reiserfs_write_begin(page, write_from, 0);
if (retval)
goto out_unlock;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ee78d4a0086a..ba5f51ec3458 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -1156,7 +1156,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
inode->i_ctime = CURRENT_TIME_SEC;
reiserfs_update_sd(&th, inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
reiserfs_write_unlock(dir->i_sb);
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 8c4cf273c672..5d04a7828e7a 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -418,13 +418,11 @@ static inline __u32 xattr_hash(const char *msg, int len)
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
- unsigned from, unsigned to);
static void update_ctime(struct inode *inode)
{
struct timespec now = current_fs_time(inode->i_sb);
- if (hlist_unhashed(&inode->i_hash) || !inode->i_nlink ||
+ if (inode_unhashed(inode) || !inode->i_nlink ||
timespec_equal(&inode->i_ctime, &now))
return;
@@ -532,8 +530,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
rxh->h_hash = cpu_to_le32(xahash);
}
- err = reiserfs_prepare_write(NULL, page, page_offset,
- page_offset + chunk + skip);
+ err = __reiserfs_write_begin(page, page_offset, chunk + skip);
if (!err) {
if (buffer)
memcpy(data + skip, buffer + buffer_pos, chunk);
diff --git a/fs/select.c b/fs/select.c
index 500a669f7790..b7b10aa30861 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -67,7 +67,7 @@ static long __estimate_accuracy(struct timespec *tv)
return slack;
}
-static long estimate_accuracy(struct timespec *tv)
+long select_estimate_accuracy(struct timespec *tv)
{
unsigned long ret;
struct timespec now;
@@ -417,7 +417,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
}
if (end_time && !timed_out)
- slack = estimate_accuracy(end_time);
+ slack = select_estimate_accuracy(end_time);
retval = 0;
for (;;) {
@@ -769,7 +769,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
}
if (end_time && !timed_out)
- slack = estimate_accuracy(end_time);
+ slack = select_estimate_accuracy(end_time);
for (;;) {
struct poll_list *walk;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 0e7cb1395a94..05d6b0e78c95 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -462,9 +462,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
if (size) {
char *p;
- spin_lock(&dcache_lock);
p = __d_path(path, root, buf, size);
- spin_unlock(&dcache_lock);
res = PTR_ERR(p);
if (!IS_ERR(p)) {
char *end = mangle_path(buf, p, esc);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 74047304b01a..492465b451dd 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -99,6 +99,16 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
#ifdef __ARCH_SI_TRAPNO
err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
#endif
+#ifdef BUS_MCEERR_AO
+ /*
+ * Other callers might not initialize the si_lsb field,
+ * so check explicitly for the right codes here.
+ */
+ if (kinfo->si_code == BUS_MCEERR_AR ||
+ kinfo->si_code == BUS_MCEERR_AO)
+ err |= __put_user((short) kinfo->si_addr_lsb,
+ &uinfo->ssi_addr_lsb);
+#endif
break;
case __SI_CHLD:
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 00a70cab1f36..f678d421e541 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -406,21 +406,15 @@ void
smb_renew_times(struct dentry * dentry)
{
dget(dentry);
- spin_lock(&dentry->d_lock);
- for (;;) {
- struct dentry *parent;
+ dentry->d_time = jiffies;
- dentry->d_time = jiffies;
- if (IS_ROOT(dentry))
- break;
- parent = dentry->d_parent;
- dget(parent);
- spin_unlock(&dentry->d_lock);
+ while (!IS_ROOT(dentry)) {
+ struct dentry *parent = dget_parent(dentry);
dput(dentry);
dentry = parent;
- spin_lock(&dentry->d_lock);
+
+ dentry->d_time = jiffies;
}
- spin_unlock(&dentry->d_lock);
dput(dentry);
}
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 8fc5e50e142f..f6e9ee59757e 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -229,7 +229,6 @@ smb_invalidate_inodes(struct smb_sb_info *server)
{
VERBOSE("\n");
shrink_dcache_sb(SB_of(server));
- invalidate_inodes(SB_of(server));
}
/*
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index 71c29b6670b4..3dcf638d4d3a 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -332,16 +332,15 @@ static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
* and store it in reversed order [see reverse_string()]
*/
dget(entry);
- spin_lock(&entry->d_lock);
while (!IS_ROOT(entry)) {
struct dentry *parent;
if (maxlen < (3<<unicode)) {
- spin_unlock(&entry->d_lock);
dput(entry);
return -ENAMETOOLONG;
}
+ spin_lock(&entry->d_lock);
len = server->ops->convert(path, maxlen-2,
entry->d_name.name, entry->d_name.len,
server->local_nls, server->remote_nls);
@@ -359,15 +358,12 @@ static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
}
*path++ = '\\';
maxlen -= len+1;
-
- parent = entry->d_parent;
- dget(parent);
spin_unlock(&entry->d_lock);
+
+ parent = dget_parent(entry);
dput(entry);
entry = parent;
- spin_lock(&entry->d_lock);
}
- spin_unlock(&entry->d_lock);
dput(entry);
reverse_string(buf, path-buf);
diff --git a/fs/super.c b/fs/super.c
index 8819e3a7ff20..b9c9869165db 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -273,14 +273,14 @@ void generic_shutdown_super(struct super_block *sb)
get_fs_excl();
sb->s_flags &= ~MS_ACTIVE;
- /* bad name - it should be evict_inodes() */
- invalidate_inodes(sb);
+ fsnotify_unmount_inodes(&sb->s_inodes);
+
+ evict_inodes(sb);
if (sop->put_super)
sop->put_super(sb);
- /* Forget any remaining inodes */
- if (invalidate_inodes(sb)) {
+ if (!list_empty(&sb->s_inodes)) {
printk("VFS: Busy inodes after unmount of %s. "
"Self-destruct in 5 seconds. Have a nice day...\n",
sb->s_id);
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 33e047b59b8d..11e7f7d11cd0 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -126,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
return add_nondir(dentry, inode);
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 87ebcce72213..14f64b689d7f 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -550,7 +550,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
lock_2_inodes(dir, inode);
inc_nlink(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
inode->i_ctime = ubifs_current_time(inode);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index bf5fc674193c..6d8dc02baebb 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1101,7 +1101,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
inc_nlink(inode);
inode->i_ctime = current_fs_time(inode->i_sb);
mark_inode_dirty(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(dentry, inode);
unlock_kernel();
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index b056f02b1fb3..12f39b9e4437 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -180,7 +180,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
- atomic_inc(&inode->i_count);
+ ihold(inode);
error = ufs_add_nondir(dentry, inode);
unlock_kernel();
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 480f28127f09..6100ec0fa1d4 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -22,6 +22,7 @@ config XFS_FS
config XFS_QUOTA
bool "XFS Quota support"
depends on XFS_FS
+ select QUOTACTL
help
If you say Y here, you will be able to set limits for disk usage on
a per user and/or a per group basis under XFS. XFS considers quota
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index b552f816de15..c9af48fffcd7 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1139,8 +1139,7 @@ xfs_vm_writepage(
type = IO_DELAY;
flags = BMAPI_ALLOCATE;
- if (wbc->sync_mode == WB_SYNC_NONE &&
- wbc->nonblocking)
+ if (wbc->sync_mode == WB_SYNC_NONE)
flags |= BMAPI_TRYLOCK;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ba5312802aa9..63fd2c07cb57 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1580,6 +1580,7 @@ xfs_mapping_buftarg(
XFS_BUFTARG_NAME(btp));
return ENOMEM;
}
+ inode->i_ino = get_next_ino();
inode->i_mode = S_IFBLK;
inode->i_bdev = bdev;
inode->i_rdev = bdev->bd_dev;
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index ec858e09d546..96107efc0c61 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -317,7 +317,7 @@ xfs_vn_link(
if (unlikely(error))
return -error;
- atomic_inc(&inode->i_count);
+ ihold(inode);
d_instantiate(dentry, inode);
return 0;
}
@@ -760,7 +760,9 @@ xfs_setup_inode(
inode->i_ino = ip->i_ino;
inode->i_state = I_NEW;
- inode_add_to_lists(ip->i_mount->m_super, inode);
+
+ inode_sb_list_add(inode);
+ insert_inode_hash(inode);
inode->i_mode = ip->i_d.di_mode;
inode->i_nlink = ip->i_d.di_nlink;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index ab31ce5aeaf9..cf808782c065 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -576,7 +576,7 @@ xfs_max_file_offset(
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
- * __block_prepare_write does this in an [unsigned] long...
+ * __block_write_begin does this in an [unsigned] long...
* page->index << (PAGE_CACHE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fac52290de90..fb2ca2e4cdc9 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -500,7 +500,7 @@ void xfs_mark_inode_dirty_sync(xfs_inode_t *);
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
- atomic_inc(&(VFS_I(ip)->i_count)); \
+ ihold(VFS_I(ip)); \
trace_xfs_ihold(ip, _THIS_IP_); \
} while (0)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 4de84ce3a927..359ef11725a6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -184,7 +184,7 @@ struct acpi_device_pnp {
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define acpi_device_adr(d) ((d)->pnp.bus_address)
-char *acpi_device_hid(struct acpi_device *device);
+const char *acpi_device_hid(struct acpi_device *device);
#define acpi_device_name(d) ((d)->pnp.device_name)
#define acpi_device_class(d) ((d)->pnp.device_class)
@@ -389,21 +389,25 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
int acpi_pm_device_sleep_state(struct device *, int *);
-int acpi_pm_device_sleep_wake(struct device *, bool);
-#else /* !CONFIG_PM_SLEEP */
+#else
static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
{
if (p)
*p = ACPI_STATE_D0;
return ACPI_STATE_D3;
}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+int acpi_pm_device_sleep_wake(struct device *, bool);
+#else
static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
return -ENODEV;
}
-#endif /* !CONFIG_PM_SLEEP */
+#endif
#endif /* CONFIG_ACPI */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 23d78b4d088b..3090471b2a5e 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -115,8 +115,6 @@ void pci_acpi_crs_quirks(void);
#define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
#define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
-int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
-
/*--------------------------------------------------------------------------
Dock Station
-------------------------------------------------------------------------- */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 29bf945143e8..65b3f5888f42 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -98,8 +98,6 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
/*
* Spinlock primitives
*/
-acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
-
void acpi_os_delete_lock(acpi_spinlock handle);
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
@@ -223,25 +221,15 @@ acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
*/
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
- u32 reg, u32 *value, u32 width);
+ u32 reg, u64 *value, u32 width);
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
u32 reg, u64 value, u32 width);
/*
- * Interim function needed for PCI IRQ routing
- */
-void
-acpi_os_derive_pci_id(acpi_handle device,
- acpi_handle region, struct acpi_pci_id **pci_id);
-
-/*
* Miscellaneous
*/
-acpi_status acpi_os_validate_interface(char *interface);
-acpi_status acpi_osi_invalidate(char* interface);
-
acpi_status
acpi_os_validate_address(u8 space_id, acpi_physical_address address,
acpi_size length, char *name);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 984cdc62e30b..53b7cfd924a3 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20100702
+#define ACPI_CA_VERSION 0x20101013
#include "actypes.h"
#include "actbl.h"
@@ -72,6 +72,7 @@ extern u8 acpi_gbl_truncate_io_addresses;
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
+extern u8 acpi_gbl_system_awake_and_running;
extern u32 acpi_rsdt_forced;
/*
@@ -105,6 +106,10 @@ const char *acpi_format_exception(acpi_status exception);
acpi_status acpi_purge_cached_objects(void);
+acpi_status acpi_install_interface(acpi_string interface_name);
+
+acpi_status acpi_remove_interface(acpi_string interface_name);
+
/*
* ACPI Memory management
*/
@@ -263,6 +268,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
#endif
+acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
+
/*
* Event interfaces
*/
@@ -308,6 +315,8 @@ acpi_install_gpe_block(acpi_handle gpe_device,
acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
+acpi_status acpi_update_gpes(void);
+
/*
* Resource interfaces
*/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 5db8f472fec9..2b134b691e34 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -115,7 +115,6 @@
*
* ACPI_SIZE 16/32/64-bit unsigned value
* ACPI_NATIVE_INT 16/32/64-bit signed value
- *
*/
/*******************************************************************************
@@ -132,6 +131,16 @@ typedef COMPILER_DEPENDENT_INT64 INT64;
/*! [End] no source code translation !*/
+/*
+ * Value returned by acpi_os_get_thread_id. There is no standard "thread_id"
+ * across operating systems or even the various UNIX systems. Since ACPICA
+ * only needs the thread ID as a unique thread identifier, we use a u64
+ * as the only common data type - it will accommodate any type of pointer or
+ * any type of integer. It is up to the host-dependent OSL to cast the
+ * native thread ID type to a u64 (in acpi_os_get_thread_id).
+ */
+#define acpi_thread_id u64
+
/*******************************************************************************
*
* Types specific to 64-bit targets
@@ -211,12 +220,6 @@ typedef u32 acpi_physical_address;
*
******************************************************************************/
-/* Value returned by acpi_os_get_thread_id */
-
-#ifndef acpi_thread_id
-#define acpi_thread_id acpi_size
-#endif
-
/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
#ifndef acpi_cpu_flags
@@ -375,16 +378,6 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
typedef u8 acpi_owner_id;
#define ACPI_OWNER_ID_MAX 0xFF
-struct uint64_struct {
- u32 lo;
- u32 hi;
-};
-
-union uint64_overlay {
- u64 full;
- struct uint64_struct part;
-};
-
#define ACPI_INTEGER_BIT_SIZE 64
#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
@@ -950,6 +943,9 @@ acpi_status(*acpi_walk_callback) (acpi_handle object,
u32 nesting_level,
void *context, void **return_value);
+typedef
+u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
+
/* Interrupt handler return values */
#define ACPI_INTERRUPT_NOT_HANDLED 0x00
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index c05aeba9e8f0..a3e334ab1119 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -193,6 +193,12 @@
#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
#endif
+/* "inline" keywords - configurable since inline is not standardized */
+
+#ifndef ACPI_INLINE
+#define ACPI_INLINE
+#endif
+
/*
* Debugger threading model
* Use single threaded if the entire subsystem is contained in an application
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 0cd53e3cd1a3..5dcb9537343c 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -44,6 +44,8 @@
#ifndef __ACGCC_H__
#define __ACGCC_H__
+#define ACPI_INLINE __inline__
+
/* Function name is used for debug output. Non-ANSI, compiler-dependent */
#define ACPI_GET_FUNCTION_NAME __func__
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 103f08aca764..572189e37133 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -75,7 +75,6 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
#define acpi_cpu_flags unsigned long
-#define acpi_thread_id struct task_struct *
#else /* !__KERNEL__ */
@@ -88,7 +87,7 @@
/* Host-dependent types and defines for user-space ACPICA */
#define ACPI_FLUSH_CPU_CACHE()
-#define acpi_thread_id pthread_t
+#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
#if defined(__ia64__) || defined(__x86_64__)
#define ACPI_MACHINE_WIDTH 64
@@ -113,12 +112,13 @@
#ifdef __KERNEL__
+#include <acpi/actypes.h>
/*
* Overrides for in-kernel ACPICA
*/
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
- return current;
+ return (acpi_thread_id)(unsigned long)current;
}
/*
@@ -127,7 +127,6 @@ static inline acpi_thread_id acpi_os_get_thread_id(void)
* However, boot has (system_state != SYSTEM_RUNNING)
* to quiet __might_sleep() in kmalloc() and resume does not.
*/
-#include <acpi/actypes.h>
static inline void *acpi_os_allocate(acpi_size size)
{
return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index ca0f239f0e13..2bcc5c7c22a6 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -33,10 +33,10 @@ typedef u64 cputime64_t;
/*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
*/
-#define cputime_to_msecs(__ct) jiffies_to_msecs(__ct)
-#define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct);
+#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs);
/*
* Convert cputime to seconds and back.
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 8ca18e26d7e3..ff5c66080c8c 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -210,7 +210,7 @@ extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
-#else /* !CONFIG_HAVE_GPIO_LIB */
+#else /* !CONFIG_GPIOLIB */
static inline int gpio_is_valid(int number)
{
@@ -239,7 +239,7 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
gpio_set_value(gpio, value);
}
-#endif /* !CONFIG_HAVE_GPIO_LIB */
+#endif /* !CONFIG_GPIOLIB */
#ifndef CONFIG_GPIO_SYSFS
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f4229fb315e1..2c0fc10956ba 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -150,6 +150,7 @@
#define DATA_DATA \
*(.data) \
*(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
DEV_KEEP(init.data) \
DEV_KEEP(exit.data) \
CPU_KEEP(init.data) \
@@ -636,7 +637,7 @@
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
- . = ALIGN(PAGE_SIZE); \
+ . = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
*(.init.ramfs) \
VMLINUX_SYMBOL(__initramfs_end) = .;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 4c9461a4f9e6..274eaaa15c36 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -699,13 +699,8 @@ struct drm_driver {
int (*suspend) (struct drm_device *, pm_message_t state);
int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
- int (*context_ctor) (struct drm_device *dev, int context);
int (*context_dtor) (struct drm_device *dev, int context);
- int (*kernel_context_switch) (struct drm_device *dev, int old,
- int new);
- void (*kernel_context_switch_unlock) (struct drm_device *dev);
/**
* get_vblank_counter - get raw hardware vblank counter
@@ -777,8 +772,6 @@ struct drm_driver {
struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct drm_file *file_priv);
- resource_size_t (*get_map_ofs) (struct drm_local_map * map);
- resource_size_t (*get_reg_ofs) (struct drm_device *dev);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
@@ -795,8 +788,6 @@ struct drm_driver {
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
bool from_release);
- int (*proc_init)(struct drm_minor *minor);
- void (*proc_cleanup)(struct drm_minor *minor);
int (*debugfs_init)(struct drm_minor *minor);
void (*debugfs_cleanup)(struct drm_minor *minor);
@@ -972,7 +963,6 @@ struct drm_device {
__volatile__ long context_flag; /**< Context swapping flag */
__volatile__ long interrupt_flag; /**< Interruption handler flag */
__volatile__ long dma_flag; /**< DMA dispatch flag */
- struct timer_list timer; /**< Timer for delaying ctx switch */
wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
int last_checked; /**< Last context checked for DMA */
int last_context; /**< Last current context */
@@ -1045,25 +1035,12 @@ struct drm_device {
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
- /** \name Drawable information */
- /*@{ */
- spinlock_t drw_lock;
- struct idr drw_idr;
- /*@} */
-
struct drm_mode_config mode_config; /**< Current mode config */
/** \name GEM information */
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
- atomic_t object_count;
- atomic_t object_memory;
- atomic_t pin_count;
- atomic_t pin_memory;
- atomic_t gtt_count;
- atomic_t gtt_memory;
- uint32_t gtt_total;
uint32_t invalidate_domains; /* domains pending invalidation */
uint32_t flush_domains; /* domains pending flush */
/*@} */
@@ -1175,8 +1152,6 @@ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct vm_area_struct *vma);
-extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
-extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@@ -1186,8 +1161,7 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
-extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
-extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
+extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
@@ -1239,17 +1213,6 @@ extern int drm_setsareactx(struct drm_device *dev, void *data,
extern int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
- /* Drawable IOCTL support (drm_drawable.h) */
-extern int drm_adddraw(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_rmdraw(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_update_drawable_info(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
- drm_drawable_t id);
-extern void drm_drawable_free_all(struct drm_device *dev);
-
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1264,7 +1227,6 @@ extern int drm_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_unlock(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
@@ -1359,10 +1321,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
-extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
-extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
-extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
@@ -1414,7 +1372,6 @@ extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);
-extern int drm_gem_object_info(struct seq_file *m, void* data);
#if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3e5a51af757c..029aa688e787 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -221,7 +221,8 @@ struct drm_framebuffer_funcs {
* the semantics and arguments have a one to one mapping
* on this function.
*/
- int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
unsigned num_clips);
};
@@ -762,6 +763,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 59b7073b13fe..73b071203dcc 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,6 +39,11 @@
#include <linux/fb.h>
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
@@ -61,7 +66,8 @@ struct drm_crtc_helper_funcs {
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
int (*mode_set_base_atomic)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y);
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a49e791db0b0..83a389e44543 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -23,6 +23,9 @@
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
+#include <linux/types.h>
+#include <linux/i2c.h>
+
/* From the VESA DisplayPort spec */
#define AUX_NATIVE_WRITE 0x8
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index e41c74facb6a..8c641bed9bbd 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
typedef struct drm_i915_getparam {
int param;
@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
-#define I915_EXEC_BSD (1<<1)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
new file mode 100644
index 000000000000..d3c81946f613
--- /dev/null
+++ b/include/drm/intel-gtt.h
@@ -0,0 +1,18 @@
+/* Common header for intel-gtt.ko and i915.ko */
+
+#ifndef _DRM_INTEL_GTT_H
+#define _DRM_INTEL_GTT_H
+struct intel_gtt {
+ /* Number of stolen gtt entries at the beginning. */
+ unsigned int gtt_stolen_entries;
+ /* Total number of gtt entries. */
+ unsigned int gtt_total_entries;
+ /* Part of the gtt that is mappable by the cpu, for those chips where
+ * this is not the full gtt. */
+ unsigned int gtt_mappable_entries;
+};
+
+struct intel_gtt *intel_gtt_get(void);
+
+#endif
+
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2040e6c4f172..5afa5b52063e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
*/
struct ttm_mem_reg {
- struct drm_mm_node *mm_node;
+ void *mm_node;
+ unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index b87504235f18..d01b4ddbdc56 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -203,7 +203,22 @@ struct ttm_tt {
* It's set up by the ttm_bo_driver::init_mem_type method.
*/
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+ int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+ int (*takedown)(struct ttm_mem_type_manager *man);
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem);
+ void (*put_node)(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem);
+ void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
struct ttm_mem_type_manager {
+ struct ttm_bo_device *bdev;
/*
* No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
* TODO: Consider one lru_lock per ttm_mem_type_manager.
* Plays ill with list removal, though.
*/
-
- struct drm_mm manager;
+ const struct ttm_mem_type_manager_func *func;
+ void *priv;
struct list_head lru;
};
@@ -649,6 +664,12 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_reserve, bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+
/**
* ttm_bo_wait_for_cpu
*
@@ -891,6 +912,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 4d0842391edc..650e6bf6f69f 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -72,6 +72,7 @@
#define DRM_VMW_PARAM_FIFO_OFFSET 3
#define DRM_VMW_PARAM_HW_CAPS 4
#define DRM_VMW_PARAM_FIFO_CAPS 5
+#define DRM_VMW_PARAM_MAX_FB_SIZE 6
/**
* struct drm_vmw_getparam_arg
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 831c4634162c..05a59f0ce37a 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -372,7 +372,6 @@ header-y += veth.h
header-y += vhost.h
header-y += videodev.h
header-y += videodev2.h
-header-y += videotext.h
header-y += virtio_9p.h
header-y += virtio_balloon.h
header-y += virtio_blk.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c227757feb06..050a7bccb836 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -245,8 +245,6 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
-int acpi_check_mem_region(resource_size_t start, resource_size_t n,
- const char *name);
int acpi_resources_are_enforced(void);
@@ -308,6 +306,9 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
extern void acpi_early_init(void);
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -344,12 +345,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
return 0;
}
-static inline int acpi_check_mem_region(resource_size_t start,
- resource_size_t n, const char *name)
-{
- return 0;
-}
-
struct acpi_table_header;
static inline int acpi_table_parse(char *id,
int (*handler)(struct acpi_table_header *))
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
new file mode 100644
index 000000000000..521a0f8974ac
--- /dev/null
+++ b/include/linux/amba/pl08x.h
@@ -0,0 +1,222 @@
+/*
+ * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
+ *
+ * Copyright (C) 2005 ARM Ltd
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * pl08x information required by platform code
+ *
+ * Please credit ARM.com
+ * Documentation: ARM DDI 0196D
+ *
+ */
+
+#ifndef AMBA_PL08X_H
+#define AMBA_PL08X_H
+
+/* We need sizes of structs from this header */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+/**
+ * struct pl08x_channel_data - data structure to pass info between
+ * platform and PL08x driver regarding channel configuration
+ * @bus_id: name of this device channel, not just a device name since
+ * devices may have more than one channel e.g. "foo_tx"
+ * @min_signal: the minimum DMA signal number to be muxed in for this
+ * channel (for platforms supporting muxed signals). If you have
+ * static assignments, make sure this is set to the assigned signal
+ * number, PL08x have 16 possible signals in number 0 thru 15 so
+ * when these are not enough they often get muxed (in hardware)
+ * disabling simultaneous use of the same channel for two devices.
+ * @max_signal: the maximum DMA signal number to be muxed in for
+ * the channel. Set to the same as min_signal for
+ * devices with static assignments
+ * @muxval: a number usually used to poke into some mux regiser to
+ * mux in the signal to this channel
+ * @cctl_opt: default options for the channel control register
+ * @addr: source/target address in physical memory for this DMA channel,
+ * can be the address of a FIFO register for burst requests for example.
+ * This can be left undefined if the PrimeCell API is used for configuring
+ * this.
+ * @circular_buffer: whether the buffer passed in is circular and
+ * shall simply be looped round round (like a record baby round
+ * round round round)
+ * @single: the device connected to this channel will request single
+ * DMA transfers, not bursts. (Bursts are default.)
+ */
+struct pl08x_channel_data {
+ char *bus_id;
+ int min_signal;
+ int max_signal;
+ u32 muxval;
+ u32 cctl;
+ u32 ccfg;
+ dma_addr_t addr;
+ bool circular_buffer;
+ bool single;
+};
+
+/**
+ * Struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ * @fill_bytes: bytes required to fill to the next bus memory
+ * boundary
+ */
+struct pl08x_bus_data {
+ dma_addr_t addr;
+ u8 maxwidth;
+ u8 buswidth;
+ u32 fill_bytes;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @signal: the physical signal (aka channel) serving this
+ * physical channel right now
+ * @serving: the virtual channel currently being served by this
+ * physical channel
+ */
+struct pl08x_phy_chan {
+ unsigned int id;
+ void __iomem *base;
+ spinlock_t lock;
+ int signal;
+ struct pl08x_dma_chan *serving;
+ u32 csrc;
+ u32 cdst;
+ u32 clli;
+ u32 cctl;
+ u32 ccfg;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ */
+struct pl08x_txd {
+ struct dma_async_tx_descriptor tx;
+ struct list_head node;
+ enum dma_data_direction direction;
+ struct pl08x_bus_data srcbus;
+ struct pl08x_bus_data dstbus;
+ int len;
+ dma_addr_t llis_bus;
+ void *llis_va;
+ struct pl08x_channel_data *cd;
+ bool active;
+ /*
+ * Settings to be put into the physical channel when we
+ * trigger this txd
+ */
+ u32 csrc;
+ u32 cdst;
+ u32 clli;
+ u32 cctl;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual
+ * channel states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+ PL08X_CHAN_IDLE,
+ PL08X_CHAN_RUNNING,
+ PL08X_CHAN_PAUSED,
+ PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @chan: wrappped abstract channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @runtime_direction: current direction of this channel according to
+ * runtime config
+ * @lc: last completed transaction on this channel
+ * @desc_list: queued transactions pending on this channel
+ * @at: active transaction on this channel
+ * @lockflags: sometimes we let a lock last between two function calls,
+ * especially prep/submit, and then we need to store the IRQ flags
+ * in the channel state, here
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @waiting: a TX descriptor on this channel which is waiting for
+ * a physical channel to become available
+ */
+struct pl08x_dma_chan {
+ struct dma_chan chan;
+ struct pl08x_phy_chan *phychan;
+ struct tasklet_struct tasklet;
+ char *name;
+ struct pl08x_channel_data *cd;
+ dma_addr_t runtime_addr;
+ enum dma_data_direction runtime_direction;
+ atomic_t last_issued;
+ dma_cookie_t lc;
+ struct list_head desc_list;
+ struct pl08x_txd *at;
+ unsigned long lockflags;
+ spinlock_t lock;
+ void *host;
+ enum pl08x_dma_chan_state state;
+ bool slave;
+ struct pl08x_txd *waiting;
+};
+
+/**
+ * struct pl08x_platform_data - the platform configuration for the
+ * PL08x PrimeCells.
+ * @slave_channels: the channels defined for the different devices on the
+ * platform, all inclusive, including multiplexed channels. The available
+ * physical channels will be multiplexed around these signals as they
+ * are requested, just enumerate all possible channels.
+ * @get_signal: request a physical signal to be used for a DMA
+ * transfer immediately: if there is some multiplexing or similar blocking
+ * the use of the channel the transfer can be denied by returning
+ * less than zero, else it returns the allocated signal number
+ * @put_signal: indicate to the platform that this physical signal is not
+ * running any DMA transfer and multiplexing can be recycled
+ * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
+ * LLI addresses are on 0/1 Master 1/2.
+ */
+struct pl08x_platform_data {
+ struct pl08x_channel_data *slave_channels;
+ unsigned int num_slave_channels;
+ struct pl08x_channel_data memcpy_channel;
+ int (*get_signal)(struct pl08x_dma_chan *);
+ void (*put_signal)(struct pl08x_dma_chan *);
+};
+
+#ifdef CONFIG_AMBA_PL08X
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
+#else
+static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ return false;
+}
+#endif
+
+#endif /* AMBA_PL08X_H */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 35b00746c712..4ce34fa937d4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -111,6 +111,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
+extern struct list_head bdi_pending_list;
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
@@ -285,7 +286,7 @@ enum {
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
-
+long wait_iff_congested(struct zone *zone, int sync, long timeout);
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
new file mode 100644
index 000000000000..198087a16fc4
--- /dev/null
+++ b/include/linux/basic_mmio_gpio.h
@@ -0,0 +1,20 @@
+/*
+ * Basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BASIC_MMIO_GPIO_H
+#define __BASIC_MMIO_GPIO_H
+
+struct bgpio_pdata {
+ int base;
+};
+
+#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 646b462d04df..5027a599077d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -891,6 +891,14 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
nr_blocks << (sb->s_blocksize_bits - 9),
gfp_mask, flags);
}
+static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
+ sector_t nr_blocks, gfp_t gfp_mask)
+{
+ return blkdev_issue_zeroout(sb->s_bdev,
+ block << (sb->s_blocksize_bits - 9),
+ nr_blocks << (sb->s_blocksize_bits - 9),
+ gfp_mask);
+}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index dd1b25b2641c..68d1fe7b877c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -212,7 +212,6 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 709dfb901d11..ed4ba111bc8d 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -154,6 +154,10 @@ enum {
* A thread in rmdir() is wating for this cgroup.
*/
CGRP_WAIT_ON_RMDIR,
+ /*
+ * Clone cgroup values when creating a new child cgroup
+ */
+ CGRP_CLONE_CHILDREN,
};
/* which pidlist file are we talking about? */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51e3145196f6..36d57f74cd01 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,7 +10,7 @@
#include <linux/wait.h>
-/**
+/*
* struct completion - structure used to maintain state for a "completion"
*
* This is the opaque structure used to maintain the state for a "completion".
@@ -34,7 +34,7 @@ struct completion {
({ init_completion(&work); work; })
/**
- * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * DECLARE_COMPLETION - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure. Generally used
@@ -50,7 +50,7 @@ struct completion {
* are on the kernel stack:
*/
/**
- * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure
* @work: identifier for the completion structure
*
* This macro declares and initializes a completion structure on the kernel
@@ -64,7 +64,7 @@ struct completion {
#endif
/**
- * init_completion: - Initialize a dynamically allocated completion
+ * init_completion - Initialize a dynamically allocated completion
* @x: completion structure that is to be initialized
*
* This inline function will initialize a dynamically created completion
@@ -92,7 +92,7 @@ extern void complete(struct completion *);
extern void complete_all(struct completion *);
/**
- * INIT_COMPLETION: - reinitialize a completion structure
+ * INIT_COMPLETION - reinitialize a completion structure
* @x: completion structure to be reinitialized
*
* This macro should be used to reinitialize a completion structure so it can
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3a779ffba60b..7e8ca75d2dad 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -88,12 +88,6 @@ struct cn_queue_dev {
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
- /* Sent to kevent to create cn_queue only when needed */
- struct work_struct wq_creation;
- /* Tell if the wq_creation job is pending/completed */
- atomic_t wq_requested;
- /* Wait for cn_queue to be created */
- wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
-int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
-
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2106495cc11..9d8688b92d8b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -64,13 +64,15 @@ enum dma_transaction_type {
DMA_PQ_VAL,
DMA_MEMSET,
DMA_INTERRUPT,
+ DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
+ DMA_CYCLIC,
};
/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
+#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
/**
@@ -119,12 +121,15 @@ enum dma_ctrl_flags {
* configuration data in statically from the platform). An additional
* argument of struct dma_slave_config must be passed in with this
* command.
+ * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
+ * into external start mode.
*/
enum dma_ctrl_cmd {
DMA_TERMINATE_ALL,
DMA_PAUSE,
DMA_RESUME,
DMA_SLAVE_CONFIG,
+ FSLDMA_EXTERNAL_START,
};
/**
@@ -316,14 +321,14 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
-#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
#endif
};
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
@@ -422,6 +427,9 @@ struct dma_tx_state {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ * The function takes a buffer of size buf_len. The callback function will
+ * be called after period_len bytes have been transferred.
* @device_control: manipulate all pending operations on a channel, returns
* zero or error code
* @device_tx_status: poll for transaction completion, the optional
@@ -473,11 +481,19 @@ struct dma_device {
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction);
int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg);
@@ -487,6 +503,40 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan);
};
+static inline int dmaengine_device_control(struct dma_chan *chan,
+ enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ return chan->device->device_control(chan, cmd, arg);
+}
+
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long)config);
+}
+
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+ return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+}
+
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+ return dmaengine_device_control(chan, DMA_PAUSE, 0);
+}
+
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+ return dmaengine_device_control(chan, DMA_RESUME, 0);
+}
+
+static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+ return desc->tx_submit(desc);
+}
+
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -606,11 +656,11 @@ static inline void net_dmaengine_put(void)
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else
#define async_dma_find_channel(type) dma_find_channel(type)
-#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f0268deca658..7fca3dc4e475 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -931,6 +931,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_writel sbus_writel
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
+#define fb_memcpy_fromfb sbus_memcpy_fromio
+#define fb_memcpy_tofb sbus_memcpy_toio
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
@@ -943,6 +945,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_writel __raw_writel
#define fb_writeq __raw_writeq
#define fb_memset memset_io
+#define fb_memcpy_fromfb memcpy_fromio
+#define fb_memcpy_tofb memcpy_toio
#else
@@ -955,6 +959,8 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
#define fb_memset memset
+#define fb_memcpy_fromfb memcpy
+#define fb_memcpy_tofb memcpy
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4f34ff6e5558..1c73b50e81ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -9,6 +9,7 @@
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/blk_types.h>
+#include <linux/types.h>
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -32,11 +33,17 @@
#define SEEK_END 2 /* seek relative to end of file */
#define SEEK_MAX SEEK_END
+struct fstrim_range {
+ uint64_t start;
+ uint64_t len;
+ uint64_t minlen;
+};
+
/* And dynamically-tunable limits and defaults: */
struct files_stat_struct {
- int nr_files; /* read only */
- int nr_free_files; /* read only */
- int max_files; /* tunable */
+ unsigned long nr_files; /* read only */
+ unsigned long nr_free_files; /* read only */
+ unsigned long max_files; /* tunable */
};
struct inodes_stat_t {
@@ -92,6 +99,9 @@ struct inodes_stat_t {
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
+/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@@ -231,6 +241,7 @@ struct inodes_stat_t {
#define S_NOCMTIME 128 /* Do not update file c/mtime */
#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
#define S_PRIVATE 512 /* Inode is fs-internal */
+#define S_IMA 1024 /* Inode has an associated IMA struct */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -265,6 +276,7 @@ struct inodes_stat_t {
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
+#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
@@ -312,6 +324,7 @@ struct inodes_stat_t {
#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
#define FIFREEZE _IOWR('X', 119, int) /* Freeze */
#define FITHAW _IOWR('X', 120, int) /* Thaw */
+#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */
#define FS_IOC_GETFLAGS _IOR('f', 1, long)
#define FS_IOC_SETFLAGS _IOW('f', 2, long)
@@ -400,7 +413,7 @@ extern void __init inode_init_early(void);
extern void __init files_init(unsigned long);
extern struct files_stat_struct files_stat;
-extern int get_max_files(void);
+extern unsigned long get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
@@ -720,7 +733,8 @@ struct posix_acl;
struct inode {
struct hlist_node i_hash;
- struct list_head i_list; /* backing dev IO list */
+ struct list_head i_wb_list; /* backing dev IO list */
+ struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
@@ -772,6 +786,10 @@ struct inode {
unsigned int i_flags;
+#ifdef CONFIG_IMA
+ /* protected by i_lock */
+ unsigned int i_readcount; /* struct files open RO */
+#endif
atomic_t i_writecount;
#ifdef CONFIG_SECURITY
void *i_security;
@@ -783,6 +801,11 @@ struct inode {
void *i_private; /* fs or device private pointer */
};
+static inline int inode_unhashed(struct inode *inode)
+{
+ return hlist_unhashed(&inode->i_hash);
+}
+
/*
* inode->i_mutex nesting subclasses for the lock validator:
*
@@ -1107,6 +1130,7 @@ extern int fcntl_getlease(struct file *filp);
/* fs/locks.c */
extern void locks_init_lock(struct file_lock *);
+extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
@@ -1295,6 +1319,11 @@ struct fasync_struct {
/* SMP safe fasync helpers: */
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
@@ -1583,6 +1612,7 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
+ int (*trim_fs) (struct super_block *, struct fstrim_range *);
};
/*
@@ -1633,16 +1663,17 @@ struct super_operations {
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
-#define I_DIRTY_SYNC 1
-#define I_DIRTY_DATASYNC 2
-#define I_DIRTY_PAGES 4
+#define I_DIRTY_SYNC (1 << 0)
+#define I_DIRTY_DATASYNC (1 << 1)
+#define I_DIRTY_PAGES (1 << 2)
#define __I_NEW 3
#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE 16
-#define I_FREEING 32
-#define I_CLEAR 64
+#define I_WILL_FREE (1 << 4)
+#define I_FREEING (1 << 5)
+#define I_CLEAR (1 << 6)
#define __I_SYNC 7
#define I_SYNC (1 << __I_SYNC)
+#define I_REFERENCED (1 << 8)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -1734,6 +1765,7 @@ static inline void file_accessed(struct file *file)
}
int sync_inode(struct inode *inode, struct writeback_control *wbc);
+int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
const char *name;
@@ -2078,7 +2110,6 @@ extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
#endif
-extern int invalidate_inodes(struct super_block *);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -2162,7 +2193,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
-extern void inode_add_to_lists(struct super_block *, struct inode *);
+extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
@@ -2182,11 +2213,11 @@ extern struct inode * iget_locked(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
extern void unlock_new_inode(struct inode *);
+extern unsigned int get_next_ino(void);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void end_writeback(struct inode *);
-extern void destroy_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
@@ -2194,9 +2225,11 @@ extern int file_remove_suid(struct file *);
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);
-static inline void insert_inode_hash(struct inode *inode) {
+static inline void insert_inode_hash(struct inode *inode)
+{
__insert_inode_hash(inode, inode->i_ino);
}
+extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
extern void submit_bio(int, struct bio *);
@@ -2479,7 +2512,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-
+int proc_nr_dentry(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int proc_nr_inodes(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 975609cb8548..e8713d55360a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -9,6 +9,32 @@
struct vm_area_struct;
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA 0x01u
+#define ___GFP_HIGHMEM 0x02u
+#define ___GFP_DMA32 0x04u
+#define ___GFP_MOVABLE 0x08u
+#define ___GFP_WAIT 0x10u
+#define ___GFP_HIGH 0x20u
+#define ___GFP_IO 0x40u
+#define ___GFP_FS 0x80u
+#define ___GFP_COLD 0x100u
+#define ___GFP_NOWARN 0x200u
+#define ___GFP_REPEAT 0x400u
+#define ___GFP_NOFAIL 0x800u
+#define ___GFP_NORETRY 0x1000u
+#define ___GFP_COMP 0x4000u
+#define ___GFP_ZERO 0x8000u
+#define ___GFP_NOMEMALLOC 0x10000u
+#define ___GFP_HARDWALL 0x20000u
+#define ___GFP_THISNODE 0x40000u
+#define ___GFP_RECLAIMABLE 0x80000u
+#ifdef CONFIG_KMEMCHECK
+#define ___GFP_NOTRACK 0x200000u
+#else
+#define ___GFP_NOTRACK 0
+#endif
+
/*
* GFP bitmasks..
*
@@ -18,10 +44,10 @@ struct vm_area_struct;
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
*/
-#define __GFP_DMA ((__force gfp_t)0x01u)
-#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
-#define __GFP_DMA32 ((__force gfp_t)0x04u)
-#define __GFP_MOVABLE ((__force gfp_t)0x08u) /* Page is movable */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
* Action modifiers - doesn't change the zoning
@@ -38,27 +64,22 @@ struct vm_area_struct;
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
*/
-#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
-#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
-#define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */
-#define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */
-#define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */
-#define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
-#define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */
-#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */
-#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */
-#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
-#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
-#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-
-#ifdef CONFIG_KMEMCHECK
-#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
-#else
-#define __GFP_NOTRACK ((__force gfp_t)0)
-#endif
+#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
+#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
+#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
+#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
+#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
/*
* This may seem redundant, but it's a way of annotating false positives vs.
@@ -186,14 +207,14 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
#endif
#define GFP_ZONE_TABLE ( \
- (ZONE_NORMAL << 0 * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \
- | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \
- | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \
- | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \
- | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \
- | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
- | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+ (ZONE_NORMAL << 0 * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
+ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
+ | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
+ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
)
/*
@@ -203,20 +224,20 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
* allowed.
*/
#define GFP_ZONE_BAD ( \
- 1 << (__GFP_DMA | __GFP_HIGHMEM) \
- | 1 << (__GFP_DMA | __GFP_DMA32) \
- | 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \
- | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \
- | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \
- | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \
- | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \
- | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_DMA | ___GFP_DMA32) \
+ | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)
static inline enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
- int bit = flags & GFP_ZONEMASK;
+ int bit = (__force int) (flags & GFP_ZONEMASK);
z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
((1 << ZONES_SHIFT) - 1);
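
The point of the plain-integer ___GFP_* masks is that, unlike the __force-cast __GFP_* versions, they can sit inside constant expressions such as GFP_ZONE_TABLE and GFP_ZONE_BAD without upsetting sparse. A rough, self-contained sketch of how the gfp_zone() table lookup works; DEMO_ZONES_SHIFT = 2 and the demo_zone numbering are invented purely for illustration:

        /* Hedged sketch of the lookup mechanism, not the real kernel constants. */
        enum demo_zone { DEMO_DMA, DEMO_NORMAL, DEMO_HIGHMEM, DEMO_MOVABLE };
        #define DEMO_ZONES_SHIFT 2                      /* assumed: 2 bits per table entry */

        #define DEMO_ZONE_TABLE \
                ((DEMO_NORMAL  << 0 * DEMO_ZONES_SHIFT)                 | \
                 (DEMO_DMA     << ___GFP_DMA * DEMO_ZONES_SHIFT)        | \
                 (DEMO_HIGHMEM << ___GFP_HIGHMEM * DEMO_ZONES_SHIFT))

        static inline enum demo_zone demo_gfp_zone(unsigned int flags)
        {
                int bit = flags & (___GFP_DMA | ___GFP_HIGHMEM);

                return (DEMO_ZONE_TABLE >> (bit * DEMO_ZONES_SHIFT)) &
                       ((1 << DEMO_ZONES_SHIFT) - 1);
        }
        /* demo_gfp_zone(0) == DEMO_NORMAL, demo_gfp_zone(___GFP_HIGHMEM) == DEMO_HIGHMEM */
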
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e3060ef85b6d..e9138198e823 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#include <asm/kmap_types.h>
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
@@ -66,19 +54,19 @@ static inline void kunmap(struct page *page)
{
}
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
-#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot) __kmap_atomic(page)
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
}
-#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
@@ -86,12 +74,50 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
#endif /* CONFIG_HIGHMEM */
-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- kunmap_atomic_notypecheck((addr), (idx)); \
- } while (0)
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+ int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx > KM_TYPE_NR);
+#endif
+ return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+ return __get_cpu_var(__kmap_atomic_idx) - 1;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+ int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(idx < 0);
+#endif
+ return idx;
+}
+
+#endif
+
+/*
+ * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...) \
+do { \
+ BUILD_BUG_ON(__same_type((addr), struct page *)); \
+ __kunmap_atomic(addr); \
+} while (0)
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
@@ -201,8 +227,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
+ kunmap_atomic(vfrom, KM_USER0);
}
#endif
@@ -214,8 +240,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
copy_page(vto, vfrom);
- kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
+ kunmap_atomic(vfrom, KM_USER0);
}
#endif /* _LINUX_HIGHMEM_H */
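
With the km_type slots gone, atomic kmaps nest on a small per-CPU stack maintained by kmap_atomic_idx_push()/kmap_atomic_idx_pop(), and the variadic macros above let old two-argument callers keep compiling. The reordered kunmap_atomic() calls in copy_user_highpage()/copy_highpage() show the new rule: unmap strictly in reverse order. A hedged sketch of the slot-less calling convention (copy_two_pages is an invented example, not kernel code):

        /* Hedged sketch of the new, slot-less atomic mapping API. */
        static void copy_two_pages(struct page *dst, struct page *src)
        {
                void *vdst, *vsrc;

                vsrc = kmap_atomic(src);        /* pushes an implicit per-CPU slot */
                vdst = kmap_atomic(dst);        /* nests on top of the previous map */

                memcpy(vdst, vsrc, PAGE_SIZE);

                kunmap_atomic(vdst);            /* unmap in reverse (LIFO) order */
                kunmap_atomic(vsrc);
        }
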
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f479700df61b..943c76b3d4bb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -43,7 +43,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
int acctflags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
-void __isolate_hwpoisoned_huge_page(struct page *page);
+int dequeue_hwpoisoned_huge_page(struct page *page);
+void copy_huge_page(struct page *dst, struct page *src);
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -101,7 +102,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define huge_pte_offset(mm, address) 0
-#define __isolate_hwpoisoned_huge_page(page) 0
+#define dequeue_hwpoisoned_huge_page(page) 0
+static inline void copy_huge_page(struct page *dst, struct page *src)
+{
+}
#define hugetlb_change_protection(vma, address, end, newprot)
@@ -228,6 +232,8 @@ struct huge_bootmem_page {
struct hstate *hstate;
};
+struct page *alloc_huge_page_node(struct hstate *h, int nid);
+
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -301,8 +307,14 @@ static inline struct hstate *page_hstate(struct page *page)
return size_to_hstate(PAGE_SIZE << compound_order(page));
}
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+ return hstates[index].order + PAGE_SHIFT;
+}
+
#else
struct hstate {};
+#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
@@ -317,6 +329,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
}
+#define hstate_index_to_shift(index) 0
#endif
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 269181b8f623..3c5d6b6e765c 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -74,6 +74,20 @@
#define ADP5588_DEVICE_ID_MASK 0xF
+ /* Configuration Register1 */
+#define ADP5588_AUTO_INC (1 << 7)
+#define ADP5588_GPIEM_CFG (1 << 6)
+#define ADP5588_INT_CFG (1 << 4)
+#define ADP5588_GPI_IEN (1 << 1)
+
+/* Interrupt Status Register */
+#define ADP5588_GPI_INT (1 << 1)
+#define ADP5588_KE_INT (1 << 0)
+
+#define ADP5588_MAXGPIO 18
+#define ADP5588_BANK(offs) ((offs) >> 3)
+#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
+
/* Put one of these structures in i2c_board_info platform_data */
#define ADP5588_KEYMAPSIZE 80
@@ -126,9 +140,12 @@ struct adp5588_kpad_platform_data {
const struct adp5588_gpio_platform_data *gpio_data;
};
+struct i2c_client; /* forward declaration */
+
struct adp5588_gpio_platform_data {
- unsigned gpio_start; /* GPIO Chip base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
+ int gpio_start; /* GPIO Chip base # */
+ unsigned irq_base; /* interrupt base # */
+ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
int (*setup)(struct i2c_client *client,
int gpio, unsigned ngpio,
void *context);
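
ADP5588_BANK() and ADP5588_BIT() split a GPIO offset into one of the chip's 8-bit register banks and the bit within it. A small worked sketch; demo_i2c_write() and DEMO_GPIO_DAT_OUT_BASE are invented stand-ins for the driver's real register accessors:

        /* Hedged sketch: drive GPIO offset 10 high; register access is invented. */
        static int demo_set_gpio10(void)
        {
                unsigned offs = 10;
                unsigned bank = ADP5588_BANK(offs);     /* 10 >> 3 == 1 (second bank) */
                unsigned bit  = ADP5588_BIT(offs);      /* 1 << (10 & 7) == 0x04 */

                /* e.g. set 'bit' in the data-out register of 'bank' over I2C */
                return demo_i2c_write(DEMO_GPIO_DAT_OUT_BASE + bank, bit);
        }
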
diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h
new file mode 100644
index 000000000000..d186fcc5d257
--- /dev/null
+++ b/include/linux/i2c/apds990x.h
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __APDS990X_H__
+#define __APDS990X_H__
+
+
+#define APDS_IRLED_CURR_12mA 0x3
+#define APDS_IRLED_CURR_25mA 0x2
+#define APDS_IRLED_CURR_50mA 0x1
+#define APDS_IRLED_CURR_100mA 0x0
+
+/**
+ * struct apds990x_chip_factors - defines effect of the cover window
+ * @ga: Total glass attenuation
+ * @cf1: clear channel factor 1 for raw to lux conversion
+ * @irf1: IR channel factor 1 for raw to lux conversion
+ * @cf2: clear channel factor 2 for raw to lux conversion
+ * @irf2: IR channel factor 2 for raw to lux conversion
+ * @df: device factor for conversion formulas
+ *
+ * Structure for tuning ALS calculation to match with environment.
+ * Values depend on the material above the sensor and the sensor
+ * itself. If the GA is zero, the driver will use uncovered sensor default values.
+ * Format: decimal value * APDS_PARAM_SCALE, except df which is a plain integer.
+ */
+#define APDS_PARAM_SCALE 4096
+struct apds990x_chip_factors {
+ int ga;
+ int cf1;
+ int irf1;
+ int cf2;
+ int irf2;
+ int df;
+};
+
+/**
+ * struct apds990x_platform_data - platform data for apsd990x.c driver
+ * @cf: chip factor data
+ * @pdrive: IR-led driving current
+ * @ppcount: number of IR pulses used for proximity estimation
+ * @setup_resources: interrupt line setup call back function
+ * @release_resources: interrupt line release call back function
+ *
+ * Proximity detection result depends heavily on correct ppcount, pdrive
+ * and cover window.
+ *
+ */
+
+struct apds990x_platform_data {
+ struct apds990x_chip_factors cf;
+ u8 pdrive;
+ u8 ppcount;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+};
+
+#endif
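
The chip factors are fixed-point values scaled by APDS_PARAM_SCALE (4096); only df stays a plain integer. A hedged board-file sketch, with every number below invented purely to show the scaling (a glass attenuation of about 0.48 becomes 0.48 * 4096, roughly 1966):

        /* Hedged sketch: all factor values below are invented for illustration. */
        static struct apds990x_platform_data demo_apds990x_data = {
                .cf = {
                        .ga     = 1966,         /* 0.48 * APDS_PARAM_SCALE */
                        .cf1    = 4096,         /* 1.00 * APDS_PARAM_SCALE */
                        .irf1   = 9134,         /* 2.23 * APDS_PARAM_SCALE */
                        .cf2    = 2867,         /* 0.70 * APDS_PARAM_SCALE */
                        .irf2   = 5816,         /* 1.42 * APDS_PARAM_SCALE */
                        .df     = 52,           /* plain integer device factor */
                },
                .pdrive  = APDS_IRLED_CURR_50mA,
                .ppcount = 4,
        };
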
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h
new file mode 100644
index 000000000000..8b5e2df36c72
--- /dev/null
+++ b/include/linux/i2c/bh1770glc.h
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BH1770_H__
+#define __BH1770_H__
+
+/**
+ * struct bh1770_platform_data - platform data for bh1770glc driver
+ * @led_def_curr: IR led driving current.
+ * @glass_attenuation: Attenuation factor for covering window.
+ * @setup_resources: Call back for interrupt line setup function
+ * @release_resources: Call back for interrupt line release function
+ *
+ * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor
+ * of 3.85. i.e. light_above_sensor = light_above_cover_window / 3.85
+ */
+
+struct bh1770_platform_data {
+#define BH1770_LED_5mA 0
+#define BH1770_LED_10mA 1
+#define BH1770_LED_20mA 2
+#define BH1770_LED_50mA 3
+#define BH1770_LED_100mA 4
+#define BH1770_LED_150mA 5
+#define BH1770_LED_200mA 6
+ __u8 led_def_curr;
+#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */
+ __u32 glass_attenuation;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+};
+#endif
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 928ae712709f..13a801f3d028 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -81,6 +81,7 @@ struct idr {
#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
/**
+ * DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
*
* idr_find() is able to be called locklessly, using RCU. The caller must
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2fea6c8ef6ba..1f8c06ce0fa6 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -29,6 +29,8 @@ extern struct fs_struct init_fs;
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
}
extern struct nsproxy init_nsproxy;
@@ -145,8 +147,6 @@ extern struct cred init_cred;
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
index d9d08b6269b6..10496bd24c5c 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -27,14 +27,7 @@
#include <linux/dmaengine.h>
-/*DMA transaction width, src and dstn width would be same
-The DMA length must be width aligned,
-for 32 bit width the length must be 32 bit (4bytes) aligned only*/
-enum intel_mid_dma_width {
- LNW_DMA_WIDTH_8BIT = 0x0,
- LNW_DMA_WIDTH_16BIT = 0x1,
- LNW_DMA_WIDTH_32BIT = 0x2,
-};
+#define DMA_PREP_CIRCULAR_LIST (1 << 10)
/*DMA mode configurations*/
enum intel_mid_dma_mode {
@@ -69,18 +62,15 @@ enum intel_mid_dma_msize {
* @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
* @src_msize: Source DMA burst size
* @dst_msize: Dst DMA burst size
+ * @per_addr: Peripheral address
* @device_instance: DMA peripheral device instance, we can have multiple
* peripheral device connected to single DMAC
*/
struct intel_mid_dma_slave {
- enum dma_data_direction dirn;
- enum intel_mid_dma_width src_width; /*width of DMA src txn*/
- enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
- enum intel_mid_dma_msize src_msize; /*size if src burst*/
- enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
unsigned int device_instance; /*0, 1 for periphral instance*/
+ struct dma_slave_config dma_slave;
};
#endif /*__INTEL_MID_DMA_H__*/
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 01b281646251..79d0c4f6d071 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -410,7 +410,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
static inline void __raise_softirq_irqoff(unsigned int nr)
{
- trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+ trace_softirq_raise(nr);
or_softirq_pending(1UL << nr);
}
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 7fb592793738..8cdcc2a199ad 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
- unsigned long offset,
- int slot)
+ unsigned long offset)
{
resource_size_t phys_addr;
unsigned long pfn;
@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+ return iomap_atomic_prot_pfn(pfn, mapping->prot);
}
static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr, slot);
+ iounmap_atomic(vaddr);
}
static inline void __iomem *
@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
- unsigned long offset,
- int slot)
+ unsigned long offset)
{
return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
{
}
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index b22790268b64..d377ea815d45 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -112,6 +112,7 @@ struct resource_list {
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
+extern int resource_alloc_from_bottom;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0b52924a0cb6..2ae86aa21fce 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -395,7 +395,7 @@ struct jbd2_inode {
struct inode *i_vfs_inode;
/* Flags of inode [j_list_lock] */
- unsigned int i_flags;
+ unsigned long i_flags;
};
struct jbd2_revoke_table_s;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index edef168a0406..450092c1e35f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -173,6 +173,11 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
+#define abs64(x) ({ \
+ s64 __x = (x); \
+ (__x < 0) ? -__x : __x; \
+ })
+
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void);
#else
@@ -203,10 +208,10 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
-extern int strict_strtoul(const char *, unsigned int, unsigned long *);
-extern int strict_strtol(const char *, unsigned int, long *);
-extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
-extern int strict_strtoll(const char *, unsigned int, long long *);
+extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *);
+extern int __must_check strict_strtol(const char *, unsigned int, long *);
+extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *);
+extern int __must_check strict_strtoll(const char *, unsigned int, long long *);
extern int sprintf(char * buf, const char * fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int vsprintf(char *buf, const char *, va_list)
@@ -277,6 +282,11 @@ asmlinkage int vprintk(const char *fmt, va_list args)
asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2))) __cold;
+/*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+ * with all other unrelated printk_ratelimit() callsites. Instead use
+ * printk_ratelimited() or plain old __ratelimit().
+ */
extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
@@ -651,6 +661,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
+#define min3(x, y, z) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ typeof(z) _min3 = (z); \
+ (void) (&_min1 == &_min2); \
+ (void) (&_min1 == &_min3); \
+ _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
+ (_min2 < _min3 ? _min2 : _min3); })
+
+#define max3(x, y, z) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ typeof(z) _max3 = (z); \
+ (void) (&_max1 == &_max2); \
+ (void) (&_max1 == &_max3); \
+ _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+ (_max2 > _max3 ? _max2 : _max3); })
+
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
* @x: value1
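
min3()/max3() just nest the existing two-way comparisons (the pointer equality checks are the usual typeof-mismatch warnings), and abs64() is the s64 counterpart of abs(). A trivial hedged sketch of both; the function names are invented:

        /* Hedged sketch of the new helpers. */
        static u32 demo_pick_timeout(u32 a, u32 b, u32 c)
        {
                return min3(a, b, c);                   /* smallest of the three */
        }

        static s64 demo_clock_error(s64 expected_ns, s64 measured_ns)
        {
                return abs64(expected_ns - measured_ns);        /* magnitude of an s64 delta */
        }
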
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index c059044bc6dc..ad54c846911b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -33,6 +33,7 @@ struct kernel_stat {
#ifndef CONFIG_GENERIC_HARDIRQS
unsigned int irqs[NR_IRQS];
#endif
+ unsigned long irqs_sum;
unsigned int softirqs[NR_SOFTIRQS];
};
@@ -54,6 +55,7 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
struct irq_desc *desc)
{
kstat_this_cpu.irqs[irq]++;
+ kstat_this_cpu.irqs_sum++;
}
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,8 +67,9 @@ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_irqs_this_cpu(DESC) \
((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
- ((DESC)->kstat_irqs[smp_processor_id()]++)
+#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
+ ((DESC)->kstat_irqs[smp_processor_id()]++);\
+ kstat_this_cpu.irqs_sum++; } while (0)
#endif
@@ -83,6 +86,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
/*
* Number of interrupts per specific IRQ source, since bootup
*/
+#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
unsigned int sum = 0;
@@ -93,7 +97,17 @@ static inline unsigned int kstat_irqs(unsigned int irq)
return sum;
}
+#else
+extern unsigned int kstat_irqs(unsigned int irq);
+#endif
+/*
+ * Number of interrupts per cpu, since bootup
+ */
+static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+{
+ return kstat_cpu(cpu).irqs_sum;
+}
/*
* Lock/unlock the current runqueue - to extract task statistics:
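
irqs_sum keeps a running per-CPU interrupt total, and kstat_cpu_irqs_sum() exposes it, so a reader such as /proc/stat no longer has to walk every IRQ to get the grand total. A hedged sketch of the consumer side (demo_total_irqs is an invented name):

        /* Hedged sketch: sum interrupts over all CPUs. */
        static u64 demo_total_irqs(void)
        {
                u64 sum = 0;
                int cpu;

                for_each_possible_cpu(cpu)
                        sum += kstat_cpu_irqs_sum(cpu); /* per-CPU counter, no IRQ walk */
                return sum;
        }
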
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 62dbee554f60..10308c6a3d1c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -172,7 +172,13 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
static inline unsigned int __must_check
-__kfifo_must_check_helper(unsigned int val)
+__kfifo_uint_must_check_helper(unsigned int val)
+{
+ return val;
+}
+
+static inline int __must_check
+__kfifo_int_must_check_helper(int val)
{
return val;
}
@@ -267,7 +273,7 @@ __kfifo_must_check_helper(unsigned int val)
* @fifo: address of the fifo to be used
*/
#define kfifo_avail(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmpq = (fifo); \
const size_t __recsize = sizeof(*__tmpq->rectype); \
@@ -300,7 +306,7 @@ __kfifo_must_check_helper( \
* This function returns the size of the next fifo record in number of bytes.
*/
#define kfifo_peek_len(fifo) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
@@ -323,7 +329,7 @@ __kfifo_must_check_helper( \
* Return 0 if no error, otherwise an error code.
*/
#define kfifo_alloc(fifo, size, gfp_mask) \
-__kfifo_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -419,7 +425,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_get(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((val) + 1) __val = (val); \
@@ -460,7 +466,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_peek(fifo, val) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((val) + 1) __val = (val); \
@@ -552,7 +558,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_out(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((buf) + 1) __buf = (buf); \
@@ -580,7 +586,7 @@ __kfifo_must_check_helper( \
* copied.
*/
#define kfifo_out_spinlocked(fifo, buf, n, lock) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
unsigned long __flags; \
unsigned int __ret; \
@@ -609,7 +615,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_from_user(fifo, from, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
const void __user *__from = (from); \
@@ -637,7 +643,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
@@ -764,7 +770,7 @@ __kfifo_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_out_peek(fifo, buf, n) \
-__kfifo_must_check_helper( \
+__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
typeof((buf) + 1) __buf = (buf); \
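
Splitting __kfifo_must_check_helper() into a uint and an int variant keeps __must_check on every macro while preserving the sign of kfifo_alloc()'s error return, which the old unsigned-only helper silently converted to a large positive value. Caller code is unchanged, as in this hedged sketch:

        /* Hedged sketch: kfifo_alloc() returns an int error, kfifo_out() a count. */
        static int demo_fifo_roundtrip(void)
        {
                struct kfifo fifo;
                unsigned char buf[8];
                unsigned int copied;
                int err;

                err = kfifo_alloc(&fifo, 128, GFP_KERNEL);      /* int: 0 or -ENOMEM */
                if (err)
                        return err;

                kfifo_in(&fifo, "hello", 5);
                copied = kfifo_out(&fifo, buf, sizeof(buf));    /* unsigned int count */

                kfifo_free(&fifo);
                return copied == 5 ? 0 : -EIO;
        }
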
diff --git a/include/linux/list.h b/include/linux/list.h
index 88a000617d77..9a5f8a71810c 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -636,6 +636,12 @@ static inline void hlist_add_after(struct hlist_node *n,
next->next->pprev = &next->next;
}
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
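
hlist_add_fake() points a node's pprev at its own next so that later, unconditional hlist_del() calls work even though the node was never put on a real hash list (the use case being inodes a filesystem chooses not to hash). A hedged sketch with an invented demo_obj type:

        /* Hedged sketch: pretend the node is hashed so hlist_del() is always safe. */
        struct demo_obj {
                struct hlist_node hash;
                int id;
        };

        static void demo_init_unhashed(struct demo_obj *obj)
        {
                INIT_HLIST_NODE(&obj->hash);    /* next = pprev = NULL */
                hlist_add_fake(&obj->hash);     /* now pprev = &hash.next */
        }

        static void demo_teardown(struct demo_obj *obj)
        {
                hlist_del(&obj->hash);          /* works whether or not it was really hashed */
        }
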
diff --git a/include/linux/magic.h b/include/linux/magic.h
index eb9800f05782..ff690d05f129 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -57,5 +57,6 @@
#define DEVPTS_SUPER_MAGIC 0x1cd1
#define SOCKFS_MAGIC 0x534F434B
+#define V9FS_MAGIC 0x01021997
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c87f1528703a..23fcdfcba81b 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
return dividend / divisor;
}
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+ return dividend / divisor;
+}
+
#elif BITS_PER_LONG == 32
#ifndef div_u64_rem
@@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
#endif /* BITS_PER_LONG */
/**
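
div64_s64() is the missing signed divide with a full 64-bit divisor: a plain C division on 64-bit, and presumably an out-of-line helper in lib/div64.c on 32-bit, matching the existing div64_u64() arrangement. A hedged sketch of a typical caller (invented function):

        /* Hedged sketch: average of signed 64-bit deltas without open-coding 64/64. */
        static s64 demo_avg_delta(const s64 *delta, s64 n)
        {
                s64 sum = 0;
                s64 i;

                for (i = 0; i < n; i++)
                        sum += delta[i];

                return n ? div64_s64(sum, n) : 0;       /* safe on 32-bit and 64-bit */
        }
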
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 864035fb8f8a..4307231bd22f 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -70,6 +70,10 @@ extern void online_page(struct page *page);
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern bool is_pageblock_removable_nolock(struct page *page);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7238231b8dd4..085527fb8261 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -14,6 +14,8 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, int offlining);
+extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -23,12 +25,17 @@ extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
const nodemask_t *from, const nodemask_t *to,
unsigned long flags);
+extern void migrate_page_copy(struct page *newpage, struct page *page);
+extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page);
#else
#define PAGE_MIGRATION 0
static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, int offlining) { return -ENOSYS; }
+static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
@@ -40,6 +47,15 @@ static inline int migrate_vmas(struct mm_struct *mm,
return -ENOSYS;
}
+static inline void migrate_page_copy(struct page *newpage,
+ struct page *page) {}
+
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ return -ENOSYS;
+}
+
/* Possible settings for the migrate_page() method in address_operations */
#define migrate_page NULL
#define fail_migrate_page NULL
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 78a1b9671752..9a18667c13cc 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -58,6 +58,7 @@ enum {
MLX4_CMD_SENSE_PORT = 0x4d,
MLX4_CMD_HW_HEALTH_CHECK = 0x50,
MLX4_CMD_SET_PORT = 0xc,
+ MLX4_CMD_SET_NODE = 0x5a,
MLX4_CMD_ACCESS_DDR = 0x2e,
MLX4_CMD_MAP_ICM = 0xffa,
MLX4_CMD_UNMAP_ICM = 0xff9,
@@ -141,6 +142,7 @@ enum {
MLX4_SET_PORT_MAC_TABLE = 0x2,
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
+ MLX4_SET_PORT_GID_TABLE = 0x5,
};
struct mlx4_dev;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7338654c02b4..a7b15bc7648e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -67,7 +67,8 @@ enum {
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19,
MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20,
- MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21
+ MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21,
+ MLX4_DEV_CAP_FLAG_IBOE = 1 << 30
};
enum {
@@ -171,6 +172,10 @@ enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
+enum {
+ MLX4_MAX_FAST_REG_PAGES = 511,
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -379,6 +384,27 @@ struct mlx4_av {
u8 dgid[16];
};
+struct mlx4_eth_av {
+ __be32 port_pd;
+ u8 reserved1;
+ u8 smac_idx;
+ u16 reserved2;
+ u8 reserved3;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ __be32 sl_tclass_flowlabel;
+ u8 dgid[16];
+ u32 reserved4[2];
+ __be16 vlan;
+ u8 mac[6];
+};
+
+union mlx4_ext_av {
+ struct mlx4_av ib;
+ struct mlx4_eth_av eth;
+};
+
struct mlx4_dev {
struct pci_dev *pdev;
unsigned long flags;
@@ -407,6 +433,12 @@ struct mlx4_init_port_param {
if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
~(dev)->caps.port_mask) & 1 << ((port) - 1))
+#define mlx4_foreach_ib_transport_port(port, dev) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \
+ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -474,6 +506,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 53c5fdb6eac4..f407cd4bfb34 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -44,15 +44,24 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_PORT_REINIT,
};
+enum mlx4_protocol {
+ MLX4_PROTOCOL_IB,
+ MLX4_PROTOCOL_EN,
+};
+
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, int port);
+ void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list;
+ enum mlx4_protocol protocol;
};
int mlx4_register_interface(struct mlx4_interface *intf);
void mlx4_unregister_interface(struct mlx4_interface *intf);
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+
#endif /* MLX4_DRIVER_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 7abe64326f72..0eeb2a1a867c 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -109,10 +109,11 @@ struct mlx4_qp_path {
__be32 tclass_flowlabel;
u8 rgid[16];
u8 sched_queue;
- u8 snooper_flags;
+ u8 vlan_index;
u8 reserved3[2];
u8 counter_index;
- u8 reserved4[7];
+ u8 reserved4;
+ u8 dmac[6];
};
struct mlx4_qp_context {
@@ -166,6 +167,7 @@ enum {
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
+ MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};
struct mlx4_wqe_ctrl_seg {
@@ -219,7 +221,8 @@ struct mlx4_wqe_datagram_seg {
__be32 av[8];
__be32 dqpn;
__be32 qkey;
- __be32 reservd[2];
+ __be16 vlan;
+ u8 mac[6];
};
struct mlx4_wqe_lso_seg {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7687228dd3b7..721f451c3029 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -144,6 +144,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
/*
* This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -497,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
-#ifdef NODE_NOT_IN_PAGEFLAGS
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
SECTIONS_PGOFF : ZONES_PGOFF)
@@ -718,12 +719,21 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR 0x0004
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */
+#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
+#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
+
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+ VM_FAULT_HWPOISON_LARGE)
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
/*
* Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
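
VM_FAULT_HWPOISON_LARGE borrows the upper fault-code bits to record which huge page size was hit: the hugetlb fault path encodes the hstate index with VM_FAULT_SET_HINDEX(), and the consumer recovers a page-size shift via VM_FAULT_GET_HINDEX() plus hstate_index_to_shift() from the hugetlb.h hunk earlier. A hedged sketch of both ends (the demo_* wrappers are invented):

        /* Hedged sketch of encoding/decoding the hstate index in a fault code. */

        /* producer side, e.g. inside a hugetlb fault handler: */
        static int demo_report_poison(unsigned int hstate_idx)
        {
                return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_idx);
        }

        /* consumer side, e.g. where the fault is turned into a SIGBUS: */
        static unsigned int demo_poison_shift(int fault)
        {
                if (fault & VM_FAULT_HWPOISON_LARGE)
                        return hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
                return PAGE_SHIFT;      /* small page */
        }
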
@@ -860,6 +870,7 @@ int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -1023,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *);
int vma_wants_writenotify(struct vm_area_struct *vma);
-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl)
+{
+ pte_t *ptep;
+ __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+ return ptep;
+}
#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
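
The __get_locked_pte()/get_locked_pte() split exists for sparse: the __cond_lock() in the inline wrapper tells it that *ptl is held exactly when a non-NULL pte is returned, silencing context-imbalance warnings without changing the caller pattern. A hedged sketch of that pattern (demo_touch_pte is invented):

        /* Hedged sketch of the usual caller pattern. */
        static int demo_touch_pte(struct mm_struct *mm, unsigned long addr)
        {
                spinlock_t *ptl;
                pte_t *ptep;

                ptep = get_locked_pte(mm, addr, &ptl);  /* lock held iff ptep != NULL */
                if (!ptep)
                        return -ENOMEM;

                /* ... inspect or modify *ptep here ... */

                pte_unmap_unlock(ptep, ptl);            /* drops the lock sparse saw taken */
                return 0;
        }
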
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cb57d657ce4d..bb7288a782fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -310,6 +310,8 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
+ /* How many tasks sharing this mm are OOM_DISABLE */
+ atomic_t oom_disable_count;
};
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 6b7525099e56..8ce082781ccb 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -48,6 +48,7 @@ struct mmc_ext_csd {
unsigned int sa_timeout; /* Units: 100ns */
unsigned int hs_max_dtr;
unsigned int sectors;
+ unsigned int card_type;
unsigned int hc_erase_size; /* In sectors */
unsigned int hc_erase_timeout; /* In milliseconds */
unsigned int sec_trim_mult; /* Secure trim multiplier */
@@ -113,6 +114,7 @@ struct mmc_card {
#define MMC_STATE_READONLY (1<<1) /* card is read-only */
#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
+#define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed DDR mode */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -154,11 +156,13 @@ struct mmc_card {
#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
+#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
{
@@ -173,6 +177,8 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
+#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+
#define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7429033acb66..64e013f1cfb8 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -153,6 +153,8 @@ extern int mmc_can_secure_erase_trim(struct mmc_card *card);
extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr);
+extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1575b52c3bfa..6d87f68ce4b6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -50,6 +50,12 @@ struct mmc_ios {
#define MMC_TIMING_LEGACY 0
#define MMC_TIMING_MMC_HS 1
#define MMC_TIMING_SD_HS 2
+
+ unsigned char ddr; /* dual data rate used */
+
+#define MMC_SDR_MODE 0
+#define MMC_1_2V_DDR_MODE 1
+#define MMC_1_8V_DDR_MODE 2
};
struct mmc_host_ops {
@@ -123,6 +129,7 @@ struct mmc_host {
const struct mmc_host_ops *ops;
unsigned int f_min;
unsigned int f_max;
+ unsigned int f_init;
u32 ocr_avail;
struct notifier_block pm_notify;
@@ -157,13 +164,16 @@ struct mmc_host {
#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
+#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */
+ /* DDR mode at 1.8V */
+#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
+ /* DDR mode at 1.2V */
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
- unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
- unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */
+ unsigned short max_segs; /* see blk_queue_max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
@@ -212,6 +222,10 @@ struct mmc_host {
struct led_trigger *led; /* activity led */
#endif
+#ifdef CONFIG_REGULATOR
+ bool regulator_enabled; /* regulator state */
+#endif
+
struct dentry *debugfs_root;
unsigned long private[0] ____cacheline_aligned;
@@ -236,8 +250,8 @@ static inline void *mmc_priv(struct mmc_host *host)
extern int mmc_suspend_host(struct mmc_host *);
extern int mmc_resume_host(struct mmc_host *);
-extern void mmc_power_save_host(struct mmc_host *host);
-extern void mmc_power_restore_host(struct mmc_host *host);
+extern int mmc_power_save_host(struct mmc_host *host);
+extern int mmc_power_restore_host(struct mmc_host *host);
extern void mmc_detect_change(struct mmc_host *, unsigned long delay);
extern void mmc_request_done(struct mmc_host *, struct mmc_request *);
@@ -250,8 +264,24 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
struct regulator;
+#ifdef CONFIG_REGULATOR
int mmc_regulator_get_ocrmask(struct regulator *supply);
-int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit);
+#else
+static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ return 0;
+}
+
+static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ return 0;
+}
+#endif
int mmc_card_awake(struct mmc_host *host);
int mmc_card_sleep(struct mmc_host *host);
@@ -268,5 +298,13 @@ static inline void mmc_set_disable_delay(struct mmc_host *host,
host->disable_delay = disable_delay;
}
+/* Module parameter */
+extern int mmc_assume_removable;
+
+static inline int mmc_card_is_removable(struct mmc_host *host)
+{
+ return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
+}
+
#endif
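
mmc_regulator_set_ocr() now also takes the mmc_host, so the core can track the on/off state in the new regulator_enabled field rather than every host driver keeping its own flag, and both regulator helpers turn into stubs without CONFIG_REGULATOR. A hedged sketch of how a host driver's ->set_ios() would call it; struct demo_host and its vcc member are assumptions, not taken from a real driver:

        /* Hedged sketch of a host driver's power handling with the new signature. */
        struct demo_host {
                struct regulator *vcc;
        };

        static void demo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        {
                struct demo_host *host = mmc_priv(mmc);

                if (ios->power_mode == MMC_POWER_OFF)
                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
                else
                        mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
        }
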
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index dd11ae51fb68..956fbd877692 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -277,11 +277,19 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */
#define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */
-#define EXT_CSD_CARD_TYPE_MASK 0x3 /* Mask out reserved and DDR bits */
+#define EXT_CSD_CARD_TYPE_MASK 0xF /* Mask out reserved bits */
+#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */
+ /* DDR mode @1.8V or 3V I/O */
+#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */
+ /* DDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \
+ | EXT_CSD_CARD_TYPE_DDR_1_2V)
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
+#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */
+#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
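
The widened EXT_CSD_CARD_TYPE_MASK and the new DDR bits are matched against the MMC_CAP_1_8V_DDR / MMC_CAP_1_2V_DDR host capabilities from host.h when choosing a bus timing. A hedged sketch of that negotiation, not the exact core code:

        /* Hedged sketch of the DDR negotiation between card type and host caps. */
        static int demo_pick_ddr_mode(struct mmc_card *card, struct mmc_host *host)
        {
                unsigned int type = card->ext_csd.card_type & EXT_CSD_CARD_TYPE_MASK;

                if ((type & EXT_CSD_CARD_TYPE_DDR_1_8V) &&
                    (host->caps & MMC_CAP_1_8V_DDR))
                        return MMC_1_8V_DDR_MODE;

                if ((type & EXT_CSD_CARD_TYPE_DDR_1_2V) &&
                    (host->caps & MMC_CAP_1_2V_DDR))
                        return MMC_1_2V_DDR_MODE;

                return MMC_SDR_MODE;
        }
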
diff --git a/include/linux/sdhci-pltfm.h b/include/linux/mmc/sdhci-pltfm.h
index 0239bd70241e..548d59d404cb 100644
--- a/include/linux/sdhci-pltfm.h
+++ b/include/linux/mmc/sdhci-pltfm.h
@@ -28,7 +28,7 @@ struct sdhci_host;
struct sdhci_pltfm_data {
struct sdhci_ops *ops;
unsigned int quirks;
- int (*init)(struct sdhci_host *host);
+ int (*init)(struct sdhci_host *host, struct sdhci_pltfm_data *pdata);
void (*exit)(struct sdhci_host *host);
};
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
new file mode 100644
index 000000000000..1fdc673f2396
--- /dev/null
+++ b/include/linux/mmc/sdhci.h
@@ -0,0 +1,144 @@
+/*
+ * linux/include/linux/mmc/sdhci.h - Secure Digital Host Controller Interface
+ *
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+#ifndef __SDHCI_H
+#define __SDHCI_H
+
+#include <linux/scatterlist.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+struct sdhci_host {
+ /* Data set by hardware interface driver */
+ const char *hw_name; /* Hardware bus name */
+
+ unsigned int quirks; /* Deviations from spec. */
+
+/* Controller doesn't honor resets unless we touch the clock register */
+#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
+/* Controller has bad caps bits, but really supports DMA */
+#define SDHCI_QUIRK_FORCE_DMA (1<<1)
+/* Controller doesn't like to be reset when there is no card inserted. */
+#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
+/* Controller doesn't like clearing the power reg before a change */
+#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
+/* Controller has flaky internal state so reset it on each ios change */
+#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
+/* Controller has an unusable DMA engine */
+#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
+/* Controller has an unusable ADMA engine */
+#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
+/* Controller can only DMA from 32-bit aligned addresses */
+#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
+/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
+#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
+/* Controller can only ADMA chunks that are a multiple of 32 bits */
+#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
+/* Controller needs to be reset after each request to stay stable */
+#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
+/* Controller needs voltage and power writes to happen separately */
+#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
+/* Controller provides an incorrect timeout value for transfers */
+#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
+/* Controller has an issue with buffer bits for small transfers */
+#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
+/* Controller does not provide transfer-complete interrupt when not busy */
+#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
+/* Controller has unreliable card detection */
+#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+/* Controller reports inverted write-protect state */
+#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+/* Controller has nonstandard clock management */
+#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
+/* Controller does not like fast PIO transfers */
+#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+/* Controller losing signal/interrupt enable states after reset */
+#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
+/* Controller has to be forced to use block size of 2048 bytes */
+#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+/* Controller cannot do multi-block transfers */
+#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
+/* Controller can only handle 1-bit data transfers */
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
+/* Controller needs 10ms delay between applying power and clock */
+#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
+/* Controller uses SDCLK instead of TMCLK for data timeouts */
+#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
+/* Controller is missing device caps. Use caps provided by host */
+#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
+/* Controller uses Auto CMD12 command to stop the transfer */
+#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
+/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
+#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+
+ const struct sdhci_ops *ops; /* Low level hw interface */
+
+ struct regulator *vmmc; /* Power regulator */
+
+ /* Internal data */
+ struct mmc_host *mmc; /* MMC structure */
+ u64 dma_mask; /* custom DMA mask */
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+ struct led_classdev led; /* LED control */
+ char led_name[32];
+#endif
+
+ spinlock_t lock; /* Mutex */
+
+ int flags; /* Host attributes */
+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
+#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
+#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
+#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
+
+ unsigned int version; /* SDHCI spec. version */
+
+ unsigned int max_clk; /* Max possible freq (MHz) */
+ unsigned int timeout_clk; /* Timeout freq (KHz) */
+
+ unsigned int clock; /* Current clock (MHz) */
+ u8 pwr; /* Current voltage */
+
+ struct mmc_request *mrq; /* Current request */
+ struct mmc_command *cmd; /* Current command */
+ struct mmc_data *data; /* Current data request */
+ unsigned int data_early:1; /* Data finished before cmd */
+
+ struct sg_mapping_iter sg_miter; /* SG state for PIO */
+ unsigned int blocks; /* remaining PIO blocks */
+
+ int sg_count; /* Mapped sg entries */
+
+ u8 *adma_desc; /* ADMA descriptor table */
+ u8 *align_buffer; /* Bounce buffer */
+
+ dma_addr_t adma_addr; /* Mapped ADMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+ struct tasklet_struct card_tasklet; /* Tasklet structures */
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list timer; /* Timer for timeouts */
+
+ unsigned int caps; /* Alternative capabilities */
+
+ unsigned long private[0] ____cacheline_aligned;
+};
+#endif /* __SDHCI_H */
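Note: the quirks field above is an ordinary bitmask, so host-interface drivers just OR the relevant SDHCI_QUIRK_* constants into it before registering the host. A minimal sketch of that pattern follows; the foo_sdhci_apply_quirks() helper and the particular quirks chosen are hypothetical, not part of this patch.

#include <linux/kernel.h>
#include <linux/mmc/sdhci.h>

/* Hypothetical glue code: record this controller's known deviations. */
static void foo_sdhci_apply_quirks(struct sdhci_host *host)
{
	/* The ADMA engine is advertised but unreliable on this silicon. */
	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	/* No usable card-detect line, so let the MMC core poll instead. */
	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (host->quirks & SDHCI_QUIRK_BROKEN_DMA)
		pr_info("%s: falling back to PIO transfers\n", host->hw_name);
}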
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 4e02ee2b071e..43dcfbdc39de 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -227,7 +227,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
/*
* These two macros will sometime replace ptep_clear_flush.
- * ptep_clear_flush is impleemnted as macro itself, so this also is
+ * ptep_clear_flush is implemented as macro itself, so this also is
* implemented as a macro until ptep_clear_flush will converted to an
* inline function, to diminish the risk of compilation failure. The
* invalidate_page method over time can be moved outside the PT lock
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3984c4eb41fd..39c24ebe9cfd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -104,6 +104,8 @@ enum zone_stat_item {
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
+ NR_DIRTIED, /* page dirtyings since bootup */
+ NR_WRITTEN, /* page writings since bootup */
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
NUMA_MISS, /* allocated in non intended node */
@@ -421,6 +423,9 @@ struct zone {
typedef enum {
ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
+ ZONE_CONGESTED, /* zone has many dirty pages backed by
+ * a congested BDI
+ */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -438,6 +443,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
clear_bit(flag, &zone->flags);
}
+static inline int zone_is_reclaim_congested(const struct zone *zone)
+{
+ return test_bit(ZONE_CONGESTED, &zone->flags);
+}
+
static inline int zone_is_reclaim_locked(const struct zone *zone)
{
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
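Note: the new ZONE_CONGESTED bit reuses the existing zone_flags_t helpers shown above. The sketch below illustrates how reclaim code might set and test it; the note_zone_congestion()/should_throttle() helpers are invented for illustration and are not part of this patch.

#include <linux/mmzone.h>

/* Illustrative only: mark a zone whose dirty pages sit behind a congested BDI. */
static void note_zone_congestion(struct zone *zone, bool bdi_congested)
{
	if (bdi_congested)
		zone_set_flag(zone, ZONE_CONGESTED);
	else
		zone_clear_flag(zone, ZONE_CONGESTED);
}

static bool should_throttle(struct zone *zone)
{
	/* zone_is_reclaim_congested() is the helper added by this patch. */
	return zone_is_reclaim_congested(zone);
}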
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 9d2f1837b3d8..112adf8bd47d 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -21,8 +21,8 @@
#define __module_cat(a,b) ___module_cat(a,b)
#define __MODULE_INFO(tag, name, info) \
static const char __module_cat(name,__LINE__)[] \
- __used \
- __attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info
+ __used __attribute__((section(".modinfo"), unused, aligned(1))) \
+ = __stringify(tag) "=" info
#else /* !MODULE */
#define __MODULE_INFO(tag, name, info)
#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index dee0b11a8759..16faa130088c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -229,6 +229,8 @@ enum {
extern int sock_wake_async(struct socket *sk, int how, int band);
extern int sock_register(const struct net_proto_family *fam);
extern void sock_unregister(int family);
+extern int __sock_create(struct net *net, int family, int type, int proto,
+ struct socket **res, int kern);
extern int sock_create(int family, int type, int proto,
struct socket **res);
extern int sock_create_kern(int family, int type, int proto,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fcd3dda86322..072652d94d9f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
table->ents[hash & table->mask] = RPS_NO_CPU;
}
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
- struct rps_map *rps_map;
- struct rps_dev_flow_table *rps_flow_table;
- struct kobject kobj;
- struct netdev_rx_queue *first;
- atomic_t count;
+ struct rps_map __rcu *rps_map;
+ struct rps_dev_flow_table __rcu *rps_flow_table;
+ struct kobject kobj;
+ struct netdev_rx_queue *first;
+ atomic_t count;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
@@ -944,7 +944,7 @@ struct net_device {
/* Protocol specific pointers */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- struct vlan_group *vlgrp; /* VLAN group */
+ struct vlan_group __rcu *vlgrp; /* VLAN group */
#endif
#ifdef CONFIG_NET_DSA
void *dsa_ptr; /* dsa specific data */
@@ -952,7 +952,7 @@ struct net_device {
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
- void *ip6_ptr; /* IPv6 specific data */
+ struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
void *ec_ptr; /* Econet specific data */
void *ax25_ptr; /* AX.25 specific data */
struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
@@ -1072,7 +1072,7 @@ struct net_device {
struct pcpu_dstats __percpu *dstats; /* dummy stats */
};
/* GARP */
- struct garp_port *garp_port;
+ struct garp_port __rcu *garp_port;
/* class/net/name entry */
struct device dev;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 07e40c625972..4925b22219d2 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -17,7 +17,9 @@
#define NFS4_BITMAP_SIZE 2
#define NFS4_VERIFIER_SIZE 8
-#define NFS4_STATEID_SIZE 16
+#define NFS4_STATEID_SEQID_SIZE 4
+#define NFS4_STATEID_OTHER_SIZE 12
+#define NFS4_STATEID_SIZE (NFS4_STATEID_SEQID_SIZE + NFS4_STATEID_OTHER_SIZE)
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
@@ -61,6 +63,9 @@
#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
+#define NFS4_CDFC4_FORE 0x1
+#define NFS4_CDFC4_BACK 0x2
+
#define NFS4_SET_TO_SERVER_TIME 0
#define NFS4_SET_TO_CLIENT_TIME 1
@@ -167,7 +172,16 @@ struct nfs4_acl {
};
typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
-typedef struct { char data[NFS4_STATEID_SIZE]; } nfs4_stateid;
+
+struct nfs41_stateid {
+ __be32 seqid;
+ char other[NFS4_STATEID_OTHER_SIZE];
+} __attribute__ ((packed));
+
+typedef union {
+ char data[NFS4_STATEID_SIZE];
+ struct nfs41_stateid stateid;
+} nfs4_stateid;
enum nfs_opnum4 {
OP_ACCESS = 3,
@@ -471,6 +485,8 @@ enum lock_type4 {
#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
@@ -532,6 +548,8 @@ enum {
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
NFSPROC4_CLNT_RECLAIM_COMPLETE,
+ NFSPROC4_CLNT_LAYOUTGET,
+ NFSPROC4_CLNT_GETDEVICEINFO,
};
/* nfs41 types */
@@ -550,6 +568,49 @@ enum state_protect_how4 {
SP4_SSV = 2
};
+enum pnfs_layouttype {
+ LAYOUT_NFSV4_1_FILES = 1,
+ LAYOUT_OSD2_OBJECTS = 2,
+ LAYOUT_BLOCK_VOLUME = 3,
+};
+
+/* used for both layout return and recall */
+enum pnfs_layoutreturn_type {
+ RETURN_FILE = 1,
+ RETURN_FSID = 2,
+ RETURN_ALL = 3
+};
+
+enum pnfs_iomode {
+ IOMODE_READ = 1,
+ IOMODE_RW = 2,
+ IOMODE_ANY = 3,
+};
+
+enum pnfs_notify_deviceid_type4 {
+ NOTIFY_DEVICEID4_CHANGE = 1 << 1,
+ NOTIFY_DEVICEID4_DELETE = 1 << 2,
+};
+
+#define NFL4_UFLG_MASK 0x0000003F
+#define NFL4_UFLG_DENSE 0x00000001
+#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
+#define NFL4_UFLG_STRIPE_UNIT_SIZE_MASK 0xFFFFFFC0
+
+/* Encoded in the loh_body field of type layouthint4 */
+enum filelayout_hint_care4 {
+ NFLH4_CARE_DENSE = NFL4_UFLG_DENSE,
+ NFLH4_CARE_COMMIT_THRU_MDS = NFL4_UFLG_COMMIT_THRU_MDS,
+ NFLH4_CARE_STRIPE_UNIT_SIZE = 0x00000040,
+ NFLH4_CARE_STRIPE_COUNT = 0x00000080
+};
+
+#define NFS4_DEVICEID4_SIZE 16
+
+struct nfs4_deviceid {
+ char data[NFS4_DEVICEID4_SIZE];
+};
+
#endif
#endif
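Note: because nfs4_stateid is now a union, callers that only care about the raw 16 bytes keep using .data, while NFSv4.1 code can address the seqid and "other" parts separately. The comparison helper below is a hypothetical sketch of that split, not code from this series.

#include <linux/string.h>
#include <linux/nfs4.h>

/* Sketch: two stateids refer to the same state if their "other" parts match,
 * regardless of the 4-byte seqid at the front. */
static bool stateid_other_matches(const nfs4_stateid *a, const nfs4_stateid *b)
{
	return memcmp(a->stateid.other, b->stateid.other,
		      NFS4_STATEID_OTHER_SIZE) == 0;
}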
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a46e430d9622..bba26684acdc 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -188,6 +188,9 @@ struct nfs_inode {
struct nfs_delegation __rcu *delegation;
fmode_t delegation_state;
struct rw_semaphore rwsem;
+
+ /* pNFS layout information */
+ struct pnfs_layout_hdr *layout;
#endif /* CONFIG_NFS_V4*/
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache;
@@ -615,6 +618,8 @@ nfs_fileid_to_ino_t(u64 fileid)
#define NFSDBG_CLIENT 0x0200
#define NFSDBG_MOUNT 0x0400
#define NFSDBG_FSCACHE 0x0800
+#define NFSDBG_PNFS 0x1000
+#define NFSDBG_PNFS_LD 0x2000
#define NFSDBG_ALL 0xFFFF
#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 5eef862ec187..452d96436d26 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -82,6 +82,8 @@ struct nfs_client {
/* The flags used for obtaining the clientid during EXCHANGE_ID */
u32 cl_exchange_flags;
struct nfs4_session *cl_session; /* sharred session */
+ struct list_head cl_layouts;
+ struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_FSCACHE
@@ -145,6 +147,7 @@ struct nfs_server {
u32 acl_bitmask; /* V4 bitmask representing the ACEs
that are supported on this
filesystem */
+ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
#endif
void (*destroy)(struct nfs_server *);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index da7a1300dc60..ba6cc8f223c9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -114,6 +114,7 @@ struct nfs_fsinfo {
__u64 maxfilesize;
struct timespec time_delta; /* server time granularity */
__u32 lease_time; /* in seconds */
+ __u32 layouttype; /* supported pnfs layout driver */
};
struct nfs_fsstat {
@@ -186,6 +187,55 @@ struct nfs4_get_lease_time_res {
struct nfs4_sequence_res lr_seq_res;
};
+#define PNFS_LAYOUT_MAXSIZE 4096
+
+struct nfs4_layoutdriver_data {
+ __u32 len;
+ void *buf;
+};
+
+struct pnfs_layout_range {
+ u32 iomode;
+ u64 offset;
+ u64 length;
+};
+
+struct nfs4_layoutget_args {
+ __u32 type;
+ struct pnfs_layout_range range;
+ __u64 minlength;
+ __u32 maxcount;
+ struct inode *inode;
+ struct nfs_open_context *ctx;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_layoutget_res {
+ __u32 return_on_close;
+ struct pnfs_layout_range range;
+ __u32 type;
+ nfs4_stateid stateid;
+ struct nfs4_layoutdriver_data layout;
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_layoutget {
+ struct nfs4_layoutget_args args;
+ struct nfs4_layoutget_res res;
+ struct pnfs_layout_segment **lsegpp;
+ int status;
+};
+
+struct nfs4_getdeviceinfo_args {
+ struct pnfs_device *pdev;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_getdeviceinfo_res {
+ struct pnfs_device *pdev;
+ struct nfs4_sequence_res seq_res;
+};
+
/*
* Arguments to the open call.
*/
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e8c06122be36..19ef95d293ae 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -67,7 +67,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
#define get_pageblock_flags(page) \
get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
-#define set_pageblock_flags(page) \
- set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+#define set_pageblock_flags(page, flags) \
+ set_pageblock_flags_group(page, flags, \
+ 0, NR_PAGEBLOCK_BITS-1)
#endif /* PAGEBLOCK_FLAGS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e12cdc6d79ee..2d1ffe3cf1ee 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,6 +299,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ unsigned int flags);
extern void unlock_page(struct page *page);
static inline void __set_page_locked(struct page *page)
@@ -351,6 +353,17 @@ static inline void lock_page_nosync(struct page *page)
}
/*
+ * lock_page_or_retry - Lock the page, unless this would block and the
+ * caller indicated that it can handle a retry.
+ */
+static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ unsigned int flags)
+{
+ might_sleep();
+ return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+}
+
+/*
* This is exported only for wait_on_page_locked/wait_on_page_writeback.
* Never use this directly!
*/
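Note: lock_page_or_retry() tries the non-blocking trylock first and only falls back to the blocking or retry path when that fails. A hedged sketch of the intended fault-path call pattern follows; the VM_FAULT_RETRY handling shown is an assumption about the caller, not something defined in this header.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of a fault-path caller: back out instead of sleeping on the lock. */
static int fault_lock_page(struct page *page, struct mm_struct *mm,
			   unsigned int fault_flags)
{
	if (!lock_page_or_retry(page, mm, fault_flags)) {
		/* __lock_page_or_retry() gave up; tell the caller to retry. */
		return VM_FAULT_RETRY;
	}
	/* Page is locked; proceed and unlock_page() when done. */
	return 0;
}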
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c8d95e369ff4..7454408c41b6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -541,7 +541,7 @@ struct pci_error_handlers {
struct module;
struct pci_driver {
struct list_head node;
- char *name;
+ const char *name;
const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
@@ -819,6 +819,9 @@ pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
+bool pci_check_pme_status(struct pci_dev *dev);
+void pci_wakeup_event(struct pci_dev *dev);
+void pci_pme_wakeup_bus(struct pci_bus *bus);
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b4c3d1b50037..87e2c2e7aed3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -767,6 +767,8 @@
#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000
#define PCI_DEVICE_ID_ELSA_QS3000 0x3000
+#define PCI_VENDOR_ID_STMICRO 0x104A
+
#define PCI_VENDOR_ID_BUSLOGIC 0x104B
#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
@@ -1251,6 +1253,8 @@
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB
@@ -2430,6 +2434,13 @@
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
+#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
+#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
+#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
+#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
+#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
@@ -2451,9 +2462,10 @@
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
-#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
-#define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN 0x1c41
-#define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC 0x1d40
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
@@ -2662,9 +2674,9 @@
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
-#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
-#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f
-#define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
@@ -2673,8 +2685,8 @@
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
-#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031
-#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032
+#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
+#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 455b9ccdfca7..af83076c31a6 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -300,12 +300,14 @@
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
+/* MSI-X registers */
#define PCI_MSIX_FLAGS 2
#define PCI_MSIX_FLAGS_QSIZE 0x7FF
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
-#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
+#define PCI_MSIX_TABLE 4
+#define PCI_MSIX_PBA 8
+#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
/* CompactPCI Hotswap Register */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 018db9a62ffe..27ef6b190ea6 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -148,18 +148,6 @@
DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \
- DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
- __aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \
- DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
- __aligned(size)
-
-/*
* Intermodule exports for per-CPU variables. sparse forgets about
* address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
* noop if __CHECKER__.
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8a7d510ffa9c..46f6ba56fa91 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -78,6 +78,11 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 1;
}
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return (fbc->counters != NULL);
+}
+
#else
struct percpu_counter {
@@ -143,6 +148,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return 1;
+}
+
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
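Note: percpu_counter_initialized() simply reports whether percpu_counter_init() has set up the per-CPU storage on SMP and is always true on UP. A minimal, hypothetical use is to guard teardown paths:

#include <linux/percpu_counter.h>

/* Illustrative only: destroy a counter just once, even on an error unwind. */
static void foo_release_stats(struct percpu_counter *fbc)
{
	if (percpu_counter_initialized(fbc))
		percpu_counter_destroy(fbc);
}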
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a6e047a04f79..7da5fa845959 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -472,11 +472,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
-int phy_clear_interrupt(struct phy_device *phydev);
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
int phy_init_hw(struct phy_device *phydev);
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, u32 flags, phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -492,17 +488,12 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
-void phy_sanitize_settings(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
-int phy_enable_interrupts(struct phy_device *phydev);
-int phy_disable_interrupts(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev) {
return phydev->drv->read_status(phydev);
}
-int genphy_config_advert(struct phy_device *phydev);
-int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -511,8 +502,6 @@ int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
-void phy_prepare_link(struct phy_device *phydev,
- void (*adjust_link)(struct net_device *));
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
@@ -523,7 +512,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
void phy_device_free(struct phy_device *phydev);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 600cc1fde64d..56e76af78102 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -73,6 +73,8 @@ extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
+extern long select_estimate_accuracy(struct timespec *tv);
+
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 30083a896f36..7d7325685c42 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -89,6 +89,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_POWER_NOW,
@@ -125,7 +126,10 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_BATTERY = 0,
POWER_SUPPLY_TYPE_UPS,
POWER_SUPPLY_TYPE_MAINS,
- POWER_SUPPLY_TYPE_USB,
+ POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */
+ POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
+ POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
+ POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
};
union power_supply_propval {
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 4272521e29e9..092a04f874a8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -100,7 +100,8 @@
#include <linux/sched.h> /* For struct task_struct. */
-extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+extern long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
@@ -108,7 +109,8 @@ extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
-extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
+extern int ptrace_request(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
@@ -132,8 +134,10 @@ static inline void ptrace_unlink(struct task_struct *child)
__ptrace_unlink(child);
}
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data);
/**
* task_ptrace - return %PT_* flags that apply to a task
diff --git a/include/linux/ramoops.h b/include/linux/ramoops.h
new file mode 100644
index 000000000000..0ae68a2c1212
--- /dev/null
+++ b/include/linux/ramoops.h
@@ -0,0 +1,15 @@
+#ifndef __RAMOOPS_H
+#define __RAMOOPS_H
+
+/*
+ * Ramoops platform data
+ * @mem_size memory size for ramoops
+ * @mem_address physical memory address to contain ramoops
+ */
+
+struct ramoops_platform_data {
+ unsigned long mem_size;
+ unsigned long mem_address;
+};
+
+#endif
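Note: a board file hands this structure to the ramoops driver through an ordinary platform device. The sketch below reflects typical usage; the reserved address, size and device registration are hypothetical example values, not taken from this patch.

#include <linux/platform_device.h>
#include <linux/ramoops.h>

static struct ramoops_platform_data board_ramoops_data = {
	.mem_size	= 1024 * 1024,	/* hypothetical 1 MiB reserved region */
	.mem_address	= 0x8f000000,	/* board-specific, example only */
};

static struct platform_device board_ramoops_dev = {
	.name	= "ramoops",
	.dev	= {
		.platform_data = &board_ramoops_data,
	},
};

/* Typically registered from board init code with
 *	platform_device_register(&board_ramoops_dev);
 */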
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 8f69d09a41a5..03ff67b0cdf5 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -36,6 +36,8 @@ static inline void ratelimit_state_init(struct ratelimit_state *rs,
rs->begin = 0;
}
+extern struct ratelimit_state printk_ratelimit_state;
+
extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
#define __ratelimit(state) ___ratelimit(state, __func__)
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 91a4177e60ce..5ca47e59b727 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2072,6 +2072,8 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
+
/* namei.c */
void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 25b4f686d918..8d3a2486544d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -62,18 +62,6 @@ enum ring_buffer_type {
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
-/**
- * ring_buffer_event_time_delta - return the delta timestamp of the event
- * @event: the event to get the delta timestamp of
- *
- * The delta timestamp is the 27 bit timestamp since the last event.
- */
-static inline unsigned
-ring_buffer_event_time_delta(struct ring_buffer_event *event)
-{
- return event->time_delta;
-}
-
/*
* ring_buffer_discard_commit will remove an event that has not
* ben committed yet. If this is used, then ring_buffer_unlock_commit
diff --git a/include/linux/rio.h b/include/linux/rio.h
index bd6eb0ed34a7..0bed941f9b13 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -67,6 +67,7 @@
#define RIO_PW_MSG_SIZE 64
extern struct bus_type rio_bus_type;
+extern struct device rio_bus;
extern struct list_head rio_devices; /* list of all devices */
struct rio_mport;
@@ -98,6 +99,7 @@ union rio_pw_msg;
* @riores: RIO resources this device owns
* @pwcback: port-write callback function for this device
* @destid: Network destination ID
+ * @prev: Previous RIO device connected to the current one
*/
struct rio_dev {
struct list_head global_list; /* node in list of all RIO devices */
@@ -111,7 +113,7 @@ struct rio_dev {
u16 asm_rev;
u16 efptr;
u32 pef;
- u32 swpinfo; /* Only used for switches */
+ u32 swpinfo;
u32 src_ops;
u32 dst_ops;
u32 comp_tag;
@@ -124,6 +126,7 @@ struct rio_dev {
struct resource riores[RIO_MAX_DEV_RESOURCES];
int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
u16 destid;
+ struct rio_dev *prev;
};
#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
@@ -174,6 +177,7 @@ enum rio_phy_type {
* @index: Port index, unique among all port interfaces of the same type
* @sys_size: RapidIO common transport system size
* @phy_type: RapidIO phy type
+ * @phys_efptr: RIO port extended features pointer
* @name: Port name string
* @priv: Master port private data
*/
@@ -195,6 +199,7 @@ struct rio_mport {
* 1 - Large size, 65536 devices.
*/
enum rio_phy_type phy_type; /* RapidIO phy type */
+ u32 phys_efptr;
unsigned char name[40];
void *priv; /* Master port private data */
};
@@ -215,9 +220,14 @@ struct rio_net {
unsigned char id; /* RIO network ID */
};
+/* Definitions used by switch sysfs initialization callback */
+#define RIO_SW_SYSFS_CREATE 1 /* Create switch attributes */
+#define RIO_SW_SYSFS_REMOVE 0 /* Remove switch attributes */
+
/**
* struct rio_switch - RIO switch info
* @node: Node in global list of switches
+ * @rdev: Associated RIO device structure
* @switchid: Switch ID that is unique across a network
* @hopcount: Hopcount to this switch
* @destid: Associated destid in the path
@@ -230,9 +240,12 @@ struct rio_net {
* @get_domain: Callback for switch-specific domain get function
* @em_init: Callback for switch-specific error management initialization function
* @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
*/
struct rio_switch {
struct list_head node;
+ struct rio_dev *rdev;
u16 switchid;
u16 hopcount;
u16 destid;
@@ -250,6 +263,8 @@ struct rio_switch {
u8 *sw_domain);
int (*em_init) (struct rio_dev *dev);
int (*em_handle) (struct rio_dev *dev, u8 swport);
+ int (*sw_sysfs) (struct rio_dev *dev, int create);
+ struct rio_dev *nextdev[0];
};
/* Low-level architecture-dependent routines */
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index db50e1c288b7..ee7b6ada188f 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -34,5 +34,7 @@
#define RIO_DID_IDTCPS16 0x035b
#define RIO_DID_IDTCPS6Q 0x035f
#define RIO_DID_IDTCPS10Q 0x035e
+#define RIO_DID_IDTCPS1848 0x0374
+#define RIO_DID_IDTCPS1616 0x0379
#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index aedee0489fb4..d63dcbaea169 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -33,6 +33,7 @@
#define RIO_PEF_MEMORY 0x40000000 /* [I] MMIO */
#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */
#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */
+#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */
#define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */
#define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */
#define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */
@@ -51,6 +52,7 @@
#define RIO_SWP_INFO_PORT_TOTAL_MASK 0x0000ff00 /* [I] Total number of ports */
#define RIO_SWP_INFO_PORT_NUM_MASK 0x000000ff /* [I] Maintenance transaction port number */
#define RIO_GET_TOTAL_PORTS(x) ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8)
+#define RIO_GET_PORT_NUM(x) (x & RIO_SWP_INFO_PORT_NUM_MASK)
#define RIO_SRC_OPS_CAR 0x18 /* [I] Source Operations CAR */
#define RIO_SRC_OPS_READ 0x00008000 /* [I] Read op */
@@ -159,6 +161,7 @@
#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */
#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70
+#define RIO_STD_RTE_CONF_EXTCFGEN 0x80000000
#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74
#define RIO_STD_RTE_DEFAULT_PORT 0x78
@@ -222,15 +225,17 @@
#define RIO_PORT_GEN_MASTER 0x40000000
#define RIO_PORT_GEN_DISCOVERED 0x20000000
#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
+#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */
+#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */
#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
-#define RIO_PORT_N_MNT_RSP_ASTAT 0x000003e0 /* ackID Status */
+#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */
#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
#define RIO_PORT_N_ACK_CLEAR 0x80000000
-#define RIO_PORT_N_ACK_INBOUND 0x1f000000
-#define RIO_PORT_N_ACK_OUTSTAND 0x00001f00
-#define RIO_PORT_N_ACK_OUTBOUND 0x0000001f
+#define RIO_PORT_N_ACK_INBOUND 0x3f000000
+#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
+#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
@@ -238,7 +243,6 @@
#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
-#define RIO_PORT_N_ERR_STS_CLR_MASK 0x07120204
#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
@@ -261,6 +265,10 @@
#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
+#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */
+#define REM_LTL_ERR_UNSOLR 0x00800000 /* Unsolicited Response */
+#define REM_LTL_ERR_UNSUPTR 0x00400000 /* Unsupported Transaction */
+#define REM_LTL_ERR_IMPSPEC 0x000000ff /* Implementation Specific */
#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */
#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 31b2fd75dcba..bb83c0da2071 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -25,8 +25,8 @@
* pointing to this anon_vma once its vma list is empty.
*/
struct anon_vma {
- spinlock_t lock; /* Serialize access to vma list */
struct anon_vma *root; /* Root of this anon_vma tree */
+ spinlock_t lock; /* Serialize access to vma list */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
/*
@@ -205,9 +205,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *,
/*
* Called from mm/filemap_xip.c to unmap empty zero page
*/
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
unsigned long, spinlock_t **, int);
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+ unsigned long address,
+ spinlock_t **ptlp, int sync)
+{
+ pte_t *ptep;
+
+ __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+ ptlp, sync));
+ return ptep;
+}
+
/*
* Used by swapoff to help locate where page is expected in vma.
*/
@@ -230,7 +241,20 @@ int try_to_munlock(struct page *);
/*
* Called by memory-failure.c to kill processes.
*/
-struct anon_vma *page_lock_anon_vma(struct page *page);
+struct anon_vma *__page_lock_anon_vma(struct page *page);
+
+static inline struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+ struct anon_vma *anon_vma;
+
+ __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
+
+ /* (void) is needed to make gcc happy */
+ (void) __cond_lock(&anon_vma->root->lock, anon_vma);
+
+ return anon_vma;
+}
+
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 56154bbb8da9..be7adb7588e5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -626,6 +626,10 @@ struct signal_struct {
int oom_adj; /* OOM kill score adjustment (bit shift) */
int oom_score_adj; /* OOM kill score adjustment */
+
+ struct mutex cred_guard_mutex; /* guard against foreign influences on
+ * credential calculations
+ * (notably. ptrace) */
+ * (notably, ptrace) */
};
/* Context switch must be unlocked if interrupts are to be enabled */
@@ -1305,9 +1309,6 @@ struct task_struct {
* credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
* credentials (COW) */
- struct mutex cred_guard_mutex; /* guard against foreign influences on
- * credential calculations
- * (notably. ptrace) */
struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
char comm[TASK_COMM_LEN]; /* executable name excluding path
@@ -1706,7 +1707,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
@@ -2237,9 +2237,16 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
+#define lock_task_sighand(tsk, flags) \
+({ struct sighand_struct *__ss; \
+ __cond_lock(&(tsk)->sighand->siglock, \
+ (__ss = __lock_task_sighand(tsk, flags))); \
+ __ss; \
+}) \
+
static inline void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
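Note: callers are unaffected by the rename; lock_task_sighand() is still what they invoke, and the macro only lets sparse see the conditionally taken siglock via __cond_lock(). A short usage sketch, with a made-up helper:

#include <linux/sched.h>

/* Sketch: read a signal_struct field under siglock, tolerating exiting tasks. */
static int read_oom_adj(struct task_struct *task)
{
	unsigned long flags;
	int oom_adj = 0;

	if (lock_task_sighand(task, &flags)) {
		oom_adj = task->signal->oom_adj;
		unlock_task_sighand(task, &flags);
	}
	return oom_adj;
}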
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index 0299b4ce63db..7f770c638e99 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -70,9 +70,6 @@
#define SFI_SIG_APIC "APIC"
#define SFI_SIG_XSDT "XSDT"
#define SFI_SIG_WAKE "WAKE"
-#define SFI_SIG_SPIB "SPIB"
-#define SFI_SIG_I2CB "I2CB"
-#define SFI_SIG_GPEM "GPEM"
#define SFI_SIG_DEVS "DEVS"
#define SFI_SIG_GPIO "GPIO"
@@ -168,27 +165,6 @@ struct sfi_gpio_table_entry {
char pin_name[16];
} __packed;
-struct sfi_spi_table_entry {
- u16 host_num; /* attached to host 0, 1...*/
- u16 cs; /* chip select */
- u16 irq_info;
- char name[16];
- u8 dev_info[10];
-} __packed;
-
-struct sfi_i2c_table_entry {
- u16 host_num;
- u16 addr; /* slave addr */
- u16 irq_info;
- char name[16];
- u8 dev_info[10];
-} __packed;
-
-struct sfi_gpe_table_entry {
- u16 logical_id; /* logical id */
- u16 phys_id; /* physical GPE id */
-} __packed;
-
typedef int (*sfi_table_handler) (struct sfi_table_header *table);
#ifdef CONFIG_SFI
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
index b363b916c909..3ff4961da9b5 100644
--- a/include/linux/signalfd.h
+++ b/include/linux/signalfd.h
@@ -33,6 +33,7 @@ struct signalfd_siginfo {
__u64 ssi_utime;
__u64 ssi_stime;
__u64 ssi_addr;
+ __u16 ssi_addr_lsb;
/*
* Pad strcture to 128 bytes. Remember to update the
@@ -43,7 +44,7 @@ struct signalfd_siginfo {
* comes out of a read(2) and we really don't want to have
* a compat on read(2).
*/
- __u8 __pad[48];
+ __u8 __pad[46];
};
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfa2d20e35f1..6dc95cac6b3d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -13,9 +13,10 @@
extern void cpu_idle(void);
+typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
struct list_head list;
- void (*func) (void *info);
+ smp_call_func_t func;
void *info;
u16 flags;
u16 priv;
@@ -24,8 +25,8 @@ struct call_single_data {
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int wait);
+int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
+ int wait);
#ifdef CONFIG_SMP
@@ -69,15 +70,15 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
- void (*func)(void *info), void *info, bool wait);
+ smp_call_func_t func, void *info, bool wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);
int smp_call_function_any(const struct cpumask *mask,
- void (*func)(void *info), void *info, int wait);
+ smp_call_func_t func, void *info, int wait);
/*
* Generic and arch helpers
@@ -94,7 +95,7 @@ void ipi_call_unlock_irq(void);
/*
* Call a function on all processors
*/
-int on_each_cpu(void (*func) (void *info), void *info, int wait);
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -122,7 +123,7 @@ static inline void smp_send_stop(void) { }
* These macros fold the SMP functionality into a single CPU system
*/
#define raw_smp_processor_id() 0
-static inline int up_smp_call_function(void (*func)(void *), void *info)
+static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
return 0;
}
@@ -143,7 +144,7 @@ static inline void smp_send_reschedule(int cpu) { }
static inline void init_call_single_data(void) { }
static inline int
-smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
+smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
void *info, int wait)
{
return smp_call_function_single(0, func, info, wait);
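Note: smp_call_func_t is purely a typedef cleanup; existing handlers of the form void fn(void *info) still match it. A minimal sketch of the calling convention, with an invented handler:

#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t remote_hits = ATOMIC_INIT(0);

/* Any function of this shape satisfies smp_call_func_t. */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int poke_cpu(int cpu)
{
	/* wait == 1: return only after bump_counter() has run on 'cpu'. */
	return smp_call_function_single(cpu, bump_counter, &remote_hits, 1);
}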
diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h
new file mode 100644
index 000000000000..d85c52f294a0
--- /dev/null
+++ b/include/linux/spi/74x164.h
@@ -0,0 +1,11 @@
+#ifndef LINUX_SPI_74X164_H
+#define LINUX_SPI_74X164_H
+
+#define GEN_74X164_DRIVER_NAME "74x164"
+
+struct gen_74x164_chip_platform_data {
+ /* number assigned to the first GPIO */
+ unsigned base;
+};
+
+#endif
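Note: a board registers the 74x164 shift register as a normal SPI device and points platform_data at this structure. The bus number, chip select, clock rate and GPIO base below are hypothetical example values.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>

static struct gen_74x164_chip_platform_data board_74x164_data = {
	.base = 200,	/* first GPIO number handed to gpiolib */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= GEN_74X164_DRIVER_NAME,
		.max_speed_hz	= 1000000,
		.bus_num	= 1,	/* example bus/CS wiring */
		.chip_select	= 0,
		.platform_data	= &board_74x164_data,
	},
};

/* Typically registered via spi_register_board_info(board_spi_devices,
 * ARRAY_SIZE(board_spi_devices)) from machine init code. */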
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 5bbc447175dc..b2024757edd5 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -122,8 +122,8 @@ extern const struct rpc_authops authnull_ops;
int __init rpc_init_authunix(void);
int __init rpc_init_generic_auth(void);
int __init rpcauth_init_module(void);
-void __exit rpcauth_remove_module(void);
-void __exit rpc_destroy_generic_auth(void);
+void rpcauth_remove_module(void);
+void rpc_destroy_generic_auth(void);
void rpc_destroy_authunix(void);
struct rpc_cred * rpc_lookup_cred(void);
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 7bf3e84b92f4..6950c981882d 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -125,12 +125,15 @@ struct cache_detail {
*/
struct cache_req {
struct cache_deferred_req *(*defer)(struct cache_req *req);
+ int thread_wait; /* How long (jiffies) we can block the
+ * current thread to wait for updates.
+ */
};
/* this must be embedded in a deferred_request that is being
* delayed awaiting cache-fill
*/
struct cache_deferred_req {
- struct list_head hash; /* on hash chain */
+ struct hlist_node hash; /* on hash chain */
struct list_head recent; /* on fifo */
struct cache_head *item; /* cache item we wait on */
void *owner; /* we might need to discard all defered requests
@@ -194,7 +197,9 @@ extern void cache_purge(struct cache_detail *detail);
#define NEVER (0x7FFFFFFF)
extern void __init cache_initialize(void);
extern int cache_register(struct cache_detail *cd);
+extern int cache_register_net(struct cache_detail *cd, struct net *net);
extern void cache_unregister(struct cache_detail *cd);
+extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
mode_t, struct cache_detail *);
@@ -218,14 +223,42 @@ static inline int get_int(char **bpp, int *anint)
return 0;
}
+/*
+ * timestamps kept in the cache are expressed in seconds
+ * since boot. This is the best for measuring differences in
+ * real time.
+ */
+static inline time_t seconds_since_boot(void)
+{
+ struct timespec boot;
+ getboottime(&boot);
+ return get_seconds() - boot.tv_sec;
+}
+
+static inline time_t convert_to_wallclock(time_t sinceboot)
+{
+ struct timespec boot;
+ getboottime(&boot);
+ return boot.tv_sec + sinceboot;
+}
+
static inline time_t get_expiry(char **bpp)
{
int rv;
+ struct timespec boot;
+
if (get_int(bpp, &rv))
return 0;
if (rv < 0)
return 0;
- return rv;
+ getboottime(&boot);
+ return rv - boot.tv_sec;
}
+static inline void sunrpc_invalidate(struct cache_head *h,
+ struct cache_detail *detail)
+{
+ h->expiry_time = seconds_since_boot() - 1;
+ detail->nextcheck = seconds_since_boot();
+}
#endif /* _LINUX_SUNRPC_CACHE_H_ */
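Note: the cache now stores expiry times as seconds since boot rather than wall-clock seconds, which is why get_expiry() subtracts boot.tv_sec from the wall-clock value it parses. A small worked sketch of the conversion (numbers invented for illustration):

#include <linux/sunrpc/cache.h>

/* Sketch: if the box booted at wall-clock t=1000000000 and it is now
 * t=1000000500, seconds_since_boot() returns 500.  An upcall that says
 * "expires at 1000000600" is stored as 600 (since boot), and
 * convert_to_wallclock(600) maps it back to 1000000600 for reporting. */
static int cache_entry_still_valid(struct cache_head *h)
{
	return h->expiry_time >= seconds_since_boot();
}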
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c83df09a8e2b..a5a55f284b7d 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -102,6 +102,7 @@ struct rpc_procinfo {
#ifdef __KERNEL__
struct rpc_create_args {
+ struct net *net;
int protocol;
struct sockaddr *address;
size_t addrsize;
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
deleted file mode 100644
index e3e6a3437f8b..000000000000
--- a/include/linux/sunrpc/gss_spkm3.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * linux/include/linux/sunrpc/gss_spkm3.h
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-#include <linux/sunrpc/auth_gss.h>
-#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/gss_asn1.h>
-
-struct spkm3_ctx {
- struct xdr_netobj ctx_id; /* per message context id */
- int endtime; /* endtime of the context */
- struct xdr_netobj mech_used;
- unsigned int ret_flags ;
- struct xdr_netobj conf_alg;
- struct xdr_netobj derived_conf_key;
- struct xdr_netobj intg_alg;
- struct xdr_netobj derived_integ_key;
-};
-
-/* OIDs declarations for K-ALG, I-ALG, C-ALG, and OWF-ALG */
-extern const struct xdr_netobj hmac_md5_oid;
-extern const struct xdr_netobj cast5_cbc_oid;
-
-/* SPKM InnerContext Token types */
-
-#define SPKM_ERROR_TOK 3
-#define SPKM_MIC_TOK 4
-#define SPKM_WRAP_TOK 5
-#define SPKM_DEL_TOK 6
-
-u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
-
-u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
-
-#define CKSUMTYPE_RSA_MD5 0x0007
-#define CKSUMTYPE_HMAC_MD5 0x0008
-
-s32 make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
- unsigned int hdrlen, struct xdr_buf *body,
- unsigned int body_offset, struct xdr_netobj *cksum);
-void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
-int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
- int explen);
-void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen,
- unsigned char *ctxhdr, int elen, int zbit);
-void spkm3_make_mic_token(unsigned char **tokp, int toklen,
- struct xdr_netobj *mic_hdr,
- struct xdr_netobj *md5cksum, int md5elen, int md5zbit);
-u32 spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen,
- unsigned char **cksum);
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index 5fa0f2084307..680471d1f28a 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -38,8 +38,21 @@ struct svc_stat {
rpcbadclnt;
};
-void rpc_proc_init(void);
-void rpc_proc_exit(void);
+struct net;
+#ifdef CONFIG_PROC_FS
+int rpc_proc_init(struct net *);
+void rpc_proc_exit(struct net *);
+#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void rpc_proc_exit(struct net *net)
+{
+}
+#endif
+
#ifdef MODULE
void rpc_modcount(struct inode *, int);
#endif
@@ -54,9 +67,6 @@ void svc_proc_unregister(const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
-
-extern struct proc_dir_entry *proc_net_rpc;
-
#else
static inline struct proc_dir_entry *rpc_proc_register(struct rpc_stat *s) { return NULL; }
@@ -69,9 +79,6 @@ static inline void svc_proc_unregister(const char *p) {}
static inline void svc_seq_show(struct seq_file *seq,
const struct svc_stat *st) {}
-
-#define proc_net_rpc NULL
-
#endif
#endif /* _LINUX_SUNRPC_STATS_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 5f4e18b3ce73..bbdb680ffbe9 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -12,6 +12,7 @@
struct svc_xprt_ops {
struct svc_xprt *(*xpo_create)(struct svc_serv *,
+ struct net *net,
struct sockaddr *, int,
int);
struct svc_xprt *(*xpo_accept)(struct svc_xprt *);
@@ -32,6 +33,16 @@ struct svc_xprt_class {
u32 xcl_max_payload;
};
+/*
+ * This is embedded in an object that wants a callback before deleting
+ * an xprt; intended for use by NFSv4.1, which needs to know when a
+ * client's tcp connection (and hence possibly a backchannel) goes away.
+ */
+struct svc_xpt_user {
+ struct list_head list;
+ void (*callback)(struct svc_xpt_user *);
+};
+
struct svc_xprt {
struct svc_xprt_class *xpt_class;
struct svc_xprt_ops *xpt_ops;
@@ -66,14 +77,31 @@ struct svc_xprt {
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
+ struct list_head xpt_users; /* callbacks on free */
+
+ struct net *xpt_net;
};
+static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+ spin_lock(&xpt->xpt_lock);
+ list_add(&u->list, &xpt->xpt_users);
+ spin_unlock(&xpt->xpt_lock);
+}
+
+static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+ spin_lock(&xpt->xpt_lock);
+ list_del_init(&u->list);
+ spin_unlock(&xpt->xpt_lock);
+}
+
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
-int svc_create_xprt(struct svc_serv *, const char *, const int,
- const unsigned short, int);
+int svc_create_xprt(struct svc_serv *, const char *, struct net *,
+ const int, const unsigned short, int);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_received(struct svc_xprt *);
void svc_xprt_put(struct svc_xprt *xprt);
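Note: the xpt_users list gives NFSv4.1 a hook that fires when a transport is torn down. Below is a hedged sketch of how a consumer might embed svc_xpt_user and wire up the callback; the backchannel bookkeeping is invented for illustration only.

#include <linux/kernel.h>
#include <linux/sunrpc/svc_xprt.h>

struct my_backchannel {
	struct svc_xpt_user xpt_user;	/* embedded, as the comment above suggests */
	/* ... other per-client state ... */
};

static void my_bc_xprt_gone(struct svc_xpt_user *u)
{
	struct my_backchannel *bc = container_of(u, struct my_backchannel, xpt_user);

	/* The TCP connection (and any backchannel riding on it) is going away. */
	(void)bc;
}

static void my_bc_watch(struct svc_xprt *xprt, struct my_backchannel *bc)
{
	bc->xpt_user.callback = my_bc_xprt_gone;
	register_xpt_user(xprt, &bc->xpt_user);
}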
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index d39dbdc7b10f..25d333c1b571 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -108,10 +108,15 @@ struct auth_ops {
#define SVC_NEGATIVE 4
#define SVC_OK 5
#define SVC_DROP 6
-#define SVC_DENIED 7
-#define SVC_PENDING 8
-#define SVC_COMPLETE 9
+#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
+ * lost so if there is a tcp connection, it
+ * should be closed
+ */
+#define SVC_DENIED 8
+#define SVC_PENDING 9
+#define SVC_COMPLETE 10
+struct svc_xprt;
extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
extern int svc_authorise(struct svc_rqst *rqstp);
@@ -121,13 +126,13 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom);
+extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct in6_addr *addr);
+extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(void);
-extern void svcauth_unix_info_release(void *);
+extern void svcauth_unix_info_release(struct svc_xprt *xpt);
extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
static inline unsigned long hash_str(char *name, int bits)
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index ab91d86565fd..498ab93a81e4 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -132,6 +132,13 @@ xdr_decode_hyper(__be32 *p, __u64 *valp)
return p + 2;
}
+static inline __be32 *
+xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len)
+{
+ memcpy(ptr, p, len);
+ return p + XDR_QUADLEN(len);
+}
+
/*
* Adjust kvec to reflect end of xdr'ed data (RPC client XDR)
*/
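Note: xdr_decode_opaque_fixed() copies a fixed-length opaque out of the XDR stream and advances the cursor by the quad-aligned length. A minimal sketch of decoding a 16-byte NFSv4 stateid with it; the surrounding decode routine is hypothetical.

#include <linux/sunrpc/xdr.h>
#include <linux/nfs4.h>

/* Sketch: pull one stateid off the wire and return the advanced pointer. */
static __be32 *decode_stateid(__be32 *p, nfs4_stateid *sid)
{
	/* 16 bytes == 4 XDR quads, so p advances by four words. */
	return xdr_decode_opaque_fixed(p, sid->data, NFS4_STATEID_SIZE);
}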
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index ff5a77b28c50..89d10d279a20 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -224,6 +224,7 @@ struct rpc_xprt {
bklog_u; /* backlog queue utilization */
} stat;
+ struct net *xprt_net;
const char *address_strings[RPC_DISPLAY_MAX];
};
@@ -249,6 +250,7 @@ static inline int bc_prealloc(struct rpc_rqst *req)
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
+ struct net * net;
struct sockaddr * srcaddr; /* optional local address */
struct sockaddr * dstaddr; /* remote peer address */
size_t addrlen;
@@ -280,6 +282,8 @@ void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release(struct rpc_task *task);
struct rpc_xprt * xprt_get(struct rpc_xprt *xprt);
void xprt_put(struct rpc_xprt *xprt);
+struct rpc_xprt * xprt_alloc(struct net *net, int size, int max_req);
+void xprt_free(struct rpc_xprt *);
static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
{
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7cdd63366f88..eba53e71d2cc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -271,8 +271,18 @@ extern void scan_mapping_unevictable_pages(struct address_space *);
extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int scan_unevictable_register_node(struct node *node)
+{
+ return 0;
+}
+static inline void scan_unevictable_unregister_node(struct node *node)
+{
+}
+#endif
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
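
The swap.h hunk applies the usual kernel pattern of pairing the real declarations with empty static inline stubs when CONFIG_NUMA is off, so call sites can stay free of #ifdef. A small stand-alone sketch of the same idiom, with a made-up HAVE_FEATURE macro standing in for the Kconfig symbol:

#include <stdio.h>

/* Define HAVE_FEATURE (or pass -DHAVE_FEATURE) to mimic the Kconfig option. */

#ifdef HAVE_FEATURE
static int feature_register(int id)
{
	printf("feature registered for node %d\n", id);
	return 0;
}
#else
/* Stub: same signature, does nothing, so callers need no #ifdef. */
static inline int feature_register(int id)
{
	(void)id;
	return 0;
}
#endif

int main(void)
{
	/* Compiles and runs either way; behaviour depends on the config. */
	return feature_register(0);
}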
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 0ff2779c44d0..2e7d81c4e5ad 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -126,6 +126,7 @@
#define MGSL_MODE_BISYNC 4
#define MGSL_MODE_RAW 6
#define MGSL_MODE_BASE_CLOCK 7
+#define MGSL_MODE_XSYNC 8
#define MGSL_BUS_TYPE_ISA 1
#define MGSL_BUS_TYPE_EISA 2
@@ -290,6 +291,10 @@ struct gpio_desc {
#define MGSL_IOCSGPIO _IOW(MGSL_MAGIC_IOC,16,struct gpio_desc)
#define MGSL_IOCGGPIO _IOR(MGSL_MAGIC_IOC,17,struct gpio_desc)
#define MGSL_IOCWAITGPIO _IOWR(MGSL_MAGIC_IOC,18,struct gpio_desc)
+#define MGSL_IOCSXSYNC _IO(MGSL_MAGIC_IOC, 19)
+#define MGSL_IOCGXSYNC _IO(MGSL_MAGIC_IOC, 20)
+#define MGSL_IOCSXCTRL _IO(MGSL_MAGIC_IOC, 21)
+#define MGSL_IOCGXCTRL _IO(MGSL_MAGIC_IOC, 22)
#ifdef __KERNEL__
/* provide 32 bit ioctl compatibility on 64 bit systems */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index e6319d18a55d..cacc27a0e285 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -701,7 +701,8 @@ asmlinkage long sys_nfsservctl(int cmd,
asmlinkage long sys_syslog(int type, char __user *buf, int len);
asmlinkage long sys_uselib(const char __user *library);
asmlinkage long sys_ni_syscall(void);
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data);
+asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+ unsigned long data);
asmlinkage long sys_add_key(const char __user *_type,
const char __user *_description,
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 10db0102a890..3a2e66d88a32 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -150,7 +150,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
*
* Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
*
- * @task->cred_guard_mutex is held by the caller through the do_execve().
+ * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
*/
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
diff --git a/include/linux/types.h b/include/linux/types.h
index 357dbc19606f..c2a9eb44f2fa 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -121,15 +121,7 @@ typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
-/*
- * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
- * common 32/64-bit compat problems.
- * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
- * architectures) and to 8-byte boundaries on 64-bit architetures. The new
- * aligned_64 type enforces 8-byte alignment so that structs containing
- * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
- * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
- */
+/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
#define aligned_le64 __le64 __attribute__((aligned(8)))
@@ -186,7 +178,15 @@ typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
-/* this is a special 64bit data type that is 8-byte aligned */
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures. The new
+ * aligned_u64 type enforces 8-byte alignment so that structs containing
+ * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
#define __aligned_u64 __u64 __attribute__((aligned(8)))
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
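
The relocated comment is about keeping user-visible struct layouts identical across word sizes: a plain 64-bit member after a 32-bit one is 4-byte aligned on x86_32 but 8-byte aligned on 64-bit, so sizes and offsets drift apart unless the alignment is forced. A stand-alone sketch of the effect; building it with and without -m32 on x86 should show the plain variant change layout while the aligned variant stays fixed:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Plain 64-bit member: 4-byte aligned on x86_32, 8-byte aligned on 64-bit. */
struct plain {
	uint32_t flags;
	uint64_t value;
};

/* Forced 8-byte alignment, mirroring aligned_u64 / __aligned_u64. */
struct fixed {
	uint32_t flags;
	uint64_t value __attribute__((aligned(8)));
};

int main(void)
{
	printf("plain: offsetof(value)=%zu sizeof=%zu\n",
	       offsetof(struct plain, value), sizeof(struct plain));
	printf("fixed: offsetof(value)=%zu sizeof=%zu\n",
	       offsetof(struct fixed, value), sizeof(struct fixed));
	return 0;
}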
diff --git a/include/linux/via-core.h b/include/linux/via-core.h
index 7ffb521e1a7a..38bffd8ccca5 100644
--- a/include/linux/via-core.h
+++ b/include/linux/via-core.h
@@ -81,7 +81,7 @@ struct viafb_dev {
unsigned long fbmem_start;
long fbmem_len;
void __iomem *fbmem;
-#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
long camera_fbmem_offset;
long camera_fbmem_size;
#endif
@@ -138,6 +138,7 @@ void viafb_irq_disable(u32 mask);
#define VDE_I_LVDSSIEN 0x40000000 /* LVDS Sense enable */
#define VDE_I_ENABLE 0x80000000 /* Global interrupt enable */
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
/*
* DMA management.
*/
@@ -172,6 +173,7 @@ int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg);
*/
#define VGA_WIDTH 640
#define VGA_HEIGHT 480
+#endif /* CONFIG_VIDEO_VIA_CAMERA */
/*
* Indexed port operations. Note that these are all multi-op
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 61490c6dcdbd..5f6f47044abf 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -363,6 +363,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
+#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */
+#define V4L2_PIX_FMT_KONICA420 v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
/*
* F O R M A T E N U M E R A T I O N
@@ -1045,8 +1047,11 @@ enum v4l2_colorfx {
#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
+#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37)
+#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38)
+
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+37)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+39)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1363,6 +1368,8 @@ struct v4l2_modulator {
#define V4L2_TUNER_CAP_SAP 0x0020
#define V4L2_TUNER_CAP_LANG1 0x0040
#define V4L2_TUNER_CAP_RDS 0x0080
+#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
+#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
/* Flags for the 'rxsubchans' field */
#define V4L2_TUNER_SUB_MONO 0x0001
@@ -1392,7 +1399,8 @@ struct v4l2_hw_freq_seek {
enum v4l2_tuner_type type;
__u32 seek_upward;
__u32 wrap_around;
- __u32 reserved[8];
+ __u32 spacing;
+ __u32 reserved[7];
};
/*
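
The spacing member carved out of the reserved area lets applications request a specific hardware-seek step instead of the driver default. A hedged user-space sketch of how a radio application might use it; the /dev/radio0 node and the 100 kHz step are illustrative assumptions, the 62.5 Hz units apply only when the tuner reports V4L2_TUNER_CAP_LOW, and leaving spacing at 0 keeps the old behaviour:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>	/* needs this version of the header for .spacing */

int main(void)
{
	/* /dev/radio0 is an assumption; any radio node will do. */
	int fd = open("/dev/radio0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct v4l2_hw_freq_seek seek;
	memset(&seek, 0, sizeof(seek));
	seek.tuner = 0;
	seek.type = V4L2_TUNER_RADIO;
	seek.seek_upward = 1;
	seek.wrap_around = 1;
	/* New field: seek in ~100 kHz steps, assuming the tuner reports
	 * V4L2_TUNER_CAP_LOW (frequencies in units of 62.5 Hz).
	 * Leaving this 0 keeps the driver's default spacing. */
	seek.spacing = 1600;

	if (ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek) < 0)
		perror("VIDIOC_S_HW_FREQ_SEEK");

	close(fd);
	return 0;
}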
diff --git a/include/linux/videotext.h b/include/linux/videotext.h
deleted file mode 100644
index 3e68c8d1c7f7..000000000000
--- a/include/linux/videotext.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef _VTX_H
-#define _VTX_H
-
-/*
- * Teletext (=Videotext) hardware decoders using interface /dev/vtx
- * Do not confuse with drivers using /dev/vbi which decode videotext by software
- *
- * Videotext IOCTLs changed in order to use _IO() macros defined in <linux/ioctl.h>,
- * unused tuner IOCTLs cleaned up by
- * Michael Geng <linux@MichaelGeng.de>
- *
- * Copyright (c) 1994-97 Martin Buck <martin-2.buck@student.uni-ulm.de>
- * Read COPYING for more information
- *
- */
-
-
-/*
- * Videotext ioctls
- */
-#define VTXIOCGETINFO _IOR (0x81, 1, vtx_info_t)
-#define VTXIOCCLRPAGE _IOW (0x81, 2, vtx_pagereq_t)
-#define VTXIOCCLRFOUND _IOW (0x81, 3, vtx_pagereq_t)
-#define VTXIOCPAGEREQ _IOW (0x81, 4, vtx_pagereq_t)
-#define VTXIOCGETSTAT _IOW (0x81, 5, vtx_pagereq_t)
-#define VTXIOCGETPAGE _IOW (0x81, 6, vtx_pagereq_t)
-#define VTXIOCSTOPDAU _IOW (0x81, 7, vtx_pagereq_t)
-#define VTXIOCPUTPAGE _IO (0x81, 8)
-#define VTXIOCSETDISP _IO (0x81, 9)
-#define VTXIOCPUTSTAT _IO (0x81, 10)
-#define VTXIOCCLRCACHE _IO (0x81, 11)
-#define VTXIOCSETVIRT _IOW (0x81, 12, long)
-
-/* for compatibility, will go away some day */
-#define VTXIOCGETINFO_OLD 0x7101 /* get version of driver & capabilities of vtx-chipset */
-#define VTXIOCCLRPAGE_OLD 0x7102 /* clear page-buffer */
-#define VTXIOCCLRFOUND_OLD 0x7103 /* clear bits indicating that page was found */
-#define VTXIOCPAGEREQ_OLD 0x7104 /* search for page */
-#define VTXIOCGETSTAT_OLD 0x7105 /* get status of page-buffer */
-#define VTXIOCGETPAGE_OLD 0x7106 /* get contents of page-buffer */
-#define VTXIOCSTOPDAU_OLD 0x7107 /* stop data acquisition unit */
-#define VTXIOCPUTPAGE_OLD 0x7108 /* display page on TV-screen */
-#define VTXIOCSETDISP_OLD 0x7109 /* set TV-mode */
-#define VTXIOCPUTSTAT_OLD 0x710a /* set status of TV-output-buffer */
-#define VTXIOCCLRCACHE_OLD 0x710b /* clear cache on VTX-interface (if avail.) */
-#define VTXIOCSETVIRT_OLD 0x710c /* turn on virtual mode (this disables TV-display) */
-
-/*
- * Definitions for VTXIOCGETINFO
- */
-
-#define SAA5243 0
-#define SAA5246 1
-#define SAA5249 2
-#define SAA5248 3
-#define XSTV5346 4
-
-typedef struct {
- int version_major, version_minor; /* version of driver; if version_major changes, driver */
- /* is not backward compatible!!! CHECK THIS!!! */
- int numpages; /* number of page-buffers of vtx-chipset */
- int cct_type; /* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or
- * SAA5249) */
-}
-vtx_info_t;
-
-
-/*
- * Definitions for VTXIOC{CLRPAGE,CLRFOUND,PAGEREQ,GETSTAT,GETPAGE,STOPDAU,PUTPAGE,SETDISP}
- */
-
-#define MIN_UNIT (1<<0)
-#define MIN_TEN (1<<1)
-#define HR_UNIT (1<<2)
-#define HR_TEN (1<<3)
-#define PG_UNIT (1<<4)
-#define PG_TEN (1<<5)
-#define PG_HUND (1<<6)
-#define PGMASK_MAX (1<<7)
-#define PGMASK_PAGE (PG_HUND | PG_TEN | PG_UNIT)
-#define PGMASK_HOUR (HR_TEN | HR_UNIT)
-#define PGMASK_MINUTE (MIN_TEN | MIN_UNIT)
-
-typedef struct
-{
- int page; /* number of requested page (hexadecimal) */
- int hour; /* requested hour (hexadecimal) */
- int minute; /* requested minute (hexadecimal) */
- int pagemask; /* mask defining which values of the above are set */
- int pgbuf; /* buffer where page will be stored */
- int start; /* start of requested part of page */
- int end; /* end of requested part of page */
- void __user *buffer; /* pointer to beginning of destination buffer */
-}
-vtx_pagereq_t;
-
-
-/*
- * Definitions for VTXIOC{GETSTAT,PUTSTAT}
- */
-
-#define VTX_PAGESIZE (40 * 24)
-#define VTX_VIRTUALSIZE (40 * 49)
-
-typedef struct
-{
- int pagenum; /* number of page (hexadecimal) */
- int hour; /* hour (hexadecimal) */
- int minute; /* minute (hexadecimal) */
- int charset; /* national charset */
- unsigned delete : 1; /* delete page (C4) */
- unsigned headline : 1; /* insert headline (C5) */
- unsigned subtitle : 1; /* insert subtitle (C6) */
- unsigned supp_header : 1; /* suppress header (C7) */
- unsigned update : 1; /* update page (C8) */
- unsigned inter_seq : 1; /* interrupted sequence (C9) */
- unsigned dis_disp : 1; /* disable/suppress display (C10) */
- unsigned serial : 1; /* serial mode (C11) */
- unsigned notfound : 1; /* /FOUND */
- unsigned pblf : 1; /* PBLF */
- unsigned hamming : 1; /* hamming-error occurred */
-}
-vtx_pageinfo_t;
-
-#endif /* _VTX_H */
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h
index 1faa80d92f05..e68b439b2860 100644
--- a/include/linux/virtio_9p.h
+++ b/include/linux/virtio_9p.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
-#include <linux/types.h>
/* The feature bitmap for virtio 9P */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 63a4fe6d51bd..a03dcf62ca9d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -53,8 +53,10 @@ static inline void vmalloc_init(void)
#endif
extern void *vmalloc(unsigned long size);
+extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
+extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
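
vzalloc()/vzalloc_node() are new convenience allocators; the declarations alone do not say so, but by analogy with kzalloc() they are expected to return zeroed vmalloc memory. A minimal behavioural sketch only, not the actual mm/vmalloc.c implementation (which can push the zeroing into the page allocator):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Hedged sketch: equivalent behaviour, not the real implementation. */
static void *vzalloc_like(unsigned long size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}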
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 070bb7a88936..0c0771f06bfa 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
__INIT_WORK((_work), (_func), 0); \
} while (0)
-#define INIT_WORK_ON_STACK(_work, _func) \
+#define INIT_WORK_ONSTACK(_work, _func) \
do { \
__INIT_WORK((_work), (_func), 1); \
} while (0)
@@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
init_timer(&(_work)->timer); \
} while (0)
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
do { \
- INIT_WORK_ON_STACK(&(_work)->work, (_func)); \
+ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
init_timer_on_stack(&(_work)->timer); \
} while (0)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 72a5d647a5f2..09eec350054d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -10,8 +10,6 @@
struct backing_dev_info;
extern spinlock_t inode_lock;
-extern struct list_head inode_in_use;
-extern struct list_head inode_unused;
/*
* fs/fs-writeback.c
@@ -143,12 +141,16 @@ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc);
+void tag_pages_for_writeback(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void);
+void tag_pages_for_writeback(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
/* pdflush.c */
extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
diff --git a/include/media/ir-core.h b/include/media/ir-core.h
index eb7fddf8f607..6dc37fae6606 100644
--- a/include/media/ir-core.h
+++ b/include/media/ir-core.h
@@ -60,6 +60,7 @@ enum rc_driver_type {
* @s_idle: optional: enable/disable hardware idle mode, upon which,
device doesn't interrupt host until it sees IR pulses
* @s_learning_mode: enable wide band receiver used for learning
+ * @s_carrier_report: enable carrier reports
*/
struct ir_dev_props {
enum rc_driver_type driver_type;
@@ -82,8 +83,9 @@ struct ir_dev_props {
int (*s_tx_duty_cycle)(void *priv, u32 duty_cycle);
int (*s_rx_carrier_range)(void *priv, u32 min, u32 max);
int (*tx_ir)(void *priv, int *txbuf, u32 n);
- void (*s_idle)(void *priv, int enable);
+ void (*s_idle)(void *priv, bool enable);
int (*s_learning_mode)(void *priv, int enable);
+ int (*s_carrier_report) (void *priv, int enable);
};
struct ir_input_dev {
@@ -157,27 +159,54 @@ void ir_input_unregister(struct input_dev *input_dev);
void ir_repeat(struct input_dev *dev);
void ir_keydown(struct input_dev *dev, int scancode, u8 toggle);
+void ir_keyup(struct ir_input_dev *ir);
u32 ir_g_keycode_from_table(struct input_dev *input_dev, u32 scancode);
/* From ir-raw-event.c */
struct ir_raw_event {
- unsigned pulse:1;
- unsigned duration:31;
+ union {
+ u32 duration;
+
+ struct {
+ u32 carrier;
+ u8 duty_cycle;
+ };
+ };
+
+ unsigned pulse:1;
+ unsigned reset:1;
+ unsigned timeout:1;
+ unsigned carrier_report:1;
};
-#define IR_MAX_DURATION 0x7FFFFFFF /* a bit more than 2 seconds */
+#define DEFINE_IR_RAW_EVENT(event) \
+ struct ir_raw_event event = { \
+ { .duration = 0 } , \
+ .pulse = 0, \
+ .reset = 0, \
+ .timeout = 0, \
+ .carrier_report = 0 }
+
+static inline void init_ir_raw_event(struct ir_raw_event *ev)
+{
+ memset(ev, 0, sizeof(*ev));
+}
+
+#define IR_MAX_DURATION 0xFFFFFFFF /* a bit more than 4 seconds */
void ir_raw_event_handle(struct input_dev *input_dev);
int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev);
int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct input_dev *input_dev,
struct ir_raw_event *ev);
-void ir_raw_event_set_idle(struct input_dev *input_dev, int idle);
+void ir_raw_event_set_idle(struct input_dev *input_dev, bool idle);
static inline void ir_raw_event_reset(struct input_dev *input_dev)
{
- struct ir_raw_event ev = { .pulse = false, .duration = 0 };
+ DEFINE_IR_RAW_EVENT(ev);
+ ev.reset = true;
+
ir_raw_event_store(input_dev, &ev);
ir_raw_event_handle(input_dev);
}
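
struct ir_raw_event grows from a pulse/duration pair into a small tagged record: the leading union carries either a duration or a carrier/duty-cycle measurement, and the new reset/timeout/carrier_report bits say which kind of sample it is, with DEFINE_IR_RAW_EVENT() providing an all-zero starting point. A stand-alone user-space replica of the layout, for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space replica of the new layout, for illustration only. */
struct ir_raw_event {
	union {
		uint32_t duration;		/* pulse/space length, in ns */
		struct {
			uint32_t carrier;	/* carrier frequency, in Hz */
			uint8_t duty_cycle;
		};
	};
	unsigned pulse:1;
	unsigned reset:1;
	unsigned timeout:1;
	unsigned carrier_report:1;
};

int main(void)
{
	/* DEFINE_IR_RAW_EVENT(ev) boils down to an all-zero initializer. */
	struct ir_raw_event ev = { { .duration = 0 } };

	/* A timing sample... */
	ev.pulse = true;
	ev.duration = 900000;			/* 900 us mark */
	printf("pulse=%u duration=%u ns\n", ev.pulse, ev.duration);

	/* ...and a carrier report reuses the same storage via the union. */
	memset(&ev, 0, sizeof(ev));
	ev.carrier_report = 1;
	ev.carrier = 38000;			/* 38 kHz */
	printf("carrier_report=%u carrier=%u Hz\n",
	       ev.carrier_report, ev.carrier);
	return 0;
}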
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 5e96d7a430be..557c676ab7dc 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -3,6 +3,8 @@
#include <media/ir-common.h>
+#define DEFAULT_POLLING_INTERVAL 100 /* ms */
+
struct IR_i2c;
struct IR_i2c {
@@ -15,6 +17,8 @@ struct IR_i2c {
/* Used to avoid fast repeating */
unsigned char old;
+ u32 polling_interval; /* in ms */
+
struct delayed_work work;
char name[32];
char phys[32];
@@ -24,7 +28,6 @@ struct IR_i2c {
enum ir_kbd_get_key_fn {
IR_KBD_GET_KEY_CUSTOM = 0,
IR_KBD_GET_KEY_PIXELVIEW,
- IR_KBD_GET_KEY_PV951,
IR_KBD_GET_KEY_HAUP,
IR_KBD_GET_KEY_KNC1,
IR_KBD_GET_KEY_FUSIONHDTV,
@@ -35,8 +38,9 @@ enum ir_kbd_get_key_fn {
/* Can be passed when instantiating an ir_video i2c device */
struct IR_i2c_init_data {
char *ir_codes;
- const char *name;
- u64 type; /* IR_TYPE_RC5, etc */
+ const char *name;
+ u64 type; /* IR_TYPE_RC5, etc */
+ u32 polling_interval; /* 0 means DEFAULT_POLLING_INTERVAL */
/*
* Specify either a function pointer or a value indicating one of
* ir_kbd_i2c's internal get_key functions
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index b1f60663cb39..54780a560d0e 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -125,10 +125,10 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
struct lirc_driver {
char name[40];
int minor;
- unsigned long code_length;
+ __u32 code_length;
unsigned int buffer_size; /* in chunks holding one code each */
int sample_rate;
- unsigned long features;
+ __u32 features;
unsigned int chunk_size;
@@ -139,7 +139,7 @@ struct lirc_driver {
struct lirc_buffer *rbuf;
int (*set_use_inc) (void *data);
void (*set_use_dec) (void *data);
- struct file_operations *fops;
+ const struct file_operations *fops;
struct device *dev;
struct module *owner;
};
diff --git a/include/media/omap1_camera.h b/include/media/omap1_camera.h
new file mode 100644
index 000000000000..819767cf04d4
--- /dev/null
+++ b/include/media/omap1_camera.h
@@ -0,0 +1,35 @@
+/*
+ * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface
+ *
+ * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MEDIA_OMAP1_CAMERA_H_
+#define __MEDIA_OMAP1_CAMERA_H_
+
+#include <linux/bitops.h>
+
+#define OMAP1_CAMERA_IOSIZE 0x1c
+
+enum omap1_cam_vb_mode {
+ OMAP1_CAM_DMA_CONTIG = 0,
+ OMAP1_CAM_DMA_SG,
+};
+
+#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2)
+
+struct omap1_cam_platform_data {
+ unsigned long camexclk_khz;
+ unsigned long lclk_khz_max;
+ unsigned long flags;
+};
+
+#define OMAP1_CAMERA_LCLK_RISING BIT(0)
+#define OMAP1_CAMERA_RST_LOW BIT(1)
+#define OMAP1_CAMERA_RST_HIGH BIT(2)
+
+#endif /* __MEDIA_OMAP1_CAMERA_H_ */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 9b201ec8dc7f..e0f17edf38ed 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -17,12 +17,13 @@
#define IR_TYPE_RC6 (1 << 2) /* Philips RC6 protocol */
#define IR_TYPE_JVC (1 << 3) /* JVC protocol */
#define IR_TYPE_SONY (1 << 4) /* Sony12/15/20 protocol */
+#define IR_TYPE_RC5_SZ (1 << 5) /* RC5 variant used by Streamzap */
#define IR_TYPE_LIRC (1 << 30) /* Pass raw IR to lirc userspace */
#define IR_TYPE_OTHER (1u << 31)
#define IR_TYPE_ALL (IR_TYPE_RC5 | IR_TYPE_NEC | IR_TYPE_RC6 | \
IR_TYPE_JVC | IR_TYPE_SONY | IR_TYPE_LIRC | \
- IR_TYPE_OTHER)
+ IR_TYPE_RC5_SZ | IR_TYPE_OTHER)
struct ir_scancode {
u32 scancode;
@@ -54,6 +55,8 @@ void rc_map_init(void);
/* Names of the several keytables defined in-kernel */
#define RC_MAP_ADSTECH_DVB_T_PCI "rc-adstech-dvb-t-pci"
+#define RC_MAP_ALINK_DTU_M "rc-alink-dtu-m"
+#define RC_MAP_ANYSEE "rc-anysee"
#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp"
#define RC_MAP_ASUS_PC39 "rc-asus-pc39"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
@@ -62,8 +65,10 @@ void rc_map_init(void);
#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt"
#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a"
#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6"
+#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks"
#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERTV_303 "rc-avertv-303"
+#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700"
#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
#define RC_MAP_BEHOLD "rc-behold"
#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
@@ -71,6 +76,8 @@ void rc_map_init(void);
#define RC_MAP_CINERGY "rc-cinergy"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
#define RC_MAP_DIB0700_RC5_TABLE "rc-dib0700-rc5"
+#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin"
+#define RC_MAP_DIGITTRADE "rc-digittrade"
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
@@ -94,8 +101,12 @@ void rc_map_init(void);
#define RC_MAP_KAIOMY "rc-kaiomy"
#define RC_MAP_KWORLD_315U "rc-kworld-315u"
#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
+#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051"
#define RC_MAP_LIRC "rc-lirc"
+#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
+#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
+#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
#define RC_MAP_NEBULA "rc-nebula"
@@ -114,14 +125,18 @@ void rc_map_init(void);
#define RC_MAP_PURPLETV "rc-purpletv"
#define RC_MAP_PV951 "rc-pv951"
#define RC_MAP_RC5_HAUPPAUGE_NEW "rc-rc5-hauppauge-new"
-#define RC_MAP_RC5_STREAMZAP "rc-rc5-streamzap"
#define RC_MAP_RC5_TV "rc-rc5-tv"
#define RC_MAP_RC6_MCE "rc-rc6-mce"
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
+#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs"
+#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim"
#define RC_MAP_TEVII_NEC "rc-tevii-nec"
+#define RC_MAP_TOTAL_MEDIA_IN_HAND "rc-total-media-in-hand"
+#define RC_MAP_TREKSTOR "rc-trekstor"
#define RC_MAP_TT_1500 "rc-tt-1500"
+#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
#define RC_MAP_WINFAST "rc-winfast"
diff --git a/include/media/s3c_fimc.h b/include/media/s3c_fimc.h
new file mode 100644
index 000000000000..ca1b6738e4a4
--- /dev/null
+++ b/include/media/s3c_fimc.h
@@ -0,0 +1,60 @@
+/*
+ * Samsung S5P SoC camera interface driver header
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S3C_FIMC_H_
+#define S3C_FIMC_H_
+
+enum cam_bus_type {
+ FIMC_ITU_601 = 1,
+ FIMC_ITU_656,
+ FIMC_MIPI_CSI2,
+ FIMC_LCD_WB, /* FIFO link from LCD mixer */
+};
+
+#define FIMC_CLK_INV_PCLK (1 << 0)
+#define FIMC_CLK_INV_VSYNC (1 << 1)
+#define FIMC_CLK_INV_HREF (1 << 2)
+#define FIMC_CLK_INV_HSYNC (1 << 3)
+
+struct i2c_board_info;
+
+/**
+ * struct s3c_fimc_isp_info - image sensor information required for host
+ * interface configuration.
+ *
+ * @board_info: pointer to I2C subdevice's board info
+ * @bus_type: determines bus type, MIPI, ITU-R BT.601 etc.
+ * @i2c_bus_num: i2c control bus id the sensor is attached to
+ * @mux_id: FIMC camera interface multiplexer index (separate for MIPI and ITU)
+ * @bus_width: camera data bus width in bits
+ * @flags: flags defining bus signals polarity inversion (High by default)
+ */
+struct s3c_fimc_isp_info {
+ struct i2c_board_info *board_info;
+ enum cam_bus_type bus_type;
+ u16 i2c_bus_num;
+ u16 mux_id;
+ u16 bus_width;
+ u16 flags;
+};
+
+
+#define FIMC_MAX_CAMIF_CLIENTS 2
+
+/**
+ * struct s3c_platform_fimc - camera host interface platform data
+ *
+ * @isp_info: properties of camera sensor required for host interface setup
+ */
+struct s3c_platform_fimc {
+ struct s3c_fimc_isp_info *isp_info[FIMC_MAX_CAMIF_CLIENTS];
+};
+#endif /* S3C_FIMC_H_ */
diff --git a/include/media/sh_vou.h b/include/media/sh_vou.h
index a3ef30242b00..ec3ba9a597a2 100644
--- a/include/media/sh_vou.h
+++ b/include/media/sh_vou.h
@@ -28,7 +28,6 @@ struct sh_vou_pdata {
int i2c_adap;
struct i2c_board_info *board_info;
unsigned long flags;
- char *module_name;
};
#endif
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 2ce957301f77..86e3631764ef 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -21,6 +21,8 @@
extern struct bus_type soc_camera_bus_type;
+struct file;
+
struct soc_camera_device {
struct list_head list;
struct device dev;
@@ -41,10 +43,7 @@ struct soc_camera_device {
/* soc_camera.c private count. Only accessed with .video_lock held */
int use_count;
struct mutex video_lock; /* Protects device data */
-};
-
-struct soc_camera_file {
- struct soc_camera_device *icd;
+ struct file *streamer; /* stream owner */
struct videobuf_queue vb_vidq;
};
@@ -79,7 +78,7 @@ struct soc_camera_host_ops {
int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
void (*init_videobuf)(struct videobuf_queue *,
struct soc_camera_device *);
- int (*reqbufs)(struct soc_camera_file *, struct v4l2_requestbuffers *);
+ int (*reqbufs)(struct soc_camera_device *, struct v4l2_requestbuffers *);
int (*querycap)(struct soc_camera_host *, struct v4l2_capability *);
int (*set_bus_param)(struct soc_camera_device *, __u32);
int (*get_ctrl)(struct soc_camera_device *, struct v4l2_control *);
diff --git a/include/media/sr030pc30.h b/include/media/sr030pc30.h
new file mode 100644
index 000000000000..6f901a653ba2
--- /dev/null
+++ b/include/media/sr030pc30.h
@@ -0,0 +1,21 @@
+/*
+ * Driver header for SR030PC30 camera sensor
+ *
+ * Copyright (c) 2010 Samsung Electronics, Co. Ltd
+ * Contact: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef SR030PC30_H
+#define SR030PC30_H
+
+struct sr030pc30_platform_data {
+ unsigned long clk_rate; /* master clock frequency in Hz */
+ int (*set_power)(struct device *dev, int on);
+};
+
+#endif /* SR030PC30_H */
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 21b4428c12ab..51e89f2267b8 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -38,6 +38,9 @@ enum {
/* module tvaudio: reserved range 50-99 */
V4L2_IDENT_TVAUDIO = 50, /* A tvaudio chip, unknown which it is exactly */
+ /* Sony IMX074 */
+ V4L2_IDENT_IMX074 = 74,
+
/* module saa7110: just ident 100 */
V4L2_IDENT_SAA7110 = 100,
@@ -70,6 +73,7 @@ enum {
V4L2_IDENT_OV9655 = 255,
V4L2_IDENT_SOI968 = 256,
V4L2_IDENT_OV9640 = 257,
+ V4L2_IDENT_OV6650 = 258,
/* module saa7146: reserved range 300-309 */
V4L2_IDENT_SAA7146 = 300,
@@ -111,6 +115,10 @@ enum {
V4L2_IDENT_VPX3216B = 3216,
V4L2_IDENT_VPX3220A = 3220,
+ /* VX855 just ident 3409 */
+ /* Other via devs could use 3314, 3324, 3327, 3336, 3364, 3353 */
+ V4L2_IDENT_VIA_VX855 = 3409,
+
/* module tvp5150 */
V4L2_IDENT_TVP5150 = 5150,
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 98b32645e5a7..41dd480e45f1 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -232,4 +232,14 @@ void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
unsigned int hmax, unsigned int halign,
unsigned int salign);
int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info);
+
+struct v4l2_discrete_probe {
+ const struct v4l2_frmsize_discrete *sizes;
+ int num_sizes;
+};
+
+const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
+ const struct v4l2_discrete_probe *probe,
+ s32 width, s32 height);
+
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 1efcacbed01a..15802a067a12 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -21,8 +21,7 @@
#define VFL_TYPE_GRABBER 0
#define VFL_TYPE_VBI 1
#define VFL_TYPE_RADIO 2
-#define VFL_TYPE_VTX 3
-#define VFL_TYPE_MAX 4
+#define VFL_TYPE_MAX 3
struct v4l2_ioctl_callbacks;
struct video_device;
@@ -42,8 +41,6 @@ struct v4l2_file_operations {
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
- unsigned long (*get_unmapped_area) (struct file *, unsigned long,
- unsigned long, unsigned long, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct file *);
int (*release) (struct file *);
@@ -97,6 +94,9 @@ struct video_device
/* ioctl callbacks */
const struct v4l2_ioctl_ops *ioctl_ops;
+
+ /* serialization lock */
+ struct mutex *lock;
};
/* dev to video-device */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 8bcbd7a0271c..6648036b728d 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -101,46 +101,67 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
/* Call the specified callback for all subdevs matching the condition.
Ignore any errors. Note that you cannot add or delete a subdev
while walking the subdevs list. */
-#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \
+#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \
do { \
- struct v4l2_subdev *sd; \
+ list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \
+ if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \
+ (sd)->ops->o->f((sd) , ##args); \
+ } while (0)
+
+#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
\
- list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) \
- if ((cond) && sd->ops->o && sd->ops->o->f) \
- sd->ops->o->f(sd , ##args); \
+ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, cond, o, \
+ f , ##args); \
} while (0)
/* Call the specified callback for all subdevs matching the condition.
If the callback returns an error other than 0 or -ENOIOCTLCMD, then
return with that error code. Note that you cannot add or delete a
subdev while walking the subdevs list. */
-#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \
+#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \
({ \
- struct v4l2_subdev *sd; \
- long err = 0; \
+ long __err = 0; \
\
- list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) { \
- if ((cond) && sd->ops->o && sd->ops->o->f) \
- err = sd->ops->o->f(sd , ##args); \
- if (err && err != -ENOIOCTLCMD) \
+ list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \
+ if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \
+ __err = (sd)->ops->o->f((sd) , ##args); \
+ if (__err && __err != -ENOIOCTLCMD) \
break; \
} \
- (err == -ENOIOCTLCMD) ? 0 : err; \
+ (__err == -ENOIOCTLCMD) ? 0 : __err; \
+})
+
+#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \
+ f , ##args); \
})
/* Call the specified callback for all subdevs matching grp_id (if 0, then
match them all). Ignore any errors. Note that you cannot add or delete
a subdev while walking the subdevs list. */
-#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \
- __v4l2_device_call_subdevs(v4l2_dev, \
- !(grpid) || sd->grp_id == (grpid), o, f , ##args)
+#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
+ \
+ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
+ !(grpid) || __sd->grp_id == (grpid), o, f , \
+ ##args); \
+ } while (0)
/* Call the specified callback for all subdevs matching grp_id (if 0, then
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. Note that you cannot
add or delete a subdev while walking the subdevs list. */
#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
- __v4l2_device_call_subdevs_until_err(v4l2_dev, \
- !(grpid) || sd->grp_id == (grpid), o, f , ##args)
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
+ !(grpid) || __sd->grp_id == (grpid), o, f , \
+ ##args); \
+})
#endif
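
The macro rework keeps the public entry points intact; bridge drivers broadcast to their sub-devices exactly as before, the visible difference being that the iterator variable is now private to the macros (__sd) instead of leaking the name sd into the caller's scope. A short, illustrative sketch of typical call sites, assuming a driver that already has a registered struct v4l2_device:

#include <media/v4l2-device.h>

/* Illustrative wrappers a bridge driver might keep around the macros;
 * 'vdev' is the driver's struct v4l2_device with subdevs registered. */
static void bridge_start_streaming(struct v4l2_device *vdev)
{
	/* Call video.s_stream(1) on every registered subdev, ignoring errors. */
	v4l2_device_call_all(vdev, 0, video, s_stream, 1);
}

static int bridge_power_off(struct v4l2_device *vdev)
{
	/* Stop at the first subdev returning an error other than -ENOIOCTLCMD. */
	return v4l2_device_call_until_err(vdev, 0, core, s_power, 0);
}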
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
deleted file mode 100644
index 74bf741d1a9b..000000000000
--- a/include/media/v4l2-i2c-drv.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * v4l2-i2c-drv.h - contains I2C handling code that's identical for
- * all V4L2 I2C drivers. Use this header if the
- * I2C driver is only used by drivers converted
- * to the bus-based I2C API.
- *
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* NOTE: the full version of this header is in the v4l-dvb repository
- * and allows v4l i2c drivers to be compiled on pre-2.6.26 kernels.
- * The version of this header as it appears in the kernel is a stripped
- * version (without all the backwards compatibility stuff) and so it
- * looks a bit odd.
- *
- * If you look at the full version then you will understand the reason
- * for introducing this header since you really don't want to have all
- * the tricky backwards compatibility code in each and every i2c driver.
- *
- * If the i2c driver will never be compiled for pre-2.6.26 kernels, then
- * DO NOT USE this header! Just write it as a regular i2c driver.
- */
-
-#ifndef __V4L2_I2C_DRV_H__
-#define __V4L2_I2C_DRV_H__
-
-#include <media/v4l2-common.h>
-
-struct v4l2_i2c_driver_data {
- const char * const name;
- int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
- int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
- int (*remove)(struct i2c_client *client);
- int (*suspend)(struct i2c_client *client, pm_message_t state);
- int (*resume)(struct i2c_client *client);
- const struct i2c_device_id *id_table;
-};
-
-static struct v4l2_i2c_driver_data v4l2_i2c_data;
-static struct i2c_driver v4l2_i2c_driver;
-
-
-/* Bus-based I2C implementation for kernels >= 2.6.26 */
-
-static int __init v4l2_i2c_drv_init(void)
-{
- v4l2_i2c_driver.driver.name = v4l2_i2c_data.name;
- v4l2_i2c_driver.command = v4l2_i2c_data.command;
- v4l2_i2c_driver.probe = v4l2_i2c_data.probe;
- v4l2_i2c_driver.remove = v4l2_i2c_data.remove;
- v4l2_i2c_driver.suspend = v4l2_i2c_data.suspend;
- v4l2_i2c_driver.resume = v4l2_i2c_data.resume;
- v4l2_i2c_driver.id_table = v4l2_i2c_data.id_table;
- return i2c_add_driver(&v4l2_i2c_driver);
-}
-
-
-static void __exit v4l2_i2c_drv_cleanup(void)
-{
- i2c_del_driver(&v4l2_i2c_driver);
-}
-
-module_init(v4l2_i2c_drv_init);
-module_exit(v4l2_i2c_drv_cleanup);
-
-#endif /* __V4L2_I2C_DRV_H__ */
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index f0cf2e7def06..8e6559838ae3 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -28,10 +28,18 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_YVYU8_2X8,
V4L2_MBUS_FMT_UYVY8_2X8,
V4L2_MBUS_FMT_VYUY8_2X8,
+ V4L2_MBUS_FMT_YVYU10_2X10,
+ V4L2_MBUS_FMT_YUYV10_2X10,
+ V4L2_MBUS_FMT_YVYU10_1X20,
+ V4L2_MBUS_FMT_YUYV10_1X20,
+ V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
V4L2_MBUS_FMT_RGB565_2X8_LE,
V4L2_MBUS_FMT_RGB565_2X8_BE,
+ V4L2_MBUS_FMT_BGR565_2X8_LE,
+ V4L2_MBUS_FMT_BGR565_2X8_BE,
V4L2_MBUS_FMT_SBGGR8_1X8,
V4L2_MBUS_FMT_SBGGR10_1X10,
V4L2_MBUS_FMT_GREY8_1X8,
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 4a97d7341a94..b0316a7cf08d 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -256,10 +256,6 @@ struct v4l2_subdev_video_ops {
int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
- int (*enum_fmt)(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmtdesc);
- int (*g_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
- int (*try_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
- int (*s_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
int (*cropcap)(struct v4l2_subdev *sd, struct v4l2_cropcap *cc);
int (*g_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
int (*s_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
@@ -442,17 +438,28 @@ struct v4l2_subdev {
/* can be used to group similar subdevs, value is driver-specific */
u32 grp_id;
/* pointer to private data */
- void *priv;
+ void *dev_priv;
+ void *host_priv;
};
static inline void v4l2_set_subdevdata(struct v4l2_subdev *sd, void *p)
{
- sd->priv = p;
+ sd->dev_priv = p;
}
static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd)
{
- return sd->priv;
+ return sd->dev_priv;
+}
+
+static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p)
+{
+ sd->host_priv = p;
+}
+
+static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd)
+{
+ return sd->host_priv;
}
static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
@@ -466,7 +473,8 @@ static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
sd->flags = 0;
sd->name[0] = '\0';
sd->grp_id = 0;
- sd->priv = NULL;
+ sd->dev_priv = NULL;
+ sd->host_priv = NULL;
}
/* Call an ops of a v4l2_subdev, doing the right checks against
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index f2c41cebf453..1d3835fc26be 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -139,6 +139,7 @@ struct videobuf_qtype_ops {
struct videobuf_queue {
struct mutex vb_lock;
+ struct mutex *ext_lock;
spinlock_t *irqlock;
struct device *dev;
@@ -167,7 +168,20 @@ struct videobuf_queue {
void *priv_data;
};
-int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
+static inline void videobuf_queue_lock(struct videobuf_queue *q)
+{
+ if (!q->ext_lock)
+ mutex_lock(&q->vb_lock);
+}
+
+static inline void videobuf_queue_unlock(struct videobuf_queue *q)
+{
+ if (!q->ext_lock)
+ mutex_unlock(&q->vb_lock);
+}
+
+int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ int non_blocking, int intr);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf);
@@ -185,7 +199,8 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
enum v4l2_field field,
unsigned int msize,
void *priv,
- struct videobuf_qtype_ops *int_ops);
+ struct videobuf_qtype_ops *int_ops,
+ struct mutex *ext_lock);
int videobuf_queue_is_busy(struct videobuf_queue *q);
void videobuf_queue_cancel(struct videobuf_queue *q);
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
index ebaa9bc1ee8d..f0ed82543d9f 100644
--- a/include/media/videobuf-dma-contig.h
+++ b/include/media/videobuf-dma-contig.h
@@ -23,7 +23,8 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv);
+ void *priv,
+ struct mutex *ext_lock);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
void videobuf_dma_contig_free(struct videobuf_queue *q,
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index aa4ebb42a565..1c647e8148c4 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -103,7 +103,8 @@ void videobuf_queue_sg_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv);
+ void *priv,
+ struct mutex *ext_lock);
#endif /* _VIDEOBUF_DMA_SG_H */
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
index e19403c18dae..486a97efdb56 100644
--- a/include/media/videobuf-vmalloc.h
+++ b/include/media/videobuf-vmalloc.h
@@ -36,7 +36,8 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
- void *priv);
+ void *priv,
+ struct mutex *ext_lock);
void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
diff --git a/include/media/wm8775.h b/include/media/wm8775.h
index 60739c5a23ae..a1c4d417dfa2 100644
--- a/include/media/wm8775.h
+++ b/include/media/wm8775.h
@@ -32,4 +32,7 @@
#define WM8775_AIN3 4
#define WM8775_AIN4 8
+/* subdev group ID */
+#define WM8775_GID (1 << 0)
+
#endif
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index a8de812ccbc8..071fd7a8d781 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -86,6 +86,8 @@ do { \
/**
* enum p9_msg_t - 9P message types
+ * @P9_TLERROR: not used
+ * @P9_RLERROR: response for any failed request for 9P2000.L
* @P9_TSTATFS: file system status request
* @P9_RSTATFS: file system status response
* @P9_TSYMLINK: make symlink request
@@ -137,6 +139,8 @@ do { \
*/
enum p9_msg_t {
+ P9_TLERROR = 6,
+ P9_RLERROR,
P9_TSTATFS = 8,
P9_RSTATFS,
P9_TLOPEN = 12,
@@ -149,6 +153,8 @@ enum p9_msg_t {
P9_RMKNOD,
P9_TRENAME = 20,
P9_RRENAME,
+ P9_TREADLINK = 22,
+ P9_RREADLINK,
P9_TGETATTR = 24,
P9_RGETATTR,
P9_TSETATTR = 26,
@@ -159,6 +165,12 @@ enum p9_msg_t {
P9_RXATTRCREATE,
P9_TREADDIR = 40,
P9_RREADDIR,
+ P9_TFSYNC = 50,
+ P9_RFSYNC,
+ P9_TLOCK = 52,
+ P9_RLOCK,
+ P9_TGETLOCK = 54,
+ P9_RGETLOCK,
P9_TLINK = 70,
P9_RLINK,
P9_TMKDIR = 72,
@@ -458,6 +470,48 @@ struct p9_iattr_dotl {
u64 mtime_nsec;
};
+#define P9_LOCK_SUCCESS 0
+#define P9_LOCK_BLOCKED 1
+#define P9_LOCK_ERROR 2
+#define P9_LOCK_GRACE 3
+
+#define P9_LOCK_FLAGS_BLOCK 1
+#define P9_LOCK_FLAGS_RECLAIM 2
+
+/* struct p9_flock: POSIX lock structure
+ * @type - type of lock
+ * @flags - lock flags
+ * @start - starting offset of the lock
+ * @length - number of bytes
+ * @proc_id - process id which wants to take lock
+ * @client_id - client id
+ */
+
+struct p9_flock {
+ u8 type;
+ u32 flags;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
+/* struct p9_getlock: getlock structure
+ * @type - type of lock
+ * @start - starting offset of the lock
+ * @length - number of bytes
+ * @proc_id - process id which wants to take lock
+ * @client_id - client id
+ */
+
+struct p9_getlock {
+ u8 type;
+ u64 start;
+ u64 length;
+ u32 proc_id;
+ char *client_id;
+};
+
/* Structures for Protocol Operations */
struct p9_tstatfs {
u32 fid;
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 7f63d5ab7b44..83ba6a4d58a3 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -229,6 +229,7 @@ int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, gid_t gid,
int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
gid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
+int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
u64 offset, u32 count);
@@ -248,6 +249,8 @@ int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode,
dev_t rdev, gid_t gid, struct p9_qid *);
int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
gid_t gid, struct p9_qid *);
+int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
+int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
@@ -259,5 +262,6 @@ int p9_is_proto_dotu(struct p9_client *clnt);
int p9_is_proto_dotl(struct p9_client *clnt);
struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *);
int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int);
+int p9_client_readlink(struct p9_fid *fid, char **target);
#endif /* NET_9P_CLIENT_H */
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
new file mode 100644
index 000000000000..5bcce55438cf
--- /dev/null
+++ b/include/net/caif/caif_shm.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SHM_H_
+#define CAIF_SHM_H_
+
+struct shmdev_layer {
+ u32 shm_base_addr;
+ u32 shm_total_sz;
+ u32 shm_id;
+ u32 shm_loopback;
+ void *hmbx;
+ int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
+ int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
+ struct shmdev_layer *pshm_dev, void *pshm_drv);
+ struct net_device *pshm_netdev;
+};
+
+extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
+extern void caif_shmcore_remove(struct net_device *pshm_netdev);
+
+#endif
diff --git a/include/net/dst.h b/include/net/dst.h
index a217c838ec0d..ffe9cb719c0e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -95,7 +95,7 @@ struct dst_entry {
unsigned long lastuse;
union {
struct dst_entry *next;
- struct rtable *rt_next;
+ struct rtable __rcu *rt_next;
struct rt6_info *rt6_next;
struct dn_route *dn_next;
};
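
This and the following networking hunks only add the __rcu address-space annotation for sparse; the generated code does not change, but sparse can now warn when an RCU-protected pointer is read or written without the RCU accessors. A small kernel-style sketch of the access pattern the annotation enforces (the item structure and helpers are illustrative, not from the patch):

#include <linux/rcupdate.h>

struct item {
	int value;
	struct item __rcu *next;	/* annotated: RCU-managed link */
};

/* Readers: rcu_dereference() inside an RCU read-side critical section. */
static int peek_next_value(struct item *head)
{
	struct item *n;
	int v = -1;

	rcu_read_lock();
	n = rcu_dereference(head->next);
	if (n)
		v = n->value;
	rcu_read_unlock();
	return v;
}

/* Updaters: publish the new pointer with rcu_assign_pointer(). */
static void set_next(struct item *head, struct item *new_next)
{
	rcu_assign_pointer(head->next, new_next);
}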
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 106f3097d384..075f1e3a0fed 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -20,7 +20,7 @@ struct fib_rule {
u32 table;
u8 action;
u32 target;
- struct fib_rule * ctarget;
+ struct fib_rule __rcu *ctarget;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;
diff --git a/include/net/garp.h b/include/net/garp.h
index 825f172caba9..f4c295984c45 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -107,7 +107,7 @@ struct garp_applicant {
};
struct garp_port {
- struct garp_applicant *applicants[GARP_APPLICATION_MAX + 1];
+ struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
};
extern int garp_register_application(struct garp_application *app);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 417d0c894f29..fe239bfe5f7f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,7 +15,7 @@
struct inet_peer {
/* group together avl_left,avl_right,v4daddr to speedup lookups */
- struct inet_peer *avl_left, *avl_right;
+ struct inet_peer __rcu *avl_left, *avl_right;
__be32 v4daddr; /* peer's address */
__u32 avl_height;
struct list_head unused;
diff --git a/include/net/ip.h b/include/net/ip.h
index dbee3fe260e1..86e2b182a0c0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -59,7 +59,7 @@ struct ipcm_cookie {
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
struct ip_ra_chain {
- struct ip_ra_chain *next;
+ struct ip_ra_chain __rcu *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
@@ -68,7 +68,7 @@ struct ip_ra_chain {
struct rcu_head rcu;
};
-extern struct ip_ra_chain *ip_ra_chain;
+extern struct ip_ra_chain __rcu *ip_ra_chain;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc94ec568a50..fc73e667b50e 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -13,7 +13,7 @@
/* IPv6 tunnel */
struct ip6_tnl {
- struct ip6_tnl *next; /* next tunnel in list */
+ struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
struct ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 58abbf966b0c..a32654d52730 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -16,7 +16,7 @@ struct ip_tunnel_6rd_parm {
};
struct ip_tunnel {
- struct ip_tunnel *next;
+ struct ip_tunnel __rcu *next;
struct net_device *dev;
int err_count; /* Number of arrived ICMP errors */
@@ -34,12 +34,12 @@ struct ip_tunnel {
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
#endif
- struct ip_tunnel_prl_entry *prl; /* potential router list */
+ struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
unsigned int prl_count; /* # of entries in PRL */
};
struct ip_tunnel_prl_entry {
- struct ip_tunnel_prl_entry *next;
+ struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
u16 flags;
struct rcu_head rcu_head;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 65af9a07cf76..1bf812b21fb7 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -88,7 +88,7 @@ struct net {
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
#endif
- struct net_generic *gen;
+ struct net_generic __rcu *gen;
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
diff --git a/include/net/protocol.h b/include/net/protocol.h
index f1effdd3c265..dc07495bce4c 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -89,10 +89,10 @@ struct inet_protosw {
#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
-extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
+extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
diff --git a/include/net/sock.h b/include/net/sock.h
index 73a4f9702a65..c7a736228ca2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -301,7 +301,7 @@ struct sock {
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
- struct sk_filter *sk_filter;
+ struct sk_filter __rcu *sk_filter;
void *sk_protinfo;
struct timer_list sk_timer;
ktime_t sk_stamp;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f28d7c9b9f8d..bcfb6b24b019 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1264,7 +1264,7 @@ struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, u32 info);
- struct xfrm_tunnel *next;
+ struct xfrm_tunnel __rcu *next;
int priority;
};
@@ -1272,7 +1272,7 @@ struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
- struct xfrm6_tunnel *next;
+ struct xfrm6_tunnel __rcu *next;
int priority;
};
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index fa0d52b8e622..b5fc9f39122b 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -39,7 +39,9 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
+#include <linux/if_vlan.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
struct rdma_addr_client {
atomic_t refcount;
@@ -63,6 +65,7 @@ struct rdma_dev_addr {
unsigned char broadcast[MAX_ADDR_LEN];
unsigned short dev_type;
int bound_dev_if;
+ enum rdma_transport_type transport;
};
/**
@@ -127,9 +130,51 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
}
+static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid)
+{
+ memset(gid->raw, 0, 16);
+ *((__be32 *) gid->raw) = cpu_to_be32(0xfe800000);
+ if (vid < 0x1000) {
+ gid->raw[12] = vid & 0xff;
+ gid->raw[11] = vid >> 8;
+ } else {
+ gid->raw[12] = 0xfe;
+ gid->raw[11] = 0xff;
+ }
+ memcpy(gid->raw + 13, mac + 3, 3);
+ memcpy(gid->raw + 8, mac, 3);
+ gid->raw[8] ^= 2;
+}
+
+static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN ?
+ vlan_dev_vlan_id(dev) : 0xffff;
+}
+
+static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
+ union ib_gid *gid)
+{
+ struct net_device *dev;
+ u16 vid = 0xffff;
+
+ dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (dev) {
+ vid = rdma_vlan_dev_vlan_id(dev);
+ dev_put(dev);
+ }
+
+ iboe_mac_vlan_to_ll(gid, dev_addr->src_dev_addr, vid);
+}
+
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ if (dev_addr->transport == RDMA_TRANSPORT_IB &&
+ dev_addr->dev_type != ARPHRD_INFINIBAND)
+ iboe_addr_get_sgid(dev_addr, gid);
+ else
+ memcpy(gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof *gid);
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
@@ -147,4 +192,91 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
}
+static inline enum ib_mtu iboe_get_mtu(int mtu)
+{
+ /*
+ * reduce IB headers from effective IBoE MTU. 28 stands for
+ * atomic header which is the biggest possible header after BTH
+ */
+ mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+
+ if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
+ return IB_MTU_4096;
+ else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048))
+ return IB_MTU_2048;
+ else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))
+ return IB_MTU_1024;
+ else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512))
+ return IB_MTU_512;
+ else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256))
+ return IB_MTU_256;
+ else
+ return 0;
+}
+
+static inline int iboe_get_rate(struct net_device *dev)
+{
+ struct ethtool_cmd cmd;
+
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings ||
+ dev->ethtool_ops->get_settings(dev, &cmd))
+ return IB_RATE_PORT_CURRENT;
+
+ if (cmd.speed >= 40000)
+ return IB_RATE_40_GBPS;
+ else if (cmd.speed >= 30000)
+ return IB_RATE_30_GBPS;
+ else if (cmd.speed >= 20000)
+ return IB_RATE_20_GBPS;
+ else if (cmd.speed >= 10000)
+ return IB_RATE_10_GBPS;
+ else
+ return IB_RATE_PORT_CURRENT;
+}
+
+static inline int rdma_link_local_addr(struct in6_addr *addr)
+{
+ if (addr->s6_addr32[0] == htonl(0xfe800000) &&
+ addr->s6_addr32[1] == 0)
+ return 1;
+
+ return 0;
+}
+
+static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
+{
+ memcpy(mac, &addr->s6_addr[8], 3);
+ memcpy(mac + 3, &addr->s6_addr[13], 3);
+ mac[0] ^= 2;
+}
+
+static inline int rdma_is_multicast_addr(struct in6_addr *addr)
+{
+ return addr->s6_addr[0] == 0xff;
+}
+
+static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac)
+{
+ int i;
+
+ mac[0] = 0x33;
+ mac[1] = 0x33;
+ for (i = 2; i < 6; ++i)
+ mac[i] = addr->s6_addr[i + 10];
+}
+
+static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
+{
+ u16 vid;
+
+ vid = dgid->raw[11] << 8 | dgid->raw[12];
+ return vid < 0x1000 ? vid : 0xffff;
+}
+
+static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN ?
+ vlan_dev_real_dev(dev) : 0;
+}
+
#endif /* IB_ADDR_H */
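
iboe_mac_vlan_to_ll() derives the RoCE link-local GID straight from the port MAC and VLAN id: an fe80::/64 prefix, the VLAN (or an ff:fe marker when untagged) in bytes 11-12, and the MAC split EUI-style around bytes 8-10/13-15 with the universal/local bit flipped. A stand-alone sketch of the same byte shuffling, together with the reverse VLAN extraction performed by rdma_get_vlan_id():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space copy of iboe_mac_vlan_to_ll(): MAC + VLAN -> link-local GID. */
static void mac_vlan_to_gid(uint8_t gid[16], const uint8_t mac[6], uint16_t vid)
{
	memset(gid, 0, 16);
	gid[0] = 0xfe;				/* fe80::/64 link-local prefix */
	gid[1] = 0x80;
	if (vid < 0x1000) {			/* tagged: VLAN id in bytes 11-12 */
		gid[12] = vid & 0xff;
		gid[11] = vid >> 8;
	} else {				/* untagged: ff:fe marker */
		gid[12] = 0xfe;
		gid[11] = 0xff;
	}
	memcpy(gid + 13, mac + 3, 3);		/* low half of the MAC */
	memcpy(gid + 8, mac, 3);		/* high half of the MAC */
	gid[8] ^= 2;				/* flip the universal/local bit */
}

/* Reverse lookup, mirroring rdma_get_vlan_id(). */
static uint16_t gid_to_vlan(const uint8_t gid[16])
{
	uint16_t vid = (uint16_t)(gid[11] << 8) | gid[12];

	return vid < 0x1000 ? vid : 0xffff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t gid[16];
	int i;

	mac_vlan_to_gid(gid, mac, 100);		/* VLAN 100 as an example */
	for (i = 0; i < 16; i++)
		printf("%02x%s", gid[i], i == 15 ? "\n" : (i & 1 ? ":" : ""));
	printf("vlan from gid: %u\n", gid_to_vlan(gid));
	return 0;
}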
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index cbb50f4da3dd..b37fe3b10a9d 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -37,6 +37,8 @@
enum {
IB_LRH_BYTES = 8,
+ IB_ETH_BYTES = 14,
+ IB_VLAN_BYTES = 4,
IB_GRH_BYTES = 40,
IB_BTH_BYTES = 12,
IB_DETH_BYTES = 8
@@ -210,14 +212,32 @@ struct ib_unpacked_deth {
__be32 source_qpn;
};
+struct ib_unpacked_eth {
+ u8 dmac_h[4];
+ u8 dmac_l[2];
+ u8 smac_h[2];
+ u8 smac_l[4];
+ __be16 type;
+};
+
+struct ib_unpacked_vlan {
+ __be16 tag;
+ __be16 type;
+};
+
struct ib_ud_header {
+ int lrh_present;
struct ib_unpacked_lrh lrh;
- int grh_present;
- struct ib_unpacked_grh grh;
- struct ib_unpacked_bth bth;
+ int eth_present;
+ struct ib_unpacked_eth eth;
+ int vlan_present;
+ struct ib_unpacked_vlan vlan;
+ int grh_present;
+ struct ib_unpacked_grh grh;
+ struct ib_unpacked_bth bth;
struct ib_unpacked_deth deth;
- int immediate_present;
- __be32 immediate_data;
+ int immediate_present;
+ __be32 immediate_data;
};
void ib_pack(const struct ib_field *desc,
@@ -230,9 +250,12 @@ void ib_unpack(const struct ib_field *desc,
void *buf,
void *structure);
-void ib_ud_header_init(int payload_bytes,
- int grh_present,
- int immediate_present,
+void ib_ud_header_init(int payload_bytes,
+ int lrh_present,
+ int eth_present,
+ int vlan_present,
+ int grh_present,
+ int immediate_present,
struct ib_ud_header *header);
int ib_ud_header_pack(struct ib_ud_header *header,
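
ib_ud_header_init() now takes an explicit flag for every optional header, so a caller selects LRH for native IB ports and ETH (plus VLAN) for IBoE ports. A hedged sketch of such a call; the is_eth/has_vlan/has_grh inputs are illustrative, not taken from any driver in this diff.

    /* minimal sketch of filling a UD header for either link layer */
    static void build_ud_header(int payload_bytes, int is_eth, int has_vlan,
                                int has_grh, struct ib_ud_header *hdr)
    {
            ib_ud_header_init(payload_bytes,
                              !is_eth,              /* lrh_present  */
                              is_eth,               /* eth_present  */
                              is_eth && has_vlan,   /* vlan_present */
                              has_grh,              /* grh_present  */
                              0,                    /* immediate_present */
                              hdr);
    }
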
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index a17f77106149..fe5b05177a2c 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -205,7 +205,8 @@ struct ib_uverbs_query_port_resp {
__u8 active_width;
__u8 active_speed;
__u8 phys_state;
- __u8 reserved[3];
+ __u8 link_layer;
+ __u8 reserved[2];
};
struct ib_uverbs_alloc_pd {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 857b3b9cf120..e04c4888d1fd 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -75,6 +75,12 @@ enum rdma_transport_type {
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
+enum rdma_link_layer {
+ IB_LINK_LAYER_UNSPECIFIED,
+ IB_LINK_LAYER_INFINIBAND,
+ IB_LINK_LAYER_ETHERNET,
+};
+
enum ib_device_cap_flags {
IB_DEVICE_RESIZE_MAX_WR = 1,
IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
@@ -1010,6 +1016,8 @@ struct ib_device {
int (*query_port)(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr);
+ enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
+ u8 port_num);
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
@@ -1222,6 +1230,9 @@ int ib_query_device(struct ib_device *device,
int ib_query_port(struct ib_device *device,
u8 port_num, struct ib_port_attr *port_attr);
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+ u8 port_num);
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
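
With the new get_link_layer device method and the rdma_port_get_link_layer() wrapper, consumers can branch between native IB and Ethernet (IBoE) behaviour per port. A minimal sketch of such a check:

    /* true when the port runs IBoE/RoCE rather than native InfiniBand */
    static bool port_is_iboe(struct ib_device *device, u8 port_num)
    {
            return rdma_port_get_link_layer(device, port_num) ==
                   IB_LINK_LAYER_ETHERNET;
    }
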
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index ad178fa78f66..1ae84db4c9fb 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -239,4 +239,42 @@ struct srp_rsp {
u8 data[0];
} __attribute__((packed));
+struct srp_cred_req {
+ u8 opcode;
+ u8 sol_not;
+ u8 reserved[2];
+ __be32 req_lim_delta;
+ u64 tag;
+};
+
+struct srp_cred_rsp {
+ u8 opcode;
+ u8 reserved[7];
+ u64 tag;
+};
+
+/*
+ * The SRP spec defines the fixed portion of the AER_REQ structure to be
+ * 36 bytes, so it needs to be packed to avoid having it padded to 40 bytes
+ * on 64-bit architectures.
+ */
+struct srp_aer_req {
+ u8 opcode;
+ u8 sol_not;
+ u8 reserved[2];
+ __be32 req_lim_delta;
+ u64 tag;
+ u32 reserved2;
+ __be64 lun;
+ __be32 sense_data_len;
+ u32 reserved3;
+ u8 sense_data[0];
+} __attribute__((packed));
+
+struct srp_aer_rsp {
+ u8 opcode;
+ u8 reserved[7];
+ u64 tag;
+};
+
#endif /* SCSI_SRP_H */
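
The comment above srp_aer_req can be checked with a small user-space experiment: without __attribute__((packed)) the 8-byte alignment of lun pads the fixed portion from 36 to 40 bytes on a typical 64-bit ABI. A hedged stand-alone sketch, with field types simplified to uintN_t:

    #include <stdint.h>
    #include <stdio.h>

    struct aer_natural {                    /* default (natural) alignment */
            uint8_t  opcode, sol_not, reserved[2];
            uint32_t req_lim_delta;
            uint64_t tag;
            uint32_t reserved2;
            uint64_t lun;                   /* forces 4 bytes of padding before it */
            uint32_t sense_data_len;
            uint32_t reserved3;
    };

    struct aer_packed {                     /* as in the header above */
            uint8_t  opcode, sol_not, reserved[2];
            uint32_t req_lim_delta;
            uint64_t tag;
            uint32_t reserved2;
            uint64_t lun;
            uint32_t sense_data_len;
            uint32_t reserved3;
    } __attribute__((packed));

    int main(void)
    {
            /* typically prints "natural 40 packed 36" on 64-bit targets */
            printf("natural %zu packed %zu\n",
                   sizeof(struct aer_natural), sizeof(struct aer_packed));
            return 0;
    }
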
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 01e9e0076a92..289010d3270b 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -21,7 +21,8 @@ TRACE_EVENT(ext4_free_inode,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( uid_t, uid )
@@ -30,7 +31,8 @@ TRACE_EVENT(ext4_free_inode,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->uid = inode->i_uid;
@@ -38,9 +40,10 @@ TRACE_EVENT(ext4_free_inode,
__entry->blocks = inode->i_blocks;
),
- TP_printk("dev %s ino %lu mode 0%o uid %u gid %u blocks %llu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->mode, __entry->uid, __entry->gid,
+ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->mode,
+ __entry->uid, __entry->gid,
(unsigned long long) __entry->blocks)
);
@@ -50,20 +53,22 @@ TRACE_EVENT(ext4_request_inode,
TP_ARGS(dir, mode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, dir )
__field( umode_t, mode )
),
TP_fast_assign(
- __entry->dev = dir->i_sb->s_dev;
+ __entry->dev_major = MAJOR(dir->i_sb->s_dev);
+ __entry->dev_minor = MINOR(dir->i_sb->s_dev);
__entry->dir = dir->i_ino;
__entry->mode = mode;
),
- TP_printk("dev %s dir %lu mode 0%o",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir,
- __entry->mode)
+ TP_printk("dev %d,%d dir %lu mode 0%o",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->dir, __entry->mode)
);
TRACE_EVENT(ext4_allocate_inode,
@@ -72,21 +77,24 @@ TRACE_EVENT(ext4_allocate_inode,
TP_ARGS(inode, dir, mode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( ino_t, dir )
__field( umode_t, mode )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->dir = dir->i_ino;
__entry->mode = mode;
),
- TP_printk("dev %s ino %lu dir %lu mode 0%o",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
(unsigned long) __entry->dir, __entry->mode)
);
@@ -98,7 +106,8 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
TP_ARGS(inode, pos, len, flags),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
@@ -106,15 +115,17 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
__entry->flags = flags;
),
- TP_printk("dev %s ino %lu pos %llu len %u flags %u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->pos, __entry->len, __entry->flags)
);
@@ -141,7 +152,8 @@ DECLARE_EVENT_CLASS(ext4__write_end,
TP_ARGS(inode, pos, len, copied),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
@@ -149,16 +161,18 @@ DECLARE_EVENT_CLASS(ext4__write_end,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
__entry->copied = copied;
),
- TP_printk("dev %s ino %lu pos %llu len %u copied %u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->pos, __entry->len, __entry->copied)
+ TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->pos,
+ __entry->len, __entry->copied)
);
DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
@@ -199,21 +213,23 @@ TRACE_EVENT(ext4_writepage,
TP_ARGS(inode, page),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( pgoff_t, index )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->index = page->index;
),
- TP_printk("dev %s ino %lu page_index %lu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->index)
+ TP_printk("dev %d,%d ino %lu page_index %lu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->index)
);
TRACE_EVENT(ext4_da_writepages,
@@ -222,13 +238,13 @@ TRACE_EVENT(ext4_da_writepages,
TP_ARGS(inode, wbc),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( long, nr_to_write )
__field( long, pages_skipped )
__field( loff_t, range_start )
__field( loff_t, range_end )
- __field( char, nonblocking )
__field( char, for_kupdate )
__field( char, for_reclaim )
__field( char, range_cyclic )
@@ -236,24 +252,27 @@ TRACE_EVENT(ext4_da_writepages,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
- __entry->nonblocking = wbc->nonblocking;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
),
- TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
- jbd2_dev_to_name(__entry->dev),
+ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+ "range_start %llu range_end %llu "
+ "for_kupdate %d for_reclaim %d "
+ "range_cyclic %d writeback_index %lu",
+ __entry->dev_major, __entry->dev_minor,
(unsigned long) __entry->ino, __entry->nr_to_write,
__entry->pages_skipped, __entry->range_start,
- __entry->range_end, __entry->nonblocking,
+ __entry->range_end,
__entry->for_kupdate, __entry->for_reclaim,
__entry->range_cyclic,
(unsigned long) __entry->writeback_index)
@@ -265,7 +284,8 @@ TRACE_EVENT(ext4_da_write_pages,
TP_ARGS(inode, mpd),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u64, b_blocknr )
__field( __u32, b_size )
@@ -276,7 +296,8 @@ TRACE_EVENT(ext4_da_write_pages,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->b_blocknr = mpd->b_blocknr;
__entry->b_size = mpd->b_size;
@@ -286,8 +307,9 @@ TRACE_EVENT(ext4_da_write_pages,
__entry->pages_written = mpd->pages_written;
),
- TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->b_blocknr, __entry->b_size,
__entry->b_state, __entry->first_page,
__entry->io_done, __entry->pages_written)
@@ -300,7 +322,8 @@ TRACE_EVENT(ext4_da_writepages_result,
TP_ARGS(inode, wbc, ret, pages_written),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( int, ret )
__field( int, pages_written )
@@ -310,7 +333,8 @@ TRACE_EVENT(ext4_da_writepages_result,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->ret = ret;
__entry->pages_written = pages_written;
@@ -319,8 +343,8 @@ TRACE_EVENT(ext4_da_writepages_result,
__entry->writeback_index = inode->i_mapping->writeback_index;
),
- TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu",
- jbd2_dev_to_name(__entry->dev),
+ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu",
+ __entry->dev_major, __entry->dev_minor,
(unsigned long) __entry->ino, __entry->ret,
__entry->pages_written, __entry->pages_skipped,
__entry->more_io,
@@ -334,20 +358,23 @@ TRACE_EVENT(ext4_discard_blocks,
TP_ARGS(sb, blk, count),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( __u64, blk )
__field( __u64, count )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
__entry->blk = blk;
__entry->count = count;
),
- TP_printk("dev %s blk %llu count %llu",
- jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count)
+ TP_printk("dev %d,%d blk %llu count %llu",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->blk, __entry->count)
);
DECLARE_EVENT_CLASS(ext4__mb_new_pa,
@@ -357,7 +384,8 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
TP_ARGS(ac, pa),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u64, pa_pstart )
__field( __u32, pa_len )
@@ -366,16 +394,18 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
),
TP_fast_assign(
- __entry->dev = ac->ac_sb->s_dev;
+ __entry->dev_major = MAJOR(ac->ac_sb->s_dev);
+ __entry->dev_minor = MINOR(ac->ac_sb->s_dev);
__entry->ino = ac->ac_inode->i_ino;
__entry->pa_pstart = pa->pa_pstart;
__entry->pa_len = pa->pa_len;
__entry->pa_lstart = pa->pa_lstart;
),
- TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
+ TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->pa_pstart,
+ __entry->pa_len, __entry->pa_lstart)
);
DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
@@ -396,14 +426,15 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
TRACE_EVENT(ext4_mb_release_inode_pa,
TP_PROTO(struct super_block *sb,
- struct ext4_allocation_context *ac,
+ struct inode *inode,
struct ext4_prealloc_space *pa,
unsigned long long block, unsigned int count),
- TP_ARGS(sb, ac, pa, block, count),
+ TP_ARGS(sb, inode, pa, block, count),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u64, block )
__field( __u32, count )
@@ -411,43 +442,42 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->ino = (ac && ac->ac_inode) ?
- ac->ac_inode->i_ino : 0;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
+ __entry->ino = inode->i_ino;
__entry->block = block;
__entry->count = count;
),
- TP_printk("dev %s ino %lu block %llu count %u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->block, __entry->count)
+ TP_printk("dev %d,%d ino %lu block %llu count %u",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->block, __entry->count)
);
TRACE_EVENT(ext4_mb_release_group_pa,
TP_PROTO(struct super_block *sb,
- struct ext4_allocation_context *ac,
struct ext4_prealloc_space *pa),
- TP_ARGS(sb, ac, pa),
+ TP_ARGS(sb, pa),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( __u64, pa_pstart )
__field( __u32, pa_len )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->ino = (ac && ac->ac_inode) ?
- ac->ac_inode->i_ino : 0;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
__entry->pa_pstart = pa->pa_pstart;
__entry->pa_len = pa->pa_len;
),
- TP_printk("dev %s pstart %llu len %u",
- jbd2_dev_to_name(__entry->dev), __entry->pa_pstart, __entry->pa_len)
+ TP_printk("dev %d,%d pstart %llu len %u",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->pa_pstart, __entry->pa_len)
);
TRACE_EVENT(ext4_discard_preallocations,
@@ -456,18 +486,21 @@ TRACE_EVENT(ext4_discard_preallocations,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
),
- TP_printk("dev %s ino %lu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+ TP_printk("dev %d,%d ino %lu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -476,18 +509,20 @@ TRACE_EVENT(ext4_mb_discard_preallocations,
TP_ARGS(sb, needed),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( int, needed )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
__entry->needed = needed;
),
- TP_printk("dev %s needed %d",
- jbd2_dev_to_name(__entry->dev), __entry->needed)
+ TP_printk("dev %d,%d needed %d",
+ __entry->dev_major, __entry->dev_minor, __entry->needed)
);
TRACE_EVENT(ext4_request_blocks,
@@ -496,7 +531,8 @@ TRACE_EVENT(ext4_request_blocks,
TP_ARGS(ar),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( unsigned int, flags )
__field( unsigned int, len )
@@ -509,7 +545,8 @@ TRACE_EVENT(ext4_request_blocks,
),
TP_fast_assign(
- __entry->dev = ar->inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
__entry->ino = ar->inode->i_ino;
__entry->flags = ar->flags;
__entry->len = ar->len;
@@ -521,8 +558,9 @@ TRACE_EVENT(ext4_request_blocks,
__entry->pright = ar->pright;
),
- TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->flags, __entry->len,
(unsigned long long) __entry->logical,
(unsigned long long) __entry->goal,
@@ -538,7 +576,8 @@ TRACE_EVENT(ext4_allocate_blocks,
TP_ARGS(ar, block),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u64, block )
__field( unsigned int, flags )
@@ -552,7 +591,8 @@ TRACE_EVENT(ext4_allocate_blocks,
),
TP_fast_assign(
- __entry->dev = ar->inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
__entry->ino = ar->inode->i_ino;
__entry->block = block;
__entry->flags = ar->flags;
@@ -565,9 +605,10 @@ TRACE_EVENT(ext4_allocate_blocks,
__entry->pright = ar->pright;
),
- TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->flags, __entry->len, __entry->block,
+ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->flags,
+ __entry->len, __entry->block,
(unsigned long long) __entry->logical,
(unsigned long long) __entry->goal,
(unsigned long long) __entry->lleft,
@@ -583,7 +624,8 @@ TRACE_EVENT(ext4_free_blocks,
TP_ARGS(inode, block, count, flags),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, block )
@@ -592,7 +634,8 @@ TRACE_EVENT(ext4_free_blocks,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->block = block;
@@ -600,8 +643,9 @@ TRACE_EVENT(ext4_free_blocks,
__entry->flags = flags;
),
- TP_printk("dev %s ino %lu mode 0%o block %llu count %lu flags %d",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->mode, __entry->block, __entry->count,
__entry->flags)
);
@@ -612,7 +656,8 @@ TRACE_EVENT(ext4_sync_file,
TP_ARGS(file, datasync),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( ino_t, parent )
__field( int, datasync )
@@ -621,14 +666,16 @@ TRACE_EVENT(ext4_sync_file,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(dentry->d_inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(dentry->d_inode->i_sb->s_dev);
__entry->ino = dentry->d_inode->i_ino;
__entry->datasync = datasync;
__entry->parent = dentry->d_parent->d_inode->i_ino;
),
- TP_printk("dev %s ino %ld parent %ld datasync %d ",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
(unsigned long) __entry->parent, __entry->datasync)
);
@@ -638,18 +685,20 @@ TRACE_EVENT(ext4_sync_fs,
TP_ARGS(sb, wait),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( int, wait )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
__entry->wait = wait;
),
- TP_printk("dev %s wait %d", jbd2_dev_to_name(__entry->dev),
- __entry->wait)
+ TP_printk("dev %d,%d wait %d", __entry->dev_major,
+ __entry->dev_minor, __entry->wait)
);
TRACE_EVENT(ext4_alloc_da_blocks,
@@ -658,21 +707,24 @@ TRACE_EVENT(ext4_alloc_da_blocks,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( unsigned int, data_blocks )
__field( unsigned int, meta_blocks )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
),
- TP_printk("dev %s ino %lu data_blocks %u meta_blocks %u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->data_blocks, __entry->meta_blocks)
);
@@ -682,7 +734,8 @@ TRACE_EVENT(ext4_mballoc_alloc,
TP_ARGS(ac),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u16, found )
__field( __u16, groups )
@@ -705,7 +758,8 @@ TRACE_EVENT(ext4_mballoc_alloc,
),
TP_fast_assign(
- __entry->dev = ac->ac_inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev);
__entry->ino = ac->ac_inode->i_ino;
__entry->found = ac->ac_found;
__entry->flags = ac->ac_flags;
@@ -727,10 +781,11 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->result_len = ac->ac_f_ex.fe_len;
),
- TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
"result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
"tail %u broken %u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->goal_group, __entry->goal_start,
@@ -748,7 +803,8 @@ TRACE_EVENT(ext4_mballoc_prealloc,
TP_ARGS(ac),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( __u32, orig_logical )
__field( int, orig_start )
@@ -761,7 +817,8 @@ TRACE_EVENT(ext4_mballoc_prealloc,
),
TP_fast_assign(
- __entry->dev = ac->ac_inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev);
__entry->ino = ac->ac_inode->i_ino;
__entry->orig_logical = ac->ac_o_ex.fe_logical;
__entry->orig_start = ac->ac_o_ex.fe_start;
@@ -773,8 +830,9 @@ TRACE_EVENT(ext4_mballoc_prealloc,
__entry->result_len = ac->ac_b_ex.fe_len;
),
- TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->result_group, __entry->result_start,
@@ -782,46 +840,59 @@ TRACE_EVENT(ext4_mballoc_prealloc,
);
DECLARE_EVENT_CLASS(ext4__mballoc,
- TP_PROTO(struct ext4_allocation_context *ac),
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
- TP_ARGS(ac),
+ TP_ARGS(sb, inode, group, start, len),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
- __field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
TP_fast_assign(
- __entry->dev = ac->ac_inode->i_sb->s_dev;
- __entry->ino = ac->ac_inode->i_ino;
- __entry->result_logical = ac->ac_b_ex.fe_logical;
- __entry->result_start = ac->ac_b_ex.fe_start;
- __entry->result_group = ac->ac_b_ex.fe_group;
- __entry->result_len = ac->ac_b_ex.fe_len;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
+ __entry->ino = inode ? inode->i_ino : 0;
+ __entry->result_start = start;
+ __entry->result_group = group;
+ __entry->result_len = len;
),
- TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->result_group, __entry->result_start,
- __entry->result_len, __entry->result_logical)
+ __entry->result_len)
);
DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
- TP_PROTO(struct ext4_allocation_context *ac),
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
- TP_ARGS(ac)
+ TP_ARGS(sb, inode, group, start, len)
);
DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
- TP_PROTO(struct ext4_allocation_context *ac),
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
- TP_ARGS(ac)
+ TP_ARGS(sb, inode, group, start, len)
);
TRACE_EVENT(ext4_forget,
@@ -830,7 +901,8 @@ TRACE_EVENT(ext4_forget,
TP_ARGS(inode, is_metadata, block),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( int, is_metadata )
@@ -838,16 +910,18 @@ TRACE_EVENT(ext4_forget,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->is_metadata = is_metadata;
__entry->block = block;
),
- TP_printk("dev %s ino %lu mode 0%o is_metadata %d block %llu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->mode, __entry->is_metadata, __entry->block)
+ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->mode,
+ __entry->is_metadata, __entry->block)
);
TRACE_EVENT(ext4_da_update_reserve_space,
@@ -856,7 +930,8 @@ TRACE_EVENT(ext4_da_update_reserve_space,
TP_ARGS(inode, used_blocks),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
@@ -867,7 +942,8 @@ TRACE_EVENT(ext4_da_update_reserve_space,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
@@ -877,9 +953,10 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
),
- TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
- __entry->mode, (unsigned long long) __entry->i_blocks,
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino, __entry->mode,
+ (unsigned long long) __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
@@ -890,7 +967,8 @@ TRACE_EVENT(ext4_da_reserve_space,
TP_ARGS(inode, md_needed),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
@@ -900,7 +978,8 @@ TRACE_EVENT(ext4_da_reserve_space,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
@@ -909,8 +988,9 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
),
- TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->mode, (unsigned long long) __entry->i_blocks,
__entry->md_needed, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks)
@@ -922,7 +1002,8 @@ TRACE_EVENT(ext4_da_release_space,
TP_ARGS(inode, freed_blocks),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
@@ -933,7 +1014,8 @@ TRACE_EVENT(ext4_da_release_space,
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
@@ -943,8 +1025,9 @@ TRACE_EVENT(ext4_da_release_space,
__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
),
- TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino,
__entry->mode, (unsigned long long) __entry->i_blocks,
__entry->freed_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
@@ -956,18 +1039,20 @@ DECLARE_EVENT_CLASS(ext4__bitmap_load,
TP_ARGS(sb, group),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( __u32, group )
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
__entry->group = group;
),
- TP_printk("dev %s group %u",
- jbd2_dev_to_name(__entry->dev), __entry->group)
+ TP_printk("dev %d,%d group %u",
+ __entry->dev_major, __entry->dev_minor, __entry->group)
);
DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
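
Throughout these ext4 events (and the jbd2 events below) the raw dev_t field and the jbd2_dev_to_name() lookup at print time are replaced by major/minor numbers recorded with the standard MAJOR()/MINOR() macros. A minimal sketch of the split the tracepoints now perform at record time; the device numbers in the comments are illustrative:

    #include <linux/fs.h>
    #include <linux/kdev_t.h>

    static void record_dev(struct inode *inode, int *dev_major, int *dev_minor)
    {
            dev_t dev = inode->i_sb->s_dev;   /* device backing the filesystem */

            *dev_major = MAJOR(dev);          /* e.g. 8 for a sd disk */
            *dev_minor = MINOR(dev);          /* e.g. 3 for its third partition */
    }
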
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 6fa7cbab7d93..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -86,76 +86,62 @@ TRACE_EVENT(irq_handler_exit,
DECLARE_EVENT_CLASS(softirq,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_PROTO(unsigned int vec_nr),
- TP_ARGS(h, vec),
+ TP_ARGS(vec_nr),
TP_STRUCT__entry(
- __field( int, vec )
+ __field( unsigned int, vec )
),
TP_fast_assign(
- if (vec)
- __entry->vec = (int)(h - vec);
- else
- __entry->vec = (int)(long)h;
+ __entry->vec = vec_nr;
),
- TP_printk("vec=%d [action=%s]", __entry->vec,
+ TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
/**
* softirq_entry - called immediately before the softirq handler
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr: softirq vector number
*
- * The @h parameter, contains a pointer to the struct softirq_action
- * which has a pointer to the action handler that is called. By subtracting
- * the @vec pointer from the @h pointer, we can determine the softirq
- * number. Also, when used in combination with the softirq_exit tracepoint
- * we can determine the softirq latency.
+ * When used in combination with the softirq_exit tracepoint
+ * we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_entry,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_PROTO(unsigned int vec_nr),
- TP_ARGS(h, vec)
+ TP_ARGS(vec_nr)
);
/**
* softirq_exit - called immediately after the softirq handler returns
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr: softirq vector number
*
- * The @h parameter contains a pointer to the struct softirq_action
- * that has handled the softirq. By subtracting the @vec pointer from
- * the @h pointer, we can determine the softirq number. Also, when used in
- * combination with the softirq_entry tracepoint we can determine the softirq
- * latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_exit,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_PROTO(unsigned int vec_nr),
- TP_ARGS(h, vec)
+ TP_ARGS(vec_nr)
);
/**
* softirq_raise - called immediately when a softirq is raised
- * @h: pointer to struct softirq_action
- * @vec: pointer to first struct softirq_action in softirq_vec array
+ * @vec_nr: softirq vector number
*
- * The @h parameter contains a pointer to the softirq vector number which is
- * raised. @vec is NULL and it means @h includes vector number not
- * softirq_action. When used in combination with the softirq_entry tracepoint
- * we can determine the softirq raise latency.
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq raise-to-run latency.
*/
DEFINE_EVENT(softirq, softirq_raise,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+ TP_PROTO(unsigned int vec_nr),
- TP_ARGS(h, vec)
+ TP_ARGS(vec_nr)
);
#endif /* _TRACE_IRQ_H */
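
Because the softirq tracepoints now take the vector number directly, the call sites no longer pass softirq_action pointers. A hedged sketch of how kernel/softirq.c is expected to invoke them (the caller-side change is not part of this hunk):

    static void run_one_softirq(struct softirq_action *h,
                                struct softirq_action *softirq_vec)
    {
            unsigned int vec_nr = h - softirq_vec;  /* index computed once */

            trace_softirq_entry(vec_nr);
            h->action(h);
            trace_softirq_exit(vec_nr);
    }
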
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index bf16545cc977..7447ea9305b5 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -17,17 +17,19 @@ TRACE_EVENT(jbd2_checkpoint,
TP_ARGS(journal, result),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( int, result )
),
TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev);
+ __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
__entry->result = result;
),
- TP_printk("dev %s result %d",
- jbd2_dev_to_name(__entry->dev), __entry->result)
+ TP_printk("dev %d,%d result %d",
+ __entry->dev_major, __entry->dev_minor, __entry->result)
);
DECLARE_EVENT_CLASS(jbd2_commit,
@@ -37,20 +39,22 @@ DECLARE_EVENT_CLASS(jbd2_commit,
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev);
+ __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
__entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %s transaction %d sync %d",
- jbd2_dev_to_name(__entry->dev), __entry->transaction,
- __entry->sync_commit)
+ TP_printk("dev %d,%d transaction %d sync %d",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->transaction, __entry->sync_commit)
);
DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
@@ -87,22 +91,24 @@ TRACE_EVENT(jbd2_end_commit,
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( char, sync_commit )
__field( int, transaction )
__field( int, head )
),
TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev);
+ __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
__entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %s transaction %d sync %d head %d",
- jbd2_dev_to_name(__entry->dev), __entry->transaction,
- __entry->sync_commit, __entry->head)
+ TP_printk("dev %d,%d transaction %d sync %d head %d",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->transaction, __entry->sync_commit, __entry->head)
);
TRACE_EVENT(jbd2_submit_inode_data,
@@ -111,17 +117,20 @@ TRACE_EVENT(jbd2_submit_inode_data,
TP_ARGS(inode),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( ino_t, ino )
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
+ __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+ __entry->dev_minor = MINOR(inode->i_sb->s_dev);
__entry->ino = inode->i_ino;
),
- TP_printk("dev %s ino %lu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+ TP_printk("dev %d,%d ino %lu",
+ __entry->dev_major, __entry->dev_minor,
+ (unsigned long) __entry->ino)
);
TRACE_EVENT(jbd2_run_stats,
@@ -131,7 +140,8 @@ TRACE_EVENT(jbd2_run_stats,
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( unsigned long, tid )
__field( unsigned long, wait )
__field( unsigned long, running )
@@ -144,7 +154,8 @@ TRACE_EVENT(jbd2_run_stats,
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev_major = MAJOR(dev);
+ __entry->dev_minor = MINOR(dev);
__entry->tid = tid;
__entry->wait = stats->rs_wait;
__entry->running = stats->rs_running;
@@ -156,9 +167,9 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+ TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
"logging %u handle_count %u blocks %u blocks_logged %u",
- jbd2_dev_to_name(__entry->dev), __entry->tid,
+ __entry->dev_major, __entry->dev_minor, __entry->tid,
jiffies_to_msecs(__entry->wait),
jiffies_to_msecs(__entry->running),
jiffies_to_msecs(__entry->locked),
@@ -175,7 +186,8 @@ TRACE_EVENT(jbd2_checkpoint_stats,
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( unsigned long, tid )
__field( unsigned long, chp_time )
__field( __u32, forced_to_close )
@@ -184,7 +196,8 @@ TRACE_EVENT(jbd2_checkpoint_stats,
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev_major = MAJOR(dev);
+ __entry->dev_minor = MINOR(dev);
__entry->tid = tid;
__entry->chp_time = stats->cs_chp_time;
__entry->forced_to_close= stats->cs_forced_to_close;
@@ -192,9 +205,9 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->dropped = stats->cs_dropped;
),
- TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+ TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
"written %u dropped %u",
- jbd2_dev_to_name(__entry->dev), __entry->tid,
+ __entry->dev_major, __entry->dev_minor, __entry->tid,
jiffies_to_msecs(__entry->chp_time),
__entry->forced_to_close, __entry->written, __entry->dropped)
);
@@ -207,7 +220,8 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
TP_ARGS(journal, first_tid, block_nr, freed),
TP_STRUCT__entry(
- __field( dev_t, dev )
+ __field( int, dev_major )
+ __field( int, dev_minor )
__field( tid_t, tail_sequence )
__field( tid_t, first_tid )
__field(unsigned long, block_nr )
@@ -215,16 +229,18 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
),
TP_fast_assign(
- __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev);
+ __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
__entry->tail_sequence = journal->j_tail_sequence;
__entry->first_tid = first_tid;
__entry->block_nr = block_nr;
__entry->freed = freed;
),
- TP_printk("dev %s from %u to %u offset %lu freed %lu",
- jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
- __entry->first_tid, __entry->block_nr, __entry->freed)
+ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->tail_sequence, __entry->first_tid,
+ __entry->block_nr, __entry->freed)
);
#endif /* _TRACE_JBD2_H */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 370aa5a87322..c255fcc587bf 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -10,6 +10,7 @@
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
+#define RECLAIM_WB_MIXED 0x0010u
#define RECLAIM_WB_SYNC 0x0004u
#define RECLAIM_WB_ASYNC 0x0008u
@@ -17,13 +18,20 @@
(flags) ? __print_flags(flags, "|", \
{RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
{RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
+ {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \
{RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
#define trace_reclaim_flags(page, sync) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (sync == PAGEOUT_IO_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ )
+
+#define trace_shrink_flags(file, sync) ( \
+ (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
+ (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
)
TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -269,6 +277,40 @@ TRACE_EVENT(mm_vmscan_writepage,
show_reclaim_flags(__entry->reclaim_flags))
);
+TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
+
+ TP_PROTO(int nid, int zid,
+ unsigned long nr_scanned, unsigned long nr_reclaimed,
+ int priority, int reclaim_flags),
+
+ TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, zid)
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_reclaimed)
+ __field(int, priority)
+ __field(int, reclaim_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->zid = zid;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->priority = priority;
+ __entry->reclaim_flags = reclaim_flags;
+ ),
+
+ TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+ __entry->nid, __entry->zid,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->priority,
+ show_reclaim_flags(__entry->reclaim_flags))
+);
+
+
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
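
With the flag values above, trace_shrink_flags() collapses to two cases: lumpy sync reclaim reports RECLAIM_WB_MIXED|RECLAIM_WB_SYNC (0x0014), since anon and file pages are written back together, while asynchronous reclaim reports the page type plus RECLAIM_WB_ASYNC. A small worked sketch, assuming the LUMPY_MODE_* constants from mm/vmscan.c:

    /* worked example; LUMPY_MODE_ASYNC/SYNC are assumed from mm/vmscan.c */
    int flags_async = trace_shrink_flags(1, LUMPY_MODE_ASYNC);
            /* (file ? FILE : ANON) | ASYNC -> 0x0002 | 0x0008 = 0x000a */
    int flags_sync  = trace_shrink_flags(1, LUMPY_MODE_SYNC);
            /* MIXED | SYNC                 -> 0x0010 | 0x0004 = 0x0014 */
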
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index f345f66ae9d1..89a2b2db4375 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(long, nr_to_write)
__field(long, pages_skipped)
__field(int, sync_mode)
- __field(int, nonblocking)
- __field(int, encountered_congestion)
__field(int, for_kupdate)
__field(int, for_background)
__field(int, for_reclaim)
@@ -153,6 +151,41 @@ DEFINE_WBC_EVENT(wbc_balance_dirty_written);
DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
DEFINE_WBC_EVENT(wbc_writepage);
+DECLARE_EVENT_CLASS(writeback_congest_waited_template,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, usec_timeout )
+ __field( unsigned int, usec_delayed )
+ ),
+
+ TP_fast_assign(
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ ),
+
+ TP_printk("usec_timeout=%u usec_delayed=%u",
+ __entry->usec_timeout,
+ __entry->usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed)
+);
+
#endif /* _TRACE_WRITEBACK_H */
/* This part must be outside protection */
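
Both new events report the requested and the actually observed delay in microseconds, so a caller converts from jiffies at the call site. A hedged sketch of such a wrapper (the real call sites are not part of this hunk); congestion_wait() and jiffies_to_usecs() are existing kernel helpers:

    static long congestion_wait_traced(int sync, long timeout)
    {
            unsigned long start = jiffies;
            long ret = congestion_wait(sync, timeout);

            trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                            jiffies_to_usecs(jiffies - start));
            return ret;
    }
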
diff --git a/include/xen/Kbuild b/include/xen/Kbuild
index 4e65c16a445b..84ad8f02fee5 100644
--- a/include/xen/Kbuild
+++ b/include/xen/Kbuild
@@ -1 +1,2 @@
header-y += evtchn.h
+header-y += privcmd.h
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index d3938d3e71f8..d7a6c13bde69 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -186,6 +186,35 @@ struct xen_translate_gpfn_list {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_memory_map 9
+struct xen_memory_map {
+ /*
+ * On call the number of entries which can be stored in buffer. On
+ * return the number of entries which have been stored in
+ * buffer.
+ */
+ unsigned int nr_entries;
+
+ /*
+ * Entries in the buffer are in the same format as returned by the
+ * BIOS INT 0x15 EAX=0xE820 call.
+ */
+ GUEST_HANDLE(void) buffer;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_machine_memory_map 10
+
/*
* Prevent the balloon driver from changing the memory reservation
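
A hedged sketch of issuing the new subcall from the kernel side: HYPERVISOR_memory_op() and set_xen_guest_handle() are the existing Xen helpers, and the buffer holds E820-format entries as the comment states; the entry-count handling is simplified.

    static int fetch_pseudo_phys_map(struct e820entry *entries, unsigned int max)
    {
            struct xen_memory_map memmap;

            memmap.nr_entries = max;                    /* capacity on input */
            set_xen_guest_handle(memmap.buffer, entries);

            /* on success nr_entries holds the number of entries written */
            return HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
    }
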
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h
new file mode 100644
index 000000000000..b42cdfd92fee
--- /dev/null
+++ b/include/xen/privcmd.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ * privcmd.h
+ *
+ * Interface to /proc/xen/privcmd.
+ *
+ * Copyright (c) 2003-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
+#define __LINUX_PUBLIC_PRIVCMD_H__
+
+#include <linux/types.h>
+
+typedef unsigned long xen_pfn_t;
+
+#ifndef __user
+#define __user
+#endif
+
+struct privcmd_hypercall {
+ __u64 op;
+ __u64 arg[5];
+};
+
+struct privcmd_mmap_entry {
+ __u64 va;
+ __u64 mfn;
+ __u64 npages;
+};
+
+struct privcmd_mmap {
+ int num;
+ domid_t dom; /* target domain */
+ struct privcmd_mmap_entry __user *entry;
+};
+
+struct privcmd_mmapbatch {
+ int num; /* number of pages to populate */
+ domid_t dom; /* target domain */
+ __u64 addr; /* virtual address */
+ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
+};
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: &privcmd_hypercall_t
+ * Return: Value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL \
+ _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
+#define IOCTL_PRIVCMD_MMAP \
+ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
+#define IOCTL_PRIVCMD_MMAPBATCH \
+ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+
+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
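
From user space this header is used together with ioctl() on the privcmd device it describes. A hedged sketch; the hypercall number 0 and its arguments are purely illustrative:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <xen/privcmd.h>

    int issue_hypercall(void)
    {
            struct privcmd_hypercall call = { .op = 0, .arg = { 0 } };
            int fd = open("/proc/xen/privcmd", O_RDWR);
            int ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
            close(fd);
            return ret;
    }
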
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 351f4051f6d8..98b92154a264 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -23,4 +23,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long mfn, int nr,
+ pgprot_t prot, unsigned domid);
+
#endif /* INCLUDE_XEN_OPS_H */
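
xen_remap_domain_mfn_range() is the building block a privileged driver (such as the privcmd mmap path) needs to map foreign machine frames into a userspace VMA. A minimal sketch, with error handling and accounting omitted:

    static int map_foreign_frames(struct vm_area_struct *vma,
                                  unsigned long mfn, int nr, domid_t domid)
    {
            /* map nr frames starting at mfn to the start of the vma */
            return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
                                              vma->vm_page_prot, domid);
    }
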
diff --git a/init/Kconfig b/init/Kconfig
index 3ae8ffe738eb..88c10468db46 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -518,7 +518,6 @@ if CGROUPS
config CGROUP_DEBUG
bool "Example debug cgroup subsystem"
- depends on CGROUPS
default n
help
This option enables a simple cgroup subsystem that
@@ -529,7 +528,6 @@ config CGROUP_DEBUG
config CGROUP_NS
bool "Namespace cgroup subsystem"
- depends on CGROUPS
help
Provides a simple namespace cgroup subsystem to
provide hierarchical naming of sets of namespaces,
@@ -538,21 +536,18 @@ config CGROUP_NS
config CGROUP_FREEZER
bool "Freezer cgroup subsystem"
- depends on CGROUPS
help
Provides a way to freeze and unfreeze all tasks in a
cgroup.
config CGROUP_DEVICE
bool "Device controller for cgroups"
- depends on CGROUPS && EXPERIMENTAL
help
Provides a cgroup implementing whitelists for devices which
a process in the cgroup can mknod or open.
config CPUSETS
bool "Cpuset support"
- depends on CGROUPS
help
This option will let you create and manage CPUSETs which
allow dynamically partitioning a system into sets of CPUs and
@@ -568,7 +563,6 @@ config PROC_PID_CPUSET
config CGROUP_CPUACCT
bool "Simple CPU accounting cgroup subsystem"
- depends on CGROUPS
help
Provides a simple Resource Controller for monitoring the
total CPU consumed by the tasks in a cgroup.
@@ -578,11 +572,10 @@ config RESOURCE_COUNTERS
help
This option enables controller independent resource accounting
infrastructure that works with cgroups.
- depends on CGROUPS
config CGROUP_MEM_RES_CTLR
bool "Memory Resource Controller for Control Groups"
- depends on CGROUPS && RESOURCE_COUNTERS
+ depends on RESOURCE_COUNTERS
select MM_OWNER
help
Provides a memory resource controller that manages both anonymous
@@ -623,7 +616,7 @@ config CGROUP_MEM_RES_CTLR_SWAP
menuconfig CGROUP_SCHED
bool "Group CPU scheduler"
- depends on EXPERIMENTAL && CGROUPS
+ depends on EXPERIMENTAL
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
@@ -652,7 +645,7 @@ endif #CGROUP_SCHED
config BLK_CGROUP
tristate "Block IO controller"
- depends on CGROUPS && BLOCK
+ depends on BLOCK
default n
---help---
Generic block IO controller cgroup interface. This is the common
@@ -682,6 +675,59 @@ config DEBUG_BLK_CGROUP
endif # CGROUPS
+menuconfig NAMESPACES
+ bool "Namespaces support" if EMBEDDED
+ default !EMBEDDED
+ help
+ Provides the way to make tasks work with different objects using
+ the same id. For example same IPC id may refer to different objects
+ or same user id or pid may refer to different tasks when used in
+ different namespaces.
+
+if NAMESPACES
+
+config UTS_NS
+ bool "UTS namespace"
+ default y
+ help
+ In this namespace tasks see different info provided with the
+ uname() system call
+
+config IPC_NS
+ bool "IPC namespace"
+ depends on (SYSVIPC || POSIX_MQUEUE)
+ default y
+ help
+ In this namespace tasks work with IPC ids which correspond to
+ different IPC objects in different namespaces.
+
+config USER_NS
+ bool "User namespace (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default y
+ help
+ This allows containers, i.e. vservers, to use user namespaces
+ to provide different user info for different servers.
+ If unsure, say N.
+
+config PID_NS
+ bool "PID Namespaces"
+ default y
+ help
+ Support process id namespaces. This allows having multiple
+ processes with the same pid as long as they are in different
+ pid namespaces. This is a building block of containers.
+
+config NET_NS
+ bool "Network namespace"
+ depends on NET
+ default y
+ help
+ Allow user space to create what appear to be multiple instances
+ of the network stack.
+
+endif # NAMESPACES
+
config MM_OWNER
bool
@@ -734,57 +780,6 @@ config RELAY
If unsure, say N.
-config NAMESPACES
- bool "Namespaces support" if EMBEDDED
- default !EMBEDDED
- help
- Provides the way to make tasks work with different objects using
- the same id. For example same IPC id may refer to different objects
- or same user id or pid may refer to different tasks when used in
- different namespaces.
-
-config UTS_NS
- bool "UTS namespace"
- depends on NAMESPACES
- help
- In this namespace tasks see different info provided with the
- uname() system call
-
-config IPC_NS
- bool "IPC namespace"
- depends on NAMESPACES && (SYSVIPC || POSIX_MQUEUE)
- help
- In this namespace tasks work with IPC ids which correspond to
- different IPC objects in different namespaces.
-
-config USER_NS
- bool "User namespace (EXPERIMENTAL)"
- depends on NAMESPACES && EXPERIMENTAL
- help
- This allows containers, i.e. vservers, to use user namespaces
- to provide different user info for different servers.
- If unsure, say N.
-
-config PID_NS
- bool "PID Namespaces (EXPERIMENTAL)"
- default n
- depends on NAMESPACES && EXPERIMENTAL
- help
- Support process id namespaces. This allows having multiple
- processes with the same pid as long as they are in different
- pid namespaces. This is a building block of containers.
-
- Unless you want to work with an experimental feature
- say N here.
-
-config NET_NS
- bool "Network namespace"
- default n
- depends on NAMESPACES && EXPERIMENTAL && NET
- help
- Allow user space to create what appear to be multiple instances
- of the network stack.
-
config BLK_DEV_INITRD
bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
depends on BROKEN || !FRV
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 62a47eafa8e9..830aaec9c7d5 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -291,7 +291,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
if (err)
return err;
- sys_chdir("/root");
+ sys_chdir((const char __user __force *)"/root");
ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
current->fs->pwd.mnt->mnt_sb->s_type->name,
@@ -488,5 +488,5 @@ void __init prepare_namespace(void)
out:
devtmpfs_mount("dev");
sys_mount(".", "/", NULL, MS_MOVE, NULL);
- sys_chroot(".");
+ sys_chroot((const char __user __force *)".");
}
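
The casts here and in the following init/ hunks silence sparse address-space warnings: this early-boot code passes kernel string literals to sys_* entry points whose parameters are annotated __user, which is legitimate because it runs in a kernel-thread context where the user-copy primitives accept kernel addresses. A condensed sketch of the annotation pattern; the macro definitions mirror include/linux/compiler.h and the prototype is abbreviated:

    #ifdef __CHECKER__
    # define __user    __attribute__((noderef, address_space(1)))
    # define __force   __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    long sys_unlink(const char __user *pathname);   /* declared to take a user pointer */

    static void early_boot_cleanup(void)
    {
        /*
         * Passing a kernel string is intentional here; the __force cast
         * tells sparse this address-space mismatch is deliberate, so real
         * mistakes elsewhere stay visible.
         */
        sys_unlink((const char __user __force *) "/dev/ram");
    }
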
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 69aebbf8fd2d..32c4799b8c91 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
wait_for_device_probe();
- fd = sys_open("/dev/md0", 0, 0);
+ fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
if (fd >= 0) {
sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
sys_close(fd);
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index bf3ef667bf36..6e1ee6987c78 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -168,7 +168,7 @@ int __init rd_load_image(char *from)
char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif
- out_fd = sys_open("/dev/ram", O_RDWR, 0);
+ out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
if (out_fd < 0)
goto out;
@@ -267,7 +267,7 @@ noclose_input:
sys_close(out_fd);
out:
kfree(buf);
- sys_unlink("/dev/ram");
+ sys_unlink((const char __user __force *) "/dev/ram");
return res;
}
diff --git a/init/initramfs.c b/init/initramfs.c
index 4b9c20205092..d9c6e782ff53 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -528,7 +528,7 @@ static void __init clean_rootfs(void)
struct linux_dirent64 *dirp;
int num;
- fd = sys_open("/", O_RDONLY, 0);
+ fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
WARN_ON(fd < 0);
if (fd < 0)
return;
@@ -590,7 +590,8 @@ static int __init populate_rootfs(void)
}
printk(KERN_INFO "rootfs image is not initramfs (%s)"
"; looks like an initrd\n", err);
- fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
+ fd = sys_open((const char __user __force *) "/initrd.image",
+ O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
initrd_end - initrd_start);
diff --git a/init/noinitramfs.c b/init/noinitramfs.c
index f4c1a3a1b8c5..267739d85179 100644
--- a/init/noinitramfs.c
+++ b/init/noinitramfs.c
@@ -29,17 +29,17 @@ static int __init default_rootfs(void)
{
int err;
- err = sys_mkdir("/dev", 0755);
+ err = sys_mkdir((const char __user __force *) "/dev", 0755);
if (err < 0)
goto out;
- err = sys_mknod((const char __user *) "/dev/console",
+ err = sys_mknod((const char __user __force *) "/dev/console",
S_IFCHR | S_IRUSR | S_IWUSR,
new_encode_dev(MKDEV(5, 1)));
if (err < 0)
goto out;
- err = sys_mkdir("/root", 0700);
+ err = sys_mkdir((const char __user __force *) "/root", 0700);
if (err < 0)
goto out;
diff --git a/ipc/compat.c b/ipc/compat.c
index 9dc2c7d3c9e6..845a28738d3a 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
struct semid64_ds __user *up64;
int version = compat_ipc_parse_version(&third);
+ memset(&s64, 0, sizeof(s64));
+
if (!uptr)
return -EINVAL;
if (get_user(pad, (u32 __user *) uptr))
@@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
int version = compat_ipc_parse_version(&second);
void __user *p;
+ memset(&m64, 0, sizeof(m64));
+
switch (second & (~IPC_64)) {
case IPC_INFO:
case IPC_RMID:
@@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
int err, err2;
int version = compat_ipc_parse_version(&second);
+ memset(&s64, 0, sizeof(s64));
+
switch (second & (~IPC_64)) {
case IPC_RMID:
case SHM_LOCK:
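
The memset() calls added above zero the on-stack semid64_ds, msqid64_ds and shmid64_ds temporaries before they are partially filled and copied back to user space, so structure padding and any fields a particular command leaves untouched cannot leak old stack contents. A generic sketch of the idiom; the struct and function names are made up:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct reply {
        u32 value;
        u32 flags;
        u64 reserved;       /* may never be written explicitly */
    };

    static long fill_reply(struct reply __user *ureply, u32 value)
    {
        struct reply r;

        memset(&r, 0, sizeof(r));   /* clear padding and unwritten fields */
        r.value = value;

        return copy_to_user(ureply, &r, sizeof(r)) ? -EFAULT : 0;
    }
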
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index d8d1e9ff4e88..380ea4fe08e7 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
void __user *p = NULL;
if (u_attr && oflag & O_CREAT) {
struct mq_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
p = compat_alloc_user_space(sizeof(attr));
if (get_compat_mq_attr(&attr, u_attr) ||
copy_to_user(p, &attr, sizeof(attr)))
@@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
long ret;
+ memset(&mqstat, 0, sizeof(mqstat));
+
if (u_mqstat) {
if (get_compat_mq_attr(&mqstat, u_mqstat) ||
copy_to_user(p, &mqstat, sizeof(mqstat)))
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e1e7b9635f5d..3a61ffefe884 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -116,6 +116,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -769,7 +770,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
inode = dentry->d_inode;
if (inode)
- atomic_inc(&inode->i_count);
+ ihold(inode);
err = mnt_want_write(ipc_ns->mq_mnt);
if (err)
goto out_err;
diff --git a/ipc/shm.c b/ipc/shm.c
index 7bc46a9fe1f8..fd658a1c2b88 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -108,7 +108,11 @@ void __init shm_init (void)
{
shm_init_ns(&init_ipc_ns);
ipc_init_proc_interface("sysvipc/shm",
- " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
+#if BITS_PER_LONG <= 32
+ " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
+#else
+ " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
+#endif
IPC_SHM_IDS, sysvipc_shm_proc_show);
}
@@ -544,6 +548,34 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
}
/*
+ * Calculate and add used RSS and swap pages of a shm.
+ * Called with shm_ids.rw_mutex held as a reader
+ */
+static void shm_add_rss_swap(struct shmid_kernel *shp,
+ unsigned long *rss_add, unsigned long *swp_add)
+{
+ struct inode *inode;
+
+ inode = shp->shm_file->f_path.dentry->d_inode;
+
+ if (is_file_hugepages(shp->shm_file)) {
+ struct address_space *mapping = inode->i_mapping;
+ struct hstate *h = hstate_file(shp->shm_file);
+ *rss_add += pages_per_huge_page(h) * mapping->nrpages;
+ } else {
+#ifdef CONFIG_SHMEM
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ spin_lock(&info->lock);
+ *rss_add += inode->i_mapping->nrpages;
+ *swp_add += info->swapped;
+ spin_unlock(&info->lock);
+#else
+ *rss_add += inode->i_mapping->nrpages;
+#endif
+ }
+}
+
+/*
* Called with shm_ids.rw_mutex held as a reader
*/
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
@@ -560,30 +592,13 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
for (total = 0, next_id = 0; total < in_use; next_id++) {
struct kern_ipc_perm *ipc;
struct shmid_kernel *shp;
- struct inode *inode;
ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
if (ipc == NULL)
continue;
shp = container_of(ipc, struct shmid_kernel, shm_perm);
- inode = shp->shm_file->f_path.dentry->d_inode;
-
- if (is_file_hugepages(shp->shm_file)) {
- struct address_space *mapping = inode->i_mapping;
- struct hstate *h = hstate_file(shp->shm_file);
- *rss += pages_per_huge_page(h) * mapping->nrpages;
- } else {
-#ifdef CONFIG_SHMEM
- struct shmem_inode_info *info = SHMEM_I(inode);
- spin_lock(&info->lock);
- *rss += inode->i_mapping->nrpages;
- *swp += info->swapped;
- spin_unlock(&info->lock);
-#else
- *rss += inode->i_mapping->nrpages;
-#endif
- }
+ shm_add_rss_swap(shp, rss, swp);
total++;
}
@@ -1072,6 +1087,9 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
struct shmid_kernel *shp = it;
+ unsigned long rss = 0, swp = 0;
+
+ shm_add_rss_swap(shp, &rss, &swp);
#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
@@ -1081,7 +1099,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
return seq_printf(s,
"%10d %10d %4o " SIZE_SPEC " %5u %5u "
- "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
+ "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
+ SIZE_SPEC " " SIZE_SPEC "\n",
shp->shm_perm.key,
shp->shm_perm.id,
shp->shm_perm.mode,
@@ -1095,6 +1114,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
shp->shm_perm.cgid,
shp->shm_atim,
shp->shm_dtim,
- shp->shm_ctim);
+ shp->shm_ctim,
+ rss * PAGE_SIZE,
+ swp * PAGE_SIZE);
}
#endif
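
With shm_add_rss_swap() factored out, every row of /proc/sysvipc/shm now carries two extra trailing columns: the segment's resident and swapped sizes in bytes (page counts times PAGE_SIZE). A rough user-space reader for the resulting 16-column format, assuming the header layout added above:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sysvipc/shm", "r");
        char line[512];
        unsigned long long size, rss, swap;
        unsigned int perms;
        int key, shmid;

        if (!f)
            return 1;
        fgets(line, sizeof(line), f);    /* skip the header line */
        while (fgets(line, sizeof(line), f)) {
            /* key shmid perms size ...ten middle columns... rss swap */
            if (sscanf(line,
                       "%d %d %o %llu %*u %*u %*u %*u %*u %*u %*u %*u %*u %*u %llu %llu",
                       &key, &shmid, &perms, &size, &rss, &swap) == 6)
                printf("shmid %d: size %llu, rss %llu, swap %llu\n",
                       shmid, size, rss, swap);
        }
        fclose(f);
        return 0;
    }
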
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7b69b8d0313d..5cf366965d0c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -243,6 +243,11 @@ static int notify_on_release(const struct cgroup *cgrp)
return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}
+static int clone_children(const struct cgroup *cgrp)
+{
+ return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+}
+
/*
* for_each_subsys() allows you to iterate on each subsystem attached to
* an active hierarchy
@@ -777,6 +782,7 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -1039,6 +1045,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",noprefix");
if (strlen(root->release_agent_path))
seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+ if (clone_children(&root->top_cgroup))
+ seq_puts(seq, ",clone_children");
if (strlen(root->name))
seq_printf(seq, ",name=%s", root->name);
mutex_unlock(&cgroup_mutex);
@@ -1049,6 +1057,7 @@ struct cgroup_sb_opts {
unsigned long subsys_bits;
unsigned long flags;
char *release_agent;
+ bool clone_children;
char *name;
/* User explicitly requested empty subsystem */
bool none;
@@ -1065,7 +1074,8 @@ struct cgroup_sb_opts {
*/
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
- char *token, *o = data ?: "all";
+ char *token, *o = data;
+ bool all_ss = false, one_ss = false;
unsigned long mask = (unsigned long)-1;
int i;
bool module_pin_failed = false;
@@ -1081,22 +1091,27 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
while ((token = strsep(&o, ",")) != NULL) {
if (!*token)
return -EINVAL;
- if (!strcmp(token, "all")) {
- /* Add all non-disabled subsystems */
- opts->subsys_bits = 0;
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- if (!ss->disabled)
- opts->subsys_bits |= 1ul << i;
- }
- } else if (!strcmp(token, "none")) {
+ if (!strcmp(token, "none")) {
/* Explicitly have no subsystems */
opts->none = true;
- } else if (!strcmp(token, "noprefix")) {
+ continue;
+ }
+ if (!strcmp(token, "all")) {
+ /* Mutually exclusive option 'all' + subsystem name */
+ if (one_ss)
+ return -EINVAL;
+ all_ss = true;
+ continue;
+ }
+ if (!strcmp(token, "noprefix")) {
set_bit(ROOT_NOPREFIX, &opts->flags);
- } else if (!strncmp(token, "release_agent=", 14)) {
+ continue;
+ }
+ if (!strcmp(token, "clone_children")) {
+ opts->clone_children = true;
+ continue;
+ }
+ if (!strncmp(token, "release_agent=", 14)) {
/* Specifying two release agents is forbidden */
if (opts->release_agent)
return -EINVAL;
@@ -1104,7 +1119,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
if (!opts->release_agent)
return -ENOMEM;
- } else if (!strncmp(token, "name=", 5)) {
+ continue;
+ }
+ if (!strncmp(token, "name=", 5)) {
const char *name = token + 5;
/* Can't specify an empty name */
if (!strlen(name))
@@ -1126,20 +1143,44 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
GFP_KERNEL);
if (!opts->name)
return -ENOMEM;
- } else {
- struct cgroup_subsys *ss;
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- ss = subsys[i];
- if (ss == NULL)
- continue;
- if (!strcmp(token, ss->name)) {
- if (!ss->disabled)
- set_bit(i, &opts->subsys_bits);
- break;
- }
- }
- if (i == CGROUP_SUBSYS_COUNT)
- return -ENOENT;
+
+ continue;
+ }
+
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss == NULL)
+ continue;
+ if (strcmp(token, ss->name))
+ continue;
+ if (ss->disabled)
+ continue;
+
+ /* Mutually exclusive option 'all' + subsystem name */
+ if (all_ss)
+ return -EINVAL;
+ set_bit(i, &opts->subsys_bits);
+ one_ss = true;
+
+ break;
+ }
+ if (i == CGROUP_SUBSYS_COUNT)
+ return -ENOENT;
+ }
+
+ /*
+ * If the 'all' option was specified select all the subsystems,
+ * otherwise 'all, 'none' and a subsystem name options were not
+ * specified, let's default to 'all'
+ */
+ if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss == NULL)
+ continue;
+ if (ss->disabled)
+ continue;
+ set_bit(i, &opts->subsys_bits);
}
}
@@ -1354,6 +1395,8 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
strcpy(root->release_agent_path, opts->release_agent);
if (opts->name)
strcpy(root->name, opts->name);
+ if (opts->clone_children)
+ set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
return root;
}
@@ -1879,6 +1922,8 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
const char *buffer)
{
BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+ if (strlen(buffer) >= PATH_MAX)
+ return -EINVAL;
if (!cgroup_lock_live_group(cgrp))
return -ENODEV;
strcpy(cgrp->root->release_agent_path, buffer);
@@ -3172,6 +3217,23 @@ fail:
return ret;
}
+static u64 cgroup_clone_children_read(struct cgroup *cgrp,
+ struct cftype *cft)
+{
+ return clone_children(cgrp);
+}
+
+static int cgroup_clone_children_write(struct cgroup *cgrp,
+ struct cftype *cft,
+ u64 val)
+{
+ if (val)
+ set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+ else
+ clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+ return 0;
+}
+
/*
* for the common functions, 'private' gives the type of file
*/
@@ -3202,6 +3264,11 @@ static struct cftype files[] = {
.write_string = cgroup_write_event_control,
.mode = S_IWUGO,
},
+ {
+ .name = "cgroup.clone_children",
+ .read_u64 = cgroup_clone_children_read,
+ .write_u64 = cgroup_clone_children_write,
+ },
};
static struct cftype cft_release_agent = {
@@ -3331,6 +3398,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (notify_on_release(parent))
set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ if (clone_children(parent))
+ set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
+
for_each_subsys(root, ss) {
struct cgroup_subsys_state *css = ss->create(ss, cgrp);
@@ -3345,6 +3415,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_destroy;
}
/* At error, ->destroy() callback has to free assigned ID. */
+ if (clone_children(parent) && ss->post_clone)
+ ss->post_clone(ss, cgrp);
}
cgroup_lock_hierarchy(root);
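
Taken together, the cgroup changes add a clone_children mount option, a per-group cgroup.clone_children file, and inheritance of the flag in cgroup_create(); when the flag is set on the parent, subsystems that implement post_clone copy the parent's configuration into the new group. A hedged user-space sketch of exercising it; the mount point /mnt/cg is assumed to exist, and "none,name=demo" creates a named hierarchy with no controllers bound:

    #include <stdio.h>
    #include <sys/mount.h>
    #include <sys/stat.h>

    int main(void)
    {
        FILE *f;

        /* Mount a named hierarchy with the new option enabled. */
        if (mount("cgroup", "/mnt/cg", "cgroup", 0,
                  "none,name=demo,clone_children") < 0) {
            perror("mount");
            return 1;
        }

        /* New children start with clone_children inherited from the root... */
        mkdir("/mnt/cg/child", 0755);

        /* ...and the flag can be flipped per group afterwards. */
        f = fopen("/mnt/cg/child/cgroup.clone_children", "w");
        if (f) {
            fputs("0\n", f);
            fclose(f);
        }
        return 0;
    }
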
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index ce71ed53e88f..e7bebb7c6c38 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,20 +48,19 @@ static inline struct freezer *task_freezer(struct task_struct *task)
struct freezer, css);
}
-int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
{
- struct freezer *freezer;
- enum freezer_state state;
+ enum freezer_state state = task_freezer(task)->state;
+ return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+}
+int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+ int result;
task_lock(task);
- freezer = task_freezer(task);
- if (!freezer->css.cgroup->parent)
- state = CGROUP_THAWED; /* root cgroup can't be frozen */
- else
- state = freezer->state;
+ result = __cgroup_freezing_or_frozen(task);
task_unlock(task);
-
- return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
+ return result;
}
/*
@@ -154,13 +153,6 @@ static void freezer_destroy(struct cgroup_subsys *ss,
kfree(cgroup_freezer(cgroup));
}
-/* Task is frozen or will freeze immediately when next it gets woken */
-static bool is_task_frozen_enough(struct task_struct *task)
-{
- return frozen(task) ||
- (task_is_stopped_or_traced(task) && freezing(task));
-}
-
/*
* The call to cgroup_lock() in the freezer.state write method prevents
* a write to that file racing against an attach, and hence the
@@ -174,24 +166,25 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
/*
* Anything frozen can't move or be moved to/from.
- *
- * Since orig_freezer->state == FROZEN means that @task has been
- * frozen, so it's sufficient to check the latter condition.
*/
- if (is_task_frozen_enough(task))
+ freezer = cgroup_freezer(new_cgroup);
+ if (freezer->state != CGROUP_THAWED)
return -EBUSY;
- freezer = cgroup_freezer(new_cgroup);
- if (freezer->state == CGROUP_FROZEN)
+ rcu_read_lock();
+ if (__cgroup_freezing_or_frozen(task)) {
+ rcu_read_unlock();
return -EBUSY;
+ }
+ rcu_read_unlock();
if (threadgroup) {
struct task_struct *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
- if (is_task_frozen_enough(c)) {
+ if (__cgroup_freezing_or_frozen(c)) {
rcu_read_unlock();
return -EBUSY;
}
@@ -236,31 +229,30 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
/*
* caller must hold freezer->lock
*/
-static void update_freezer_state(struct cgroup *cgroup,
+static void update_if_frozen(struct cgroup *cgroup,
struct freezer *freezer)
{
struct cgroup_iter it;
struct task_struct *task;
unsigned int nfrozen = 0, ntotal = 0;
+ enum freezer_state old_state = freezer->state;
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (is_task_frozen_enough(task))
+ if (frozen(task))
nfrozen++;
}
- /*
- * Transition to FROZEN when no new tasks can be added ensures
- * that we never exist in the FROZEN state while there are unfrozen
- * tasks.
- */
- if (nfrozen == ntotal)
- freezer->state = CGROUP_FROZEN;
- else if (nfrozen > 0)
- freezer->state = CGROUP_FREEZING;
- else
- freezer->state = CGROUP_THAWED;
+ if (old_state == CGROUP_THAWED) {
+ BUG_ON(nfrozen > 0);
+ } else if (old_state == CGROUP_FREEZING) {
+ if (nfrozen == ntotal)
+ freezer->state = CGROUP_FROZEN;
+ } else { /* old_state == CGROUP_FROZEN */
+ BUG_ON(nfrozen != ntotal);
+ }
+
cgroup_iter_end(cgroup, &it);
}
@@ -279,7 +271,7 @@ static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
if (state == CGROUP_FREEZING) {
/* We change from FREEZING to FROZEN lazily if the cgroup was
* only partially frozen when we exited write. */
- update_freezer_state(cgroup, freezer);
+ update_if_frozen(cgroup, freezer);
state = freezer->state;
}
spin_unlock_irq(&freezer->lock);
@@ -301,7 +293,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
while ((task = cgroup_iter_next(cgroup, &it))) {
if (!freeze_task(task, true))
continue;
- if (is_task_frozen_enough(task))
+ if (frozen(task))
continue;
if (!freezing(task) && !freezer_should_skip(task))
num_cant_freeze_now++;
@@ -335,7 +327,7 @@ static int freezer_change_state(struct cgroup *cgroup,
spin_lock_irq(&freezer->lock);
- update_freezer_state(cgroup, freezer);
+ update_if_frozen(cgroup, freezer);
if (goal_state == freezer->state)
goto out;
diff --git a/kernel/cred.c b/kernel/cred.c
index 9a3e22641fe7..6a1aa004e376 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -325,7 +325,7 @@ EXPORT_SYMBOL(prepare_creds);
/*
* Prepare credentials for current to perform an execve()
- * - The caller must hold current->cred_guard_mutex
+ * - The caller must hold ->cred_guard_mutex
*/
struct cred *prepare_exec_creds(void)
{
@@ -384,8 +384,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
struct cred *new;
int ret;
- mutex_init(&p->cred_guard_mutex);
-
if (
#ifdef CONFIG_KEYS
!p->cred->thread_keyring &&
diff --git a/kernel/exit.c b/kernel/exit.c
index e2bdf37f9fde..b194febf5799 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -50,6 +50,7 @@
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
+#include <linux/oom.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -687,6 +688,8 @@ static void exit_mm(struct task_struct * tsk)
enter_lazy_tlb(mm, current);
/* We don't want this task to be frozen prematurely */
clear_freeze_flag(tsk);
+ if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ atomic_dec(&mm->oom_disable_count);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
@@ -700,6 +703,8 @@ static void exit_mm(struct task_struct * tsk)
* space.
*/
static struct task_struct *find_new_reaper(struct task_struct *father)
+ __releases(&tasklist_lock)
+ __acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *thread;
diff --git a/kernel/fork.c b/kernel/fork.c
index c445f8cc408d..3b159c5991b7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -65,6 +65,7 @@
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
+#include <linux/oom.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -488,6 +489,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
+ atomic_set(&mm->oom_disable_count, 0);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
@@ -741,6 +743,8 @@ good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
+ if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ atomic_inc(&mm->oom_disable_count);
tsk->mm = mm;
tsk->active_mm = mm;
@@ -904,6 +908,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->oom_adj = current->signal->oom_adj;
sig->oom_score_adj = current->signal->oom_score_adj;
+ mutex_init(&sig->cred_guard_mutex);
+
return 0;
}
@@ -1299,8 +1305,13 @@ bad_fork_cleanup_io:
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
- if (p->mm)
+ if (p->mm) {
+ task_lock(p);
+ if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ atomic_dec(&p->mm->oom_disable_count);
+ task_unlock(p);
mmput(p->mm);
+ }
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
@@ -1693,6 +1704,10 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
+ if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+ atomic_dec(&mm->oom_disable_count);
+ atomic_inc(&new_mm->oom_disable_count);
+ }
activate_mm(active_mm, new_mm);
new_mm = mm;
}
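
The fork and exit hunks above keep the new mm->oom_disable_count in step with how many tasks sharing an mm have oom_score_adj at OOM_SCORE_ADJ_MIN, so the OOM killer can tell when the whole address space is exempt. From user space the counter is driven simply by writing the minimum score; a sketch, with the helper name made up:

    #include <stdio.h>

    /* Exempt the calling process from the OOM killer (needs privilege). */
    static int oom_disable_self(void)
    {
        FILE *f = fopen("/proc/self/oom_score_adj", "w");

        if (!f)
            return -1;
        fputs("-1000\n", f);    /* OOM_SCORE_ADJ_MIN */
        fclose(f);
        return 0;
    }

    int main(void)
    {
        return oom_disable_self() ? 1 : 0;
    }
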
diff --git a/kernel/futex.c b/kernel/futex.c
index a118bf160e0b..6c683b37f2ce 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -169,7 +169,7 @@ static void get_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
- atomic_inc(&key->shared.inode->i_count);
+ ihold(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
atomic_inc(&key->private.mm->mm_count);
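
Here and in the mqueue hunk earlier, the open-coded atomic_inc(&inode->i_count) becomes ihold(), the helper that takes an extra reference on an inode the caller already holds (and warns if it did not). A minimal sketch of the substitution:

    #include <linux/fs.h>

    static void pin_inode(struct inode *inode)
    {
        /*
         * Old style:  atomic_inc(&inode->i_count);
         * New style:  ihold(inode), which keeps i_count private to the
         *             VFS and complains if the inode had no reference.
         */
        ihold(inode);
    }
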
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9d917ff72675..9988d03797f5 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -393,3 +393,18 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
struct irq_desc *desc = irq_to_desc(irq);
return desc ? desc->kstat_irqs[cpu] : 0;
}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+unsigned int kstat_irqs(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int cpu;
+ int sum = 0;
+
+ if (!desc)
+ return 0;
+ for_each_possible_cpu(cpu)
+ sum += desc->kstat_irqs[cpu];
+ return sum;
+}
+#endif /* CONFIG_GENERIC_HARDIRQS */
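
The new kstat_irqs() complements kstat_irqs_cpu() above by summing one interrupt line's counters over all possible CPUs. A hypothetical caller, not part of this patch:

    #include <linux/kernel.h>
    #include <linux/kernel_stat.h>

    static void report_irq_activity(unsigned int irq)
    {
        /* Total number of times this IRQ has been handled, on any CPU. */
        pr_info("irq %u fired %u times so far\n", irq, kstat_irqs(irq));
    }
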
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c0613f7d6730..b55045bc7563 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -816,7 +816,7 @@ static int kimage_load_normal_segment(struct kimage *image,
ptr = kmap(page);
/* Start with a clear page */
- memset(ptr, 0, PAGE_SIZE);
+ clear_page(ptr);
ptr += maddr & ~PAGE_MASK;
mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
if (mchunk > mbytes)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 56a891914273..99865c33a60d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
-static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
spinlock_t lock ____cacheline_aligned_in_smp;
@@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
}
#ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
struct hlist_head *head;
@@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
return;
kprobes_allow_optimization = true;
- mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
- mutex_unlock(&text_mutex);
printk(KERN_INFO "Kprobes globally optimized\n");
}
+/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
struct hlist_head *head;
diff --git a/kernel/module.c b/kernel/module.c
index 2df46301a7a4..437a74a7524a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2037,7 +2037,7 @@ static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}
-static void add_kallsyms(struct module *mod, struct load_info *info)
+static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 2a5dfec8efe0..2c98ad94ba0e 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -85,6 +85,14 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
return ERR_PTR(-EPERM);
if (!cgroup_is_descendant(cgroup, current))
return ERR_PTR(-EPERM);
+ if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
+ printk("ns_cgroup can't be created with parent "
+ "'clone_children' set.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ printk_once("ns_cgroup deprecated: consider using the "
+ "'clone_children' flag without the ns_cgroup.\n");
ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
if (!ns_cgroup)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f309e8014c78..517d827f4982 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event)
return event->cpu == -1 || event->cpu == smp_processor_id();
}
-static int
-__event_sched_out(struct perf_event *event,
+static void
+event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
@@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event,
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
- return 0;
+ return;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
+ event->tstamp_stopped = ctx->time;
event->pmu->del(event, 0);
event->oncpu = -1;
@@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event,
ctx->nr_active--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
- return 1;
-}
-
-static void
-event_sched_out(struct perf_event *event,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- int ret;
-
- ret = __event_sched_out(event, cpuctx, ctx);
- if (ret)
- event->tstamp_stopped = ctx->time;
}
static void
@@ -664,7 +652,7 @@ retry:
}
static int
-__event_sched_in(struct perf_event *event,
+event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
@@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event,
return -EAGAIN;
}
+ event->tstamp_running += ctx->time - event->tstamp_stopped;
+
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
@@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event,
return 0;
}
-static inline int
-event_sched_in(struct perf_event *event,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- int ret = __event_sched_in(event, cpuctx, ctx);
- if (ret)
- return ret;
- event->tstamp_running += ctx->time - event->tstamp_stopped;
- return 0;
-}
-
-static void
-group_commit_event_sched_in(struct perf_event *group_event,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct perf_event *event;
- u64 now = ctx->time;
-
- group_event->tstamp_running += now - group_event->tstamp_stopped;
- /*
- * Schedule in siblings as one group (if any):
- */
- list_for_each_entry(event, &group_event->sibling_list, group_entry) {
- event->tstamp_running += now - event->tstamp_stopped;
- }
-}
-
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
@@ -730,19 +691,15 @@ group_sched_in(struct perf_event *group_event,
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu;
+ u64 now = ctx->time;
+ bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
pmu->start_txn(pmu);
- /*
- * use __event_sched_in() to delay updating tstamp_running
- * until the transaction is committed. In case of failure
- * we will keep an unmodified tstamp_running which is a
- * requirement to get correct timing information
- */
- if (__event_sched_in(group_event, cpuctx, ctx)) {
+ if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
return -EAGAIN;
}
@@ -751,31 +708,42 @@ group_sched_in(struct perf_event *group_event,
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
- if (__event_sched_in(event, cpuctx, ctx)) {
+ if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
- if (!pmu->commit_txn(pmu)) {
- /* commit tstamp_running */
- group_commit_event_sched_in(group_event, cpuctx, ctx);
+ if (!pmu->commit_txn(pmu))
return 0;
- }
+
group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
+ * The events up to the failed event are scheduled out normally,
+ * tstamp_stopped will be updated.
*
- * use __event_sched_out() to avoid updating tstamp_stopped
- * because the event never actually ran
+ * The failed events and the remaining siblings need to have
+ * their timings updated as if they had gone through event_sched_in()
+ * and event_sched_out(). This is required to get consistent timings
+ * across the group. This also takes care of the case where the group
+ * could never be scheduled by ensuring tstamp_stopped is set to mark
+ * the time the event was actually stopped, such that time delta
+ * calculation in update_event_times() is correct.
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
- break;
- __event_sched_out(event, cpuctx, ctx);
+ simulate = true;
+
+ if (simulate) {
+ event->tstamp_running += now - event->tstamp_stopped;
+ event->tstamp_stopped = now;
+ } else {
+ event_sched_out(event, cpuctx, ctx);
+ }
}
- __event_sched_out(group_event, cpuctx, ctx);
+ event_sched_out(group_event, cpuctx, ctx);
pmu->cancel_txn(pmu);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ac7eb109f196..0dac75ea4456 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -984,8 +984,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
src = kmap_atomic(s_page, KM_USER0);
dst = kmap_atomic(d_page, KM_USER1);
do_copy_page(dst, src);
- kunmap_atomic(src, KM_USER0);
kunmap_atomic(dst, KM_USER1);
+ kunmap_atomic(src, KM_USER0);
} else {
if (PageHighMem(d_page)) {
/* Page pointed to by src may contain some kernel
@@ -993,7 +993,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
*/
safe_copy_page(buffer, s_page);
dst = kmap_atomic(d_page, KM_USER0);
- memcpy(dst, buffer, PAGE_SIZE);
+ copy_page(dst, buffer);
kunmap_atomic(dst, KM_USER0);
} else {
safe_copy_page(page_address(d_page), s_page);
@@ -1687,7 +1687,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
- memset(buffer, 0, PAGE_SIZE);
+ clear_page(buffer);
pack_pfns(buffer, &orig_bm);
} else {
struct page *page;
@@ -1701,7 +1701,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
void *kaddr;
kaddr = kmap_atomic(page, KM_USER0);
- memcpy(buffer, kaddr, PAGE_SIZE);
+ copy_page(buffer, kaddr);
kunmap_atomic(kaddr, KM_USER0);
handle->buffer = buffer;
} else {
@@ -1984,7 +1984,7 @@ static void copy_last_highmem_page(void)
void *dst;
dst = kmap_atomic(last_highmem_page, KM_USER0);
- memcpy(dst, buffer, PAGE_SIZE);
+ copy_page(dst, buffer);
kunmap_atomic(dst, KM_USER0);
last_highmem_page = NULL;
}
@@ -2270,11 +2270,11 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
kaddr1 = kmap_atomic(p1, KM_USER0);
kaddr2 = kmap_atomic(p2, KM_USER1);
- memcpy(buf, kaddr1, PAGE_SIZE);
- memcpy(kaddr1, kaddr2, PAGE_SIZE);
- memcpy(kaddr2, buf, PAGE_SIZE);
- kunmap_atomic(kaddr1, KM_USER0);
+ copy_page(buf, kaddr1);
+ copy_page(kaddr1, kaddr2);
+ copy_page(kaddr2, buf);
kunmap_atomic(kaddr2, KM_USER1);
+ kunmap_atomic(kaddr1, KM_USER0);
}
/**
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 916eaa790399..a0e4a86ccf94 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -251,7 +251,7 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
if (bio_chain) {
src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
if (src) {
- memcpy(src, buf, PAGE_SIZE);
+ copy_page(src, buf);
} else {
WARN_ON_ONCE(1);
bio_chain = NULL; /* Go synchronous */
@@ -325,7 +325,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
error = write_page(handle->cur, handle->cur_swap, NULL);
if (error)
goto out;
- memset(handle->cur, 0, PAGE_SIZE);
+ clear_page(handle->cur);
handle->cur_swap = offset;
handle->k = 0;
}
@@ -910,7 +910,7 @@ int swsusp_check(void)
hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
- memset(swsusp_header, 0, PAGE_SIZE);
+ clear_page(swsusp_header);
error = hib_bio_read_page(swsusp_resume_block,
swsusp_header, NULL);
if (error)
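
The snapshot and swap hunks replace memcpy()/memset() on whole, page-aligned buffers with copy_page()/clear_page(), which architectures can back with optimized full-page routines. The substitution is only valid when both buffers really are complete pages; a sketch of the guarded pattern:

    #include <linux/mm.h>
    #include <linux/string.h>

    static void duplicate_buffer(void *dst, void *src, size_t len)
    {
        if (len == PAGE_SIZE) {
            /* dst and src are assumed to be full, page-aligned pages here. */
            copy_page(dst, src);
        } else {
            /* partial copies keep the generic routine */
            memcpy(dst, src, len);
        }
    }
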
diff --git a/kernel/printk.c b/kernel/printk.c
index 2531017795f6..b2ebaee8c377 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -210,7 +210,7 @@ __setup("log_buf_len=", log_buf_len_setup);
#ifdef CONFIG_BOOT_PRINTK_DELAY
-static unsigned int boot_delay; /* msecs delay after each printk during bootup */
+static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec; /* based on boot_delay */
static int __init boot_delay_setup(char *str)
@@ -647,6 +647,7 @@ static inline int can_use_console(unsigned int cpu)
* released but interrupts still disabled.
*/
static int acquire_console_semaphore_for_printk(unsigned int cpu)
+ __releases(&logbuf_lock)
{
int retval = 0;
@@ -1511,7 +1512,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper)
}
EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
-static const char const *kmsg_reasons[] = {
+static const char * const kmsg_reasons[] = {
[KMSG_DUMP_OOPS] = "oops",
[KMSG_DUMP_PANIC] = "panic",
[KMSG_DUMP_KEXEC] = "kexec",
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f34d798ef4a2..99bbaa3e5b0d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -181,7 +181,7 @@ int ptrace_attach(struct task_struct *task)
* under ptrace.
*/
retval = -ERESTARTNOINTR;
- if (mutex_lock_interruptible(&task->cred_guard_mutex))
+ if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
goto out;
task_lock(task);
@@ -208,7 +208,7 @@ int ptrace_attach(struct task_struct *task)
unlock_tasklist:
write_unlock_irq(&tasklist_lock);
unlock_creds:
- mutex_unlock(&task->cred_guard_mutex);
+ mutex_unlock(&task->signal->cred_guard_mutex);
out:
return retval;
}
@@ -329,6 +329,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
* and reacquire the lock.
*/
void exit_ptrace(struct task_struct *tracer)
+ __releases(&tasklist_lock)
+ __acquires(&tasklist_lock)
{
struct task_struct *p, *n;
LIST_HEAD(ptrace_dead);
@@ -402,7 +404,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
return copied;
}
-static int ptrace_setoptions(struct task_struct *child, long data)
+static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
child->ptrace &= ~PT_TRACE_MASK;
@@ -481,7 +483,8 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
#define is_sysemu_singlestep(request) 0
#endif
-static int ptrace_resume(struct task_struct *child, long request, long data)
+static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
{
if (!valid_signal(data))
return -EIO;
@@ -558,10 +561,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
#endif
int ptrace_request(struct task_struct *child, long request,
- long addr, long data)
+ unsigned long addr, unsigned long data)
{
int ret = -EIO;
siginfo_t siginfo;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
switch (request) {
case PTRACE_PEEKTEXT:
@@ -578,19 +583,17 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setoptions(child, data);
break;
case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
+ ret = put_user(child->ptrace_message, datalp);
break;
case PTRACE_GETSIGINFO:
ret = ptrace_getsiginfo(child, &siginfo);
if (!ret)
- ret = copy_siginfo_to_user((siginfo_t __user *) data,
- &siginfo);
+ ret = copy_siginfo_to_user(datavp, &siginfo);
break;
case PTRACE_SETSIGINFO:
- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
- sizeof siginfo))
+ if (copy_from_user(&siginfo, datavp, sizeof siginfo))
ret = -EFAULT;
else
ret = ptrace_setsiginfo(child, &siginfo);
@@ -621,7 +624,7 @@ int ptrace_request(struct task_struct *child, long request,
}
mmput(mm);
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
#endif
@@ -650,7 +653,7 @@ int ptrace_request(struct task_struct *child, long request,
case PTRACE_SETREGSET:
{
struct iovec kiov;
- struct iovec __user *uiov = (struct iovec __user *) data;
+ struct iovec __user *uiov = datavp;
if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
return -EFAULT;
@@ -691,7 +694,8 @@ static struct task_struct *ptrace_get_task_struct(pid_t pid)
#define arch_ptrace_attach(child) do { } while (0)
#endif
-SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ unsigned long, data)
{
struct task_struct *child;
long ret;
@@ -732,7 +736,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
return ret;
}
-int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data)
{
unsigned long tmp;
int copied;
@@ -743,7 +748,8 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
return put_user(tmp, (unsigned long __user *)data);
}
-int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data)
{
int copied;
diff --git a/kernel/resource.c b/kernel/resource.c
index 7b36976e5dea..9fad33efd0db 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource);
static DEFINE_RWLOCK(resource_lock);
+/*
+ * By default, we allocate free space bottom-up. The architecture can request
+ * top-down by clearing this flag. The user can override the architecture's
+ * choice with the "resource_alloc_from_bottom" kernel boot option, but that
+ * should only be a debugging tool.
+ */
+int resource_alloc_from_bottom = 1;
+
+static __init int setup_alloc_from_bottom(char *s)
+{
+ printk(KERN_INFO
+ "resource: allocating from bottom-up; please report a bug\n");
+ resource_alloc_from_bottom = 1;
+ return 0;
+}
+early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
+
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
struct resource *p = v;
@@ -357,8 +374,97 @@ int __weak page_is_ram(unsigned long pfn)
return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
+static resource_size_t simple_align_resource(void *data,
+ const struct resource *avail,
+ resource_size_t size,
+ resource_size_t align)
+{
+ return avail->start;
+}
+
+static void resource_clip(struct resource *res, resource_size_t min,
+ resource_size_t max)
+{
+ if (res->start < min)
+ res->start = min;
+ if (res->end > max)
+ res->end = max;
+}
+
+static bool resource_contains(struct resource *res1, struct resource *res2)
+{
+ return res1->start <= res2->start && res1->end >= res2->end;
+}
+
+/*
+ * Find the resource before "child" in the sibling list of "root" children.
+ */
+static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
+{
+ struct resource *this;
+
+ for (this = root->child; this; this = this->sibling)
+ if (this->sibling == child)
+ return this;
+
+ return NULL;
+}
+
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the end of the root resource first.
+ */
+static int find_resource_from_top(struct resource *root, struct resource *new,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data)
+{
+ struct resource *this;
+ struct resource tmp, avail, alloc;
+
+ tmp.start = root->end;
+ tmp.end = root->end;
+
+ this = find_sibling_prev(root, NULL);
+ for (;;) {
+ if (this) {
+ if (this->end < root->end)
+ tmp.start = this->end + 1;
+ } else
+ tmp.start = root->start;
+
+ resource_clip(&tmp, min, max);
+
+ /* Check for overflow after ALIGN() */
+ avail = *new;
+ avail.start = ALIGN(tmp.start, align);
+ avail.end = tmp.end;
+ if (avail.start >= tmp.start) {
+ alloc.start = alignf(alignf_data, &avail, size, align);
+ alloc.end = alloc.start + size - 1;
+ if (resource_contains(&avail, &alloc)) {
+ new->start = alloc.start;
+ new->end = alloc.end;
+ return 0;
+ }
+ }
+
+ if (!this || this->start == root->start)
+ break;
+
+ tmp.end = this->start - 1;
+ this = find_sibling_prev(root, this);
+ }
+ return -EBUSY;
+}
+
/*
* Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the beginning of the root resource first.
*/
static int find_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
@@ -370,36 +476,43 @@ static int find_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
struct resource *this = root->child;
- struct resource tmp = *new;
+ struct resource tmp = *new, avail, alloc;
tmp.start = root->start;
/*
- * Skip past an allocated resource that starts at 0, since the assignment
- * of this->start - 1 to tmp->end below would cause an underflow.
+ * Skip past an allocated resource that starts at 0, since the
+ * assignment of this->start - 1 to tmp->end below would cause an
+ * underflow.
*/
if (this && this->start == 0) {
tmp.start = this->end + 1;
this = this->sibling;
}
- for(;;) {
+ for (;;) {
if (this)
tmp.end = this->start - 1;
else
tmp.end = root->end;
- if (tmp.start < min)
- tmp.start = min;
- if (tmp.end > max)
- tmp.end = max;
- tmp.start = ALIGN(tmp.start, align);
- if (alignf)
- tmp.start = alignf(alignf_data, &tmp, size, align);
- if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
- new->start = tmp.start;
- new->end = tmp.start + size - 1;
- return 0;
+
+ resource_clip(&tmp, min, max);
+
+ /* Check for overflow after ALIGN() */
+ avail = *new;
+ avail.start = ALIGN(tmp.start, align);
+ avail.end = tmp.end;
+ if (avail.start >= tmp.start) {
+ alloc.start = alignf(alignf_data, &avail, size, align);
+ alloc.end = alloc.start + size - 1;
+ if (resource_contains(&avail, &alloc)) {
+ new->start = alloc.start;
+ new->end = alloc.end;
+ return 0;
+ }
}
+
if (!this)
break;
+
tmp.start = this->end + 1;
this = this->sibling;
}
@@ -428,8 +541,14 @@ int allocate_resource(struct resource *root, struct resource *new,
{
int err;
+ if (!alignf)
+ alignf = simple_align_resource;
+
write_lock(&resource_lock);
- err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+ if (resource_alloc_from_bottom)
+ err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+ else
+ err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
write_unlock(&resource_lock);
@@ -453,6 +572,8 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
if (first == parent)
return first;
+ if (WARN_ON(first == new)) /* duplicated insertion */
+ return first;
if ((first->start > new->start) || (first->end < new->end))
break;
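
find_resource_from_top() walks the root's children from the highest addresses downward, clipping each free gap to [min, max], aligning the start, and accepting the gap only if the aligned allocation still fits; find_resource() now reuses the same clip/contains checks bottom-up. A standalone sketch of that accept/reject step, with struct range standing in for struct resource and align assumed to be a power of two:

    struct range { unsigned long start, end; };    /* stand-in for struct resource */

    static void range_clip(struct range *r, unsigned long min, unsigned long max)
    {
        if (r->start < min)
            r->start = min;
        if (r->end > max)
            r->end = max;
    }

    static int range_contains(const struct range *big, const struct range *small)
    {
        return big->start <= small->start && big->end >= small->end;
    }

    /* Try to place 'size' bytes aligned to 'align' (a power of two) in 'gap'. */
    static int place_in_gap(struct range gap, unsigned long size,
                            unsigned long align, struct range *out)
    {
        struct range alloc;

        alloc.start = (gap.start + align - 1) & ~(align - 1);    /* ALIGN() */
        alloc.end = alloc.start + size - 1;
        if (!range_contains(&gap, &alloc))
            return -1;    /* does not fit, or the ALIGN() wrapped */
        *out = alloc;
        return 0;
    }
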
diff --git a/kernel/signal.c b/kernel/signal.c
index 919562c3d6b7..4e3cff10fdce 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1105,7 +1105,8 @@ int zap_other_threads(struct task_struct *p)
return count;
}
-struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
{
struct sighand_struct *sighand;
@@ -1617,6 +1618,8 @@ static int sigkill_pending(struct task_struct *tsk)
* is gone, we keep current->exit_code unless clear_code.
*/
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+ __releases(&current->sighand->siglock)
+ __acquires(&current->sighand->siglock)
{
if (arch_ptrace_stop_needed(exit_code, info)) {
/*
diff --git a/kernel/smp.c b/kernel/smp.c
index ed6aacfcb7ef..12ed8b013e2d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -267,7 +267,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
*
* Returns 0 on success, else a negative status code.
*/
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int wait)
{
struct call_single_data d = {
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(smp_call_function_single);
* 3) any other online cpu in @mask
*/
int smp_call_function_any(const struct cpumask *mask,
- void (*func)(void *info), void *info, int wait)
+ smp_call_func_t func, void *info, int wait)
{
unsigned int cpu;
const struct cpumask *nodemask;
@@ -416,7 +416,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
* must be disabled when calling this function.
*/
void smp_call_function_many(const struct cpumask *mask,
- void (*func)(void *), void *info, bool wait)
+ smp_call_func_t func, void *info, bool wait)
{
struct call_function_data *data;
unsigned long flags;
@@ -500,7 +500,7 @@ EXPORT_SYMBOL(smp_call_function_many);
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function(void (*func)(void *), void *info, int wait)
+int smp_call_function(smp_call_func_t func, void *info, int wait)
{
preempt_disable();
smp_call_function_many(cpu_online_mask, func, info, wait);
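
These prototype changes switch the cross-call API over to the smp_call_func_t typedef, i.e. void (*)(void *info), instead of spelling the function-pointer type out in every declaration; callers are unaffected. A small illustrative caller:

    #include <linux/kernel.h>
    #include <linux/smp.h>

    /* Matches smp_call_func_t: void (*)(void *info); runs in IPI context. */
    static void say_hello(void *info)
    {
        pr_info("hello from CPU %d (%s)\n", smp_processor_id(), (char *)info);
    }

    static void greet_all_cpus(void)
    {
        /* wait=1: do not return until every other CPU has run say_hello(). */
        smp_call_function(say_hello, "cross-call demo", 1);
    }
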
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f02a9dfa19bc..18f4be0d5fe0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -229,18 +229,20 @@ restart:
do {
if (pending & 1) {
+ unsigned int vec_nr = h - softirq_vec;
int prev_count = preempt_count();
- kstat_incr_softirqs_this_cpu(h - softirq_vec);
- trace_softirq_entry(h, softirq_vec);
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
h->action(h);
- trace_softirq_exit(h, softirq_vec);
+ trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %td %s %p"
+ printk(KERN_ERR "huh, entered softirq %u %s %p"
"with preempt_count %08x,"
- " exited with %08x?\n", h - softirq_vec,
- softirq_to_name[h - softirq_vec],
- h->action, prev_count, preempt_count());
+ " exited with %08x?\n", vec_nr,
+ softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
preempt_count() = prev_count;
}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 090c28812ce1..2df820b03beb 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -262,7 +262,7 @@ repeat:
cpu_stop_fn_t fn = work->fn;
void *arg = work->arg;
struct cpu_stop_done *done = work->done;
- char ksym_buf[KSYM_NAME_LEN];
+ char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
__set_current_state(TASK_RUNNING);
@@ -304,7 +304,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
cpu);
if (IS_ERR(p))
- return NOTIFY_BAD;
+ return notifier_from_errno(PTR_ERR(p));
get_task_struct(p);
kthread_bind(p, cpu);
sched_set_stop_task(cpu, p);
@@ -372,7 +372,7 @@ static int __init cpu_stop_init(void)
/* start one for the boot cpu */
err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
bcpu);
- BUG_ON(err == NOTIFY_BAD);
+ BUG_ON(err != NOTIFY_OK);
cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
register_cpu_notifier(&cpu_stop_cpu_notifier);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3a45c224770f..c33a1edb799f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -161,8 +161,6 @@ extern int no_unaligned_warning;
extern int unaligned_dump_stack;
#endif
-extern struct ratelimit_state printk_ratelimit_state;
-
#ifdef CONFIG_PROC_SYSCTL
static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -1340,28 +1338,28 @@ static struct ctl_table fs_table[] = {
.data = &inodes_stat,
.maxlen = 2*sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_nr_inodes,
},
{
.procname = "inode-state",
.data = &inodes_stat,
.maxlen = 7*sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_nr_inodes,
},
{
.procname = "file-nr",
.data = &files_stat,
- .maxlen = 3*sizeof(int),
+ .maxlen = sizeof(files_stat),
.mode = 0444,
.proc_handler = proc_nr_files,
},
{
.procname = "file-max",
.data = &files_stat.max_files,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(files_stat.max_files),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_doulongvec_minmax,
},
{
.procname = "nr_open",
@@ -1377,7 +1375,7 @@ static struct ctl_table fs_table[] = {
.data = &dentry_stat,
.maxlen = 6*sizeof(int),
.mode = 0444,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_nr_dentry,
},
{
.procname = "overflowuid",
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 11281d5792bd..c8231fb15708 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -175,22 +175,8 @@ static void send_cpu_listeners(struct sk_buff *skb,
up_write(&listeners->sem);
}
-static int fill_pid(pid_t pid, struct task_struct *tsk,
- struct taskstats *stats)
+static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
{
- int rc = 0;
-
- if (!tsk) {
- rcu_read_lock();
- tsk = find_task_by_vpid(pid);
- if (tsk)
- get_task_struct(tsk);
- rcu_read_unlock();
- if (!tsk)
- return -ESRCH;
- } else
- get_task_struct(tsk);
-
memset(stats, 0, sizeof(*stats));
/*
* Each accounting subsystem adds calls to its functions to
@@ -209,17 +195,27 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
/* fill in extended acct fields */
xacct_add_tsk(stats, tsk);
+}
- /* Define err: label here if needed */
- put_task_struct(tsk);
- return rc;
+static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
+{
+ struct task_struct *tsk;
+ rcu_read_lock();
+ tsk = find_task_by_vpid(pid);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
+ return -ESRCH;
+ fill_stats(tsk, stats);
+ put_task_struct(tsk);
+ return 0;
}
-static int fill_tgid(pid_t tgid, struct task_struct *first,
- struct taskstats *stats)
+static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
- struct task_struct *tsk;
+ struct task_struct *tsk, *first;
unsigned long flags;
int rc = -ESRCH;
@@ -228,8 +224,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
* leaders who are already counted with the dead tasks
*/
rcu_read_lock();
- if (!first)
- first = find_task_by_vpid(tgid);
+ first = find_task_by_vpid(tgid);
if (!first || !lock_task_sighand(first, &flags))
goto out;
@@ -268,7 +263,6 @@ out:
return rc;
}
-
static void fill_tgid_exit(struct task_struct *tsk)
{
unsigned long flags;
@@ -360,6 +354,12 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
struct nlattr *na, *ret;
int aggr;
+ /* If we don't pad, we end up with alignment on a 4-byte boundary.
+ * This causes lots of runtime warnings on systems requiring 8-byte
+ * alignment. */
+ u32 pids[2] = { pid, 0 };
+ int pid_size = ALIGN(sizeof(pid), sizeof(long));
+
aggr = (type == TASKSTATS_TYPE_PID)
? TASKSTATS_TYPE_AGGR_PID
: TASKSTATS_TYPE_AGGR_TGID;
@@ -367,7 +367,7 @@ static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
na = nla_nest_start(skb, aggr);
if (!na)
goto err;
- if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+ if (nla_put(skb, type, pid_size, pids) < 0)
goto err;
ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
if (!ret)
@@ -424,39 +424,46 @@ err:
return rc;
}
-static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+static int cmd_attr_register_cpumask(struct genl_info *info)
{
- int rc;
- struct sk_buff *rep_skb;
- struct taskstats *stats;
- size_t size;
cpumask_var_t mask;
+ int rc;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
-
rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
if (rc < 0)
- goto free_return_rc;
- if (rc == 0) {
- rc = add_del_listener(info->snd_pid, mask, REGISTER);
- goto free_return_rc;
- }
+ goto out;
+ rc = add_del_listener(info->snd_pid, mask, REGISTER);
+out:
+ free_cpumask_var(mask);
+ return rc;
+}
+
+static int cmd_attr_deregister_cpumask(struct genl_info *info)
+{
+ cpumask_var_t mask;
+ int rc;
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
if (rc < 0)
- goto free_return_rc;
- if (rc == 0) {
- rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
-free_return_rc:
- free_cpumask_var(mask);
- return rc;
- }
+ goto out;
+ rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+out:
free_cpumask_var(mask);
+ return rc;
+}
+
+static int cmd_attr_pid(struct genl_info *info)
+{
+ struct taskstats *stats;
+ struct sk_buff *rep_skb;
+ size_t size;
+ u32 pid;
+ int rc;
- /*
- * Size includes space for nested attributes
- */
size = nla_total_size(sizeof(u32)) +
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
@@ -465,33 +472,64 @@ free_return_rc:
return rc;
rc = -EINVAL;
- if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
- u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
- stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
- if (!stats)
- goto err;
-
- rc = fill_pid(pid, NULL, stats);
- if (rc < 0)
- goto err;
- } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
- u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
- stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
- if (!stats)
- goto err;
-
- rc = fill_tgid(tgid, NULL, stats);
- if (rc < 0)
- goto err;
- } else
+ pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
+ stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+ if (!stats)
+ goto err;
+
+ rc = fill_stats_for_pid(pid, stats);
+ if (rc < 0)
+ goto err;
+ return send_reply(rep_skb, info);
+err:
+ nlmsg_free(rep_skb);
+ return rc;
+}
+
+static int cmd_attr_tgid(struct genl_info *info)
+{
+ struct taskstats *stats;
+ struct sk_buff *rep_skb;
+ size_t size;
+ u32 tgid;
+ int rc;
+
+ size = nla_total_size(sizeof(u32)) +
+ nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
+
+ rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
+ if (rc < 0)
+ return rc;
+
+ rc = -EINVAL;
+ tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
+ stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+ if (!stats)
goto err;
+ rc = fill_stats_for_tgid(tgid, stats);
+ if (rc < 0)
+ goto err;
return send_reply(rep_skb, info);
err:
nlmsg_free(rep_skb);
return rc;
}
+static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+ return cmd_attr_register_cpumask(info);
+ else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+ return cmd_attr_deregister_cpumask(info);
+ else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
+ return cmd_attr_pid(info);
+ else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
+ return cmd_attr_tgid(info);
+ else
+ return -EINVAL;
+}
+
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
@@ -555,9 +593,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
if (!stats)
goto err;
- rc = fill_pid(-1, tsk, stats);
- if (rc < 0)
- goto err;
+ fill_stats(tsk, stats);
/*
* Doesn't matter if tsk is the leader or the last group member leaving
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c3dab054d18e..9ed509a015d8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -224,6 +224,9 @@ enum {
RB_LEN_TIME_STAMP = 16,
};
+#define skip_time_extend(event) \
+ ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
+
static inline int rb_null_event(struct ring_buffer_event *event)
{
return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
@@ -248,8 +251,12 @@ rb_event_data_length(struct ring_buffer_event *event)
return length + RB_EVNT_HDR_SIZE;
}
-/* inline for ring buffer fast paths */
-static unsigned
+/*
+ * Return the length of the given event. Will return
+ * the length of the time extend if the event is a
+ * time extend.
+ */
+static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
switch (event->type_len) {
@@ -274,13 +281,41 @@ rb_event_length(struct ring_buffer_event *event)
return 0;
}
+/*
+ * Return total length of time extend and data,
+ * or just the event length for all other events.
+ */
+static inline unsigned
+rb_event_ts_length(struct ring_buffer_event *event)
+{
+ unsigned len = 0;
+
+ if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+ /* time extends include the data event after it */
+ len = RB_LEN_TIME_EXTEND;
+ event = skip_time_extend(event);
+ }
+ return len + rb_event_length(event);
+}
+
/**
* ring_buffer_event_length - return the length of the event
* @event: the event to get the length of
+ *
+ * Returns the size of the data load of a data event.
+ * If the event is something other than a data event, it
+ * returns the size of the event itself. With the exception
+ * of a TIME EXTEND, where it still returns the size of the
+ * data load of the data event after it.
*/
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
- unsigned length = rb_event_length(event);
+ unsigned length;
+
+ if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+ event = skip_time_extend(event);
+
+ length = rb_event_length(event);
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
return length;
length -= RB_EVNT_HDR_SIZE;
@@ -294,6 +329,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length);
static void *
rb_event_data(struct ring_buffer_event *event)
{
+ if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+ event = skip_time_extend(event);
BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
/* If length is in len field, then array[0] has the data */
if (event->type_len)
@@ -404,9 +441,6 @@ static inline int test_time_stamp(u64 delta)
/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
-/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
-
int ring_buffer_print_page_header(struct trace_seq *s)
{
struct buffer_data_page field;
@@ -1546,6 +1580,25 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
iter->head = 0;
}
+/* Slow path, do not inline */
+static noinline struct ring_buffer_event *
+rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+{
+ event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+
+ /* Not the first event on the page? */
+ if (rb_event_index(event)) {
+ event->time_delta = delta & TS_MASK;
+ event->array[0] = delta >> TS_SHIFT;
+ } else {
+ /* nope, just zero it */
+ event->time_delta = 0;
+ event->array[0] = 0;
+ }
+
+ return skip_time_extend(event);
+}
+
/**
* ring_buffer_update_event - update event type and data
* @event: the event to update
@@ -1558,28 +1611,31 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
* data field.
*/
static void
-rb_update_event(struct ring_buffer_event *event,
- unsigned type, unsigned length)
+rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event, unsigned length,
+ int add_timestamp, u64 delta)
{
- event->type_len = type;
-
- switch (type) {
-
- case RINGBUF_TYPE_PADDING:
- case RINGBUF_TYPE_TIME_EXTEND:
- case RINGBUF_TYPE_TIME_STAMP:
- break;
+ /* Only a commit updates the timestamp */
+ if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
+ delta = 0;
- case 0:
- length -= RB_EVNT_HDR_SIZE;
- if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
- event->array[0] = length;
- else
- event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
- break;
- default:
- BUG();
+ /*
+ * If we need to add a timestamp, then we
+ * add it to the start of the reserved space.
+ */
+ if (unlikely(add_timestamp)) {
+ event = rb_add_time_stamp(event, delta);
+ length -= RB_LEN_TIME_EXTEND;
+ delta = 0;
}
+
+ event->time_delta = delta;
+ length -= RB_EVNT_HDR_SIZE;
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
+ event->type_len = 0;
+ event->array[0] = length;
+ } else
+ event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}
/*
@@ -1823,10 +1879,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
local_sub(length, &tail_page->write);
}
-static struct ring_buffer_event *
+/*
+ * This is the slow path; force gcc not to inline it.
+ */
+static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length, unsigned long tail,
- struct buffer_page *tail_page, u64 *ts)
+ struct buffer_page *tail_page, u64 ts)
{
struct buffer_page *commit_page = cpu_buffer->commit_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
@@ -1909,8 +1968,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer);
- next_page->page->time_stamp = *ts;
+ ts = rb_time_stamp(buffer);
+ next_page->page->time_stamp = ts;
}
out_again:
@@ -1929,12 +1988,21 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
- unsigned type, unsigned long length, u64 *ts)
+ unsigned long length, u64 ts,
+ u64 delta, int add_timestamp)
{
struct buffer_page *tail_page;
struct ring_buffer_event *event;
unsigned long tail, write;
+ /*
+ * If the time delta since the last event is too big to
+ * hold in the time field of the event, then we append a
+ * TIME EXTEND event ahead of the data event.
+ */
+ if (unlikely(add_timestamp))
+ length += RB_LEN_TIME_EXTEND;
+
tail_page = cpu_buffer->tail_page;
write = local_add_return(length, &tail_page->write);
@@ -1943,7 +2011,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
tail = write - length;
/* See if we shot pass the end of this buffer page */
- if (write > BUF_PAGE_SIZE)
+ if (unlikely(write > BUF_PAGE_SIZE))
return rb_move_tail(cpu_buffer, length, tail,
tail_page, ts);
@@ -1951,18 +2019,16 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield);
- rb_update_event(event, type, length);
+ rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
- /* The passed in type is zero for DATA */
- if (likely(!type))
- local_inc(&tail_page->entries);
+ local_inc(&tail_page->entries);
/*
* If this is the first commit on the page, then update
* its timestamp.
*/
if (!tail)
- tail_page->page->time_stamp = *ts;
+ tail_page->page->time_stamp = ts;
return event;
}
@@ -1977,7 +2043,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long addr;
new_index = rb_event_index(event);
- old_index = new_index + rb_event_length(event);
+ old_index = new_index + rb_event_ts_length(event);
addr = (unsigned long)event;
addr &= PAGE_MASK;
@@ -2003,76 +2069,13 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
return 0;
}
-static int
-rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
- u64 *ts, u64 *delta)
-{
- struct ring_buffer_event *event;
- int ret;
-
- WARN_ONCE(*delta > (1ULL << 59),
- KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
- (unsigned long long)*delta,
- (unsigned long long)*ts,
- (unsigned long long)cpu_buffer->write_stamp);
-
- /*
- * The delta is too big, we to add a
- * new timestamp.
- */
- event = __rb_reserve_next(cpu_buffer,
- RINGBUF_TYPE_TIME_EXTEND,
- RB_LEN_TIME_EXTEND,
- ts);
- if (!event)
- return -EBUSY;
-
- if (PTR_ERR(event) == -EAGAIN)
- return -EAGAIN;
-
- /* Only a commited time event can update the write stamp */
- if (rb_event_is_commit(cpu_buffer, event)) {
- /*
- * If this is the first on the page, then it was
- * updated with the page itself. Try to discard it
- * and if we can't just make it zero.
- */
- if (rb_event_index(event)) {
- event->time_delta = *delta & TS_MASK;
- event->array[0] = *delta >> TS_SHIFT;
- } else {
- /* try to discard, since we do not need this */
- if (!rb_try_to_discard(cpu_buffer, event)) {
- /* nope, just zero it */
- event->time_delta = 0;
- event->array[0] = 0;
- }
- }
- cpu_buffer->write_stamp = *ts;
- /* let the caller know this was the commit */
- ret = 1;
- } else {
- /* Try to discard the event */
- if (!rb_try_to_discard(cpu_buffer, event)) {
- /* Darn, this is just wasted space */
- event->time_delta = 0;
- event->array[0] = 0;
- }
- ret = 0;
- }
-
- *delta = 0;
-
- return ret;
-}
-
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
local_inc(&cpu_buffer->committing);
local_inc(&cpu_buffer->commits);
}
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned long commits;
@@ -2110,9 +2113,10 @@ rb_reserve_next_event(struct ring_buffer *buffer,
unsigned long length)
{
struct ring_buffer_event *event;
- u64 ts, delta = 0;
- int commit = 0;
+ u64 ts, delta;
int nr_loops = 0;
+ int add_timestamp;
+ u64 diff;
rb_start_commit(cpu_buffer);
@@ -2133,6 +2137,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
length = rb_calculate_event_length(length);
again:
+ add_timestamp = 0;
+ delta = 0;
+
/*
* We allow for interrupts to reenter here and do a trace.
* If one does, it will cause this original code to loop
@@ -2146,56 +2153,32 @@ rb_reserve_next_event(struct ring_buffer *buffer,
goto out_fail;
ts = rb_time_stamp(cpu_buffer->buffer);
+ diff = ts - cpu_buffer->write_stamp;
- /*
- * Only the first commit can update the timestamp.
- * Yes there is a race here. If an interrupt comes in
- * just after the conditional and it traces too, then it
- * will also check the deltas. More than one timestamp may
- * also be made. But only the entry that did the actual
- * commit will be something other than zero.
- */
- if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
- rb_page_write(cpu_buffer->tail_page) ==
- rb_commit_index(cpu_buffer))) {
- u64 diff;
-
- diff = ts - cpu_buffer->write_stamp;
-
- /* make sure this diff is calculated here */
- barrier();
-
- /* Did the write stamp get updated already? */
- if (unlikely(ts < cpu_buffer->write_stamp))
- goto get_event;
+ /* make sure this diff is calculated here */
+ barrier();
+ /* Did the write stamp get updated already? */
+ if (likely(ts >= cpu_buffer->write_stamp)) {
delta = diff;
if (unlikely(test_time_stamp(delta))) {
-
- commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
- if (commit == -EBUSY)
- goto out_fail;
-
- if (commit == -EAGAIN)
- goto again;
-
- RB_WARN_ON(cpu_buffer, commit < 0);
+ WARN_ONCE(delta > (1ULL << 59),
+ KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
+ (unsigned long long)delta,
+ (unsigned long long)ts,
+ (unsigned long long)cpu_buffer->write_stamp);
+ add_timestamp = 1;
}
}
- get_event:
- event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
+ event = __rb_reserve_next(cpu_buffer, length, ts,
+ delta, add_timestamp);
if (unlikely(PTR_ERR(event) == -EAGAIN))
goto again;
if (!event)
goto out_fail;
- if (!rb_event_is_commit(cpu_buffer, event))
- delta = 0;
-
- event->time_delta = delta;
-
return event;
out_fail:
@@ -2207,13 +2190,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
#define TRACE_RECURSIVE_DEPTH 16
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
{
- current->trace_recursion++;
-
- if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
- return 0;
-
/* Disable all tracing before we do anything else */
tracing_off_permanent();
@@ -2225,10 +2204,21 @@ static int trace_recursive_lock(void)
in_nmi());
WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+ current->trace_recursion++;
+
+ if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+ return 0;
+
+ trace_recursive_fail();
+
return -1;
}
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
{
WARN_ON_ONCE(!current->trace_recursion);
@@ -2308,12 +2298,28 @@ static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
+ u64 delta;
+
/*
* The event first in the commit queue updates the
* time stamp.
*/
- if (rb_event_is_commit(cpu_buffer, event))
- cpu_buffer->write_stamp += event->time_delta;
+ if (rb_event_is_commit(cpu_buffer, event)) {
+ /*
+ * A commit event that is first on a page
+ * updates the write timestamp with the page stamp
+ */
+ if (!rb_event_index(event))
+ cpu_buffer->write_stamp =
+ cpu_buffer->commit_page->page->time_stamp;
+ else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
+ delta = event->array[0];
+ delta <<= TS_SHIFT;
+ delta += event->time_delta;
+ cpu_buffer->write_stamp += delta;
+ } else
+ cpu_buffer->write_stamp += event->time_delta;
+ }
}
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
@@ -2353,6 +2359,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static inline void rb_event_discard(struct ring_buffer_event *event)
{
+ if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
+ event = skip_time_extend(event);
+
/* array[0] holds the actual length for the discarded event */
event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
event->type_len = RINGBUF_TYPE_PADDING;
@@ -3049,12 +3058,12 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
again:
/*
- * We repeat when a timestamp is encountered. It is possible
- * to get multiple timestamps from an interrupt entering just
- * as one timestamp is about to be written, or from discarded
- * commits. The most that we can have is the number on a single page.
+ * We repeat when a time extend is encountered.
+ * Since the time extend is always attached to a data event,
+ * we should never loop more than once.
+ * (We never hit the following condition more than twice).
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
return NULL;
reader = rb_get_reader_page(cpu_buffer);
@@ -3130,14 +3139,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;
/*
- * We repeat when a timestamp is encountered.
- * We can get multiple timestamps by nested interrupts or also
- * if filtering is on (discarding commits). Since discarding
- * commits can be frequent we can get a lot of timestamps.
- * But we limit them by not adding timestamps if they begin
- * at the start of a page.
+ * We repeat when a time extend is encountered.
+ * Since the time extend is always attached to a data event,
+ * we should never loop more than once.
+ * (We never hit the following condition more than twice).
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))
@@ -3835,7 +3842,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (len > (commit - read))
len = (commit - read);
- size = rb_event_length(event);
+ /* Always keep the time extend and data together */
+ size = rb_event_ts_length(event);
if (len < size)
goto out_unlock;
@@ -3857,7 +3865,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
break;
event = rb_reader_event(cpu_buffer);
- size = rb_event_length(event);
+ /* Always keep the time extend and data together */
+ size = rb_event_ts_length(event);
} while (len > size);
/* update bpage */
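The TIME_EXTEND encoding used above splits a delta that no longer fits in the 27-bit time_delta field across time_delta and array[0], and rb_update_write_stamp() reassembles it. A minimal user-space sketch of that split, assuming the ring buffer's usual TS_SHIFT of 27 (the constant itself is not shown in these hunks):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define TS_SHIFT 27                                /* assumed, per ring_buffer.c */
#define TS_MASK  ((1ULL << TS_SHIFT) - 1)

int main(void)
{
	uint64_t delta = (123ULL << TS_SHIFT) + 4567;  /* too big for 27 bits */
	uint32_t time_delta = delta & TS_MASK;         /* low 27 bits stay in the event */
	uint32_t array0 = delta >> TS_SHIFT;           /* high bits go into array[0] */
	uint64_t rebuilt = ((uint64_t)array0 << TS_SHIFT) + time_delta;

	assert(rebuilt == delta);                      /* what rb_update_write_stamp() does */
	printf("delta=%llu -> array[0]=%u time_delta=%u\n",
	       (unsigned long long)delta, array0, time_delta);
	return 0;
}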
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 001bcd2ccf4a..82d9b8106cd0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3996,13 +3996,9 @@ static void tracing_init_debugfs_percpu(long cpu)
{
struct dentry *d_percpu = tracing_dentry_percpu();
struct dentry *d_cpu;
- /* strlen(cpu) + MAX(log10(cpu)) + '\0' */
- char cpu_dir[7];
+ char cpu_dir[30]; /* 30 characters should be more than enough */
- if (cpu > 999 || cpu < 0)
- return;
-
- sprintf(cpu_dir, "cpu%ld", cpu);
+ snprintf(cpu_dir, 30, "cpu%ld", cpu);
d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
if (!d_cpu) {
pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
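The debugfs name fix above trades a tight sprintf() buffer for a generous snprintf() one. A user-space sketch of why the old 7-byte buffer overflows for cpu numbers of four or more digits while snprintf() merely truncates (the cpu value is made up):

#include <stdio.h>

int main(void)
{
	char small[7];   /* "cpu" + 3 digits + '\0' only fits cpu <= 999 */
	char big[30];
	long cpu = 123456;

	/* sprintf(small, "cpu%ld", cpu) would need 10 bytes here: buffer overflow */
	snprintf(small, sizeof(small), "cpu%ld", cpu);  /* safely truncated to "cpu123" */
	snprintf(big, sizeof(big), "cpu%ld", cpu);      /* full "cpu123456" */
	printf("%s vs %s\n", small, big);
	return 0;
}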
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b8d2852baa4a..2dec9bcde8b4 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -31,7 +31,6 @@
#include <linux/perf_event.h>
#include <linux/stringify.h>
#include <linux/limits.h>
-#include <linux/uaccess.h>
#include <asm/bitsperlong.h>
#include "trace.h"
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 0a67e041edf8..24dc60d9fa1f 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -63,12 +63,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
stats->ac_ppid = pid_alive(tsk) ?
rcu_dereference(tsk->real_parent)->tgid : 0;
rcu_read_unlock();
- stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
- stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
- stats->ac_utimescaled =
- cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
- stats->ac_stimescaled =
- cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
+ stats->ac_utime = cputime_to_usecs(tsk->utime);
+ stats->ac_stime = cputime_to_usecs(tsk->stime);
+ stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
+ stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
stats->ac_minflt = tsk->min_flt;
stats->ac_majflt = tsk->maj_flt;
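The tsacct change above is about precision rather than behaviour: converting to milliseconds and multiplying back up discards the sub-millisecond part that cputime_to_usecs() keeps. A sketch with a made-up value, using plain integers instead of the real cputime_t units:

#include <stdio.h>

int main(void)
{
	unsigned long long utime_us = 1500;                      /* pretend runtime, in us */
	unsigned long long via_msec = (utime_us / 1000) * 1000;  /* old rounding path */
	unsigned long long via_usec = utime_us;                  /* new direct path */

	printf("old: %llu us, new: %llu us\n", via_msec, via_usec);  /* 1000 vs 1500 */
	return 0;
}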
diff --git a/kernel/user.c b/kernel/user.c
index 7e72614b736d..2c7d8d5914b1 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -91,6 +91,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
* upon function exit.
*/
static void free_user(struct user_struct *up, unsigned long flags)
+ __releases(&uidhash_lock)
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
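__releases() above is a sparse annotation with no runtime effect; it documents that the function exits with uidhash_lock dropped. A hedged sketch of how such annotations are normally paired; the lock and helper names here are invented:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Tells sparse this helper returns with example_lock held. */
static void example_get(void) __acquires(&example_lock)
{
	spin_lock(&example_lock);
}

/* Tells sparse this helper releases a lock taken elsewhere. */
static void example_put(void) __releases(&example_lock)
{
	spin_unlock(&example_lock);
}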
diff --git a/kernel/wait.c b/kernel/wait.c
index c4bd3d825f35..b0310eb6cc1e 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -92,7 +92,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
-/*
+/**
* finish_wait - clean up after waiting in a queue
* @q: waitqueue waited on
* @wait: wait descriptor
@@ -127,11 +127,11 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
}
EXPORT_SYMBOL(finish_wait);
-/*
+/**
* abort_exclusive_wait - abort exclusive waiting in a queue
* @q: waitqueue waited on
* @wait: wait descriptor
- * @state: runstate of the waiter to be woken
+ * @mode: runstate of the waiter to be woken
* @key: key to identify a wait bit queue or %NULL
*
* Sets current thread back to running state and removes
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e5ff2cbaadc2..90db1bd1a978 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2064,7 +2064,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
* checks and call back into the fixup functions where we
* might deadlock.
*/
- INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+ INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
init_completion(&barr->done);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 69a32664c289..28b42b9274d0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
help
Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+config DEBUG_OBJECTS_PERCPU_COUNTER
+ bool "Debug percpu counter objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ percpu counter routines to track the lifetime of percpu counter
+ objects and validate the percpu counter operations.
+
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
range 0 1
@@ -366,7 +374,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
- (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+ (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
@@ -740,6 +748,15 @@ config DEBUG_LIST
If unsure, say N.
+config TEST_LIST_SORT
+ bool "Linked list sorting test"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on the 'list_sort()' function test. The test is
+ executed only once during system boot, so it affects only boot time.
+
+ If unsure, say N.
+
config DEBUG_SG
bool "Debug SG table operations"
depends on DEBUG_KERNEL
@@ -1200,6 +1217,19 @@ config ATOMIC64_SELFTEST
If unsure, say N.
+config ASYNC_RAID6_TEST
+ tristate "Self test for hardware accelerated raid6 recovery"
+ depends on ASYNC_RAID6_RECOV
+ select ASYNC_MEMCPY
+ ---help---
+ This is a one-shot self test that permutes through the
+ recovery of all the possible two-disk failure scenarios for an
+ N-disk array. Recovery is performed with the asynchronous
+ raid6 recovery routines, and will optionally use an offload
+ engine if one is available.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916ccd..741fae905ae3 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
-#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
return -EOVERFLOW;
- chunk = (chunk << 4) | unhex(c);
+ chunk = (chunk << 4) | hex_to_bin(c);
ndigits++; totaldigits++;
}
if (ndigits == 0)
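hex_to_bin() returns the value 0-15 of a hex digit and -1 for anything else, which is what lets the local unhex() macro above go away. A user-space sketch of the same idea; hex_to_bin_demo() is a stand-in written for illustration, not the kernel routine:

#include <ctype.h>
#include <stdio.h>

/* Local stand-in for the kernel's hex_to_bin(); illustration only. */
static int hex_to_bin_demo(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	unsigned long chunk = 0;
	const char *s = "1f";

	for (; *s; s++)
		chunk = (chunk << 4) | hex_to_bin_demo(*s);
	printf("0x%lx\n", chunk);        /* prints 0x1f */
	return 0;
}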
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9cf..5b4919191778 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
EXPORT_SYMBOL(div_s64_rem);
#endif
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'. The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
- u32 high, d;
+ u32 high = divisor >> 32;
+ u64 quot;
- high = divisor >> 32;
- if (high) {
- unsigned int shift = fls(high);
+ if (high == 0) {
+ quot = div_u64(dividend, divisor);
+ } else {
+ int n = 1 + fls(high);
+ quot = div_u64(dividend >> n, divisor >> n);
- d = divisor >> shift;
- dividend >>= shift;
- } else
- d = divisor;
+ if (quot != 0)
+ quot--;
+ if ((dividend - quot * divisor) >= divisor)
+ quot++;
+ }
- return div_u64(dividend, d);
+ return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+ s64 quot, t;
+
+ quot = div64_u64(abs64(dividend), abs64(divisor));
+ t = (dividend ^ divisor) >> 63;
+
+ return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
#endif /* BITS_PER_LONG == 32 */
/*
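The div64_u64() rewrite above shrinks the divisor until it fits in 32 bits, computes an estimate that can be at most one too large, and then corrects it. A user-space mirror of those steps, where fls32() stands in for the kernel's fls(); this is a sketch of the algorithm, not the kernel build:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static int fls32(uint32_t x)                 /* stand-in for the kernel's fls() */
{
	int n = 0;
	while (x) { n++; x >>= 1; }
	return n;
}

static uint64_t div64_u64_demo(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;
	} else {
		int n = 1 + fls32(high);             /* shrink divisor below 2^32 */
		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)                       /* estimate may be one too big */
			quot--;
		if (dividend - quot * divisor >= divisor)
			quot++;                      /* ...or one too small after that */
	}
	return quot;
}

int main(void)
{
	uint64_t a = 0xFFFFFFFFFFFFFFFFULL, b = 0x100000001ULL;

	assert(div64_u64_demo(a, b) == a / b);
	printf("%llu\n", (unsigned long long)div64_u64_demo(a, b));
	return 0;
}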
diff --git a/lib/idr.c b/lib/idr.c
index 5e0966be0f7c..e15502e8b21e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
/**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible. This of course requires that
+ * no spinning locks be held.
*
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
@@ -290,11 +291,13 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
* This is the allocate id function. It should be called with any
* required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
*
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ *
+ * @id returns a value in the range @starting_id ... %0x7fffffff
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
- * This is the allocate id function. It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
*
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
*
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
*/
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
}
/**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
* @idp: idr handle
* @id: unique key
*/
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
* function will remove all id mappings and leave all idp_layers
* unused.
*
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
* use idr_for_each() to free all objects, if necessary, then
* idr_remove_all() to remove all ids, and idr_destroy() to free
* up the cached idr_layers.
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
* not allowed.
*
* We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
*
* The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
*/
@@ -637,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
* @id: lookup key
*
* Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
*
* The caller must serialize with writers.
*/
@@ -696,10 +698,11 @@ void idr_init(struct idr *idp)
EXPORT_SYMBOL(idr_init);
-/*
+/**
+ * DOC: IDA description
* IDA - IDR based ID allocator
*
- * this is id allocator without id -> pointer translation. Memory
+ * This is an id allocator without id -> pointer translation. Memory
* usage is much lower than full blown idr because each id only
* occupies a bit. ida uses a custom leaf node which contains
* IDA_BITMAP_BITS slots.
@@ -732,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
* following function. It preallocates enough memory to satisfy the
* worst possible allocation.
*
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
*/
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
@@ -765,11 +768,11 @@ EXPORT_SYMBOL(ida_pre_get);
* Allocate new ID above or equal to @ida. It should be called with
* any required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
*
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
@@ -851,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
*
* Allocate new ID. It should be called with any required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
*
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
*/
int ida_get_new(struct ida *ida, int *p_id)
{
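The idr_pre_get()/idr_get_new() retry protocol described in the comments above is usually written as a small loop. A hedged sketch under the assumption of a caller-provided spinlock; my_idr, my_lock and my_object_register() are illustrative names:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_object_register(void *my_object, int *out_id)
{
	int ret;

again:
	/* Preallocate outside the lock; GFP_KERNEL may sleep. */
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&my_lock);
	ret = idr_get_new(&my_idr, my_object, out_id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto again;      /* preallocation raced away: refill and retry */
	return ret;              /* 0 on success, -ENOSPC if the idr is full */
}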
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a7616fa3162e..d7325c6b103f 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
}
EXPORT_SYMBOL(list_sort);
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
struct debug_el {
- struct list_head l_h;
+ unsigned int poison1;
+ struct list_head list;
+ unsigned int poison2;
int value;
unsigned serial;
};
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
{
- return container_of(a, struct debug_el, l_h)->value
- - container_of(b, struct debug_el, l_h)->value;
+ if (ela->serial >= TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+ ela->serial);
+ return -EINVAL;
+ }
+ if (elb->serial >= TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+ elb->serial);
+ return -EINVAL;
+ }
+ if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+ printk(KERN_ERR "list_sort_test: error: phantom element\n");
+ return -EINVAL;
+ }
+ if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+ printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+ ela->poison1, ela->poison2);
+ return -EINVAL;
+ }
+ if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+ printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+ elb->poison1, elb->poison2);
+ return -EINVAL;
+ }
+ return 0;
}
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct debug_el *ela, *elb;
+
+ ela = container_of(a, struct debug_el, list);
+ elb = container_of(b, struct debug_el, list);
+
+ check(ela, elb);
+ return ela->value - elb->value;
+}
static int __init list_sort_test(void)
{
- int i, r = 1, count;
- struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
- struct list_head *cur;
+ int i, count = 1, err = -EINVAL;
+ struct debug_el *el;
+ struct list_head *cur, *tmp;
+ LIST_HEAD(head);
+
+ printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
- printk(KERN_WARNING "testing list_sort()\n");
+ elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+ if (!elts) {
+ printk(KERN_ERR "list_sort_test: error: cannot allocate "
+ "memory\n");
+ goto exit;
+ }
- cur = head;
- for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
- struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
- BUG_ON(!el);
+ for (i = 0; i < TEST_LIST_LEN; i++) {
+ el = kmalloc(sizeof(*el), GFP_KERNEL);
+ if (!el) {
+ printk(KERN_ERR "list_sort_test: error: cannot "
+ "allocate memory\n");
+ goto exit;
+ }
/* force some equivalencies */
- el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+ el->value = random32() % (TEST_LIST_LEN/3);
el->serial = i;
-
- el->l_h.prev = cur;
- cur->next = &el->l_h;
- cur = cur->next;
+ el->poison1 = TEST_POISON1;
+ el->poison2 = TEST_POISON2;
+ elts[i] = el;
+ list_add_tail(&el->list, &head);
}
- head->prev = cur;
- list_sort(NULL, head, cmp);
+ list_sort(NULL, &head, cmp);
+
+ for (cur = head.next; cur->next != &head; cur = cur->next) {
+ struct debug_el *el1;
+ int cmp_result;
- count = 1;
- for (cur = head->next; cur->next != head; cur = cur->next) {
- struct debug_el *el = container_of(cur, struct debug_el, l_h);
- int cmp_result = cmp(NULL, cur, cur->next);
if (cur->next->prev != cur) {
- printk(KERN_EMERG "list_sort() returned "
- "a corrupted list!\n");
- return 1;
- } else if (cmp_result > 0) {
- printk(KERN_EMERG "list_sort() failed to sort!\n");
- return 1;
- } else if (cmp_result == 0 &&
- el->serial >= container_of(cur->next,
- struct debug_el, l_h)->serial) {
- printk(KERN_EMERG "list_sort() failed to preserve order"
- " of equivalent elements!\n");
- return 1;
+ printk(KERN_ERR "list_sort_test: error: list is "
+ "corrupted\n");
+ goto exit;
+ }
+
+ cmp_result = cmp(NULL, cur, cur->next);
+ if (cmp_result > 0) {
+ printk(KERN_ERR "list_sort_test: error: list is not "
+ "sorted\n");
+ goto exit;
+ }
+
+ el = container_of(cur, struct debug_el, list);
+ el1 = container_of(cur->next, struct debug_el, list);
+ if (cmp_result == 0 && el->serial >= el1->serial) {
+ printk(KERN_ERR "list_sort_test: error: order of "
+ "equivalent elements not preserved\n");
+ goto exit;
+ }
+
+ if (check(el, el1)) {
+ printk(KERN_ERR "list_sort_test: error: element check "
+ "failed\n");
+ goto exit;
}
- kfree(cur->prev);
count++;
}
- kfree(cur);
- if (count != LIST_SORT_TEST_LENGTH) {
- printk(KERN_EMERG "list_sort() returned list of"
- "different length!\n");
- return 1;
+
+ if (count != TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: bad list length %d",
+ count);
+ goto exit;
}
- return 0;
+
+ err = 0;
+exit:
+ kfree(elts);
+ list_for_each_safe(cur, tmp, &head) {
+ list_del(cur);
+ kfree(container_of(cur, struct debug_el, list));
+ }
+ return err;
}
module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
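For context, the interface the new test exercises: list_sort() takes a cmp callback returning negative, zero or positive, and keeps the relative order of elements that compare equal, which is exactly what the serial-number check above verifies. A usage sketch with a made-up element type:

#include <linux/list.h>
#include <linux/list_sort.h>

struct my_item {                         /* hypothetical element type */
	struct list_head node;
	int key;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct my_item *ia = container_of(a, struct my_item, node);
	struct my_item *ib = container_of(b, struct my_item, node);

	/* <0, 0, >0 like memcmp(); equal keys keep their original order */
	return ia->key - ib->key;
}

/* With a populated list_head my_list: list_sort(NULL, &my_list, my_cmp); */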
diff --git a/lib/parser.c b/lib/parser.c
index fb34977246bb..6e89eca5cca0 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
char *endp;
char *buf;
int ret;
+ size_t len = s->to - s->from;
- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+ buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, s->to - s->from);
- buf[s->to - s->from] = '\0';
+ memcpy(buf, s->from, len);
+ buf[len] = '\0';
*result = simple_strtol(buf, &endp, base);
ret = 0;
if (endp == buf)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ec9048e74f44..604678d7d06d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -8,10 +8,53 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/debugobjects.h>
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct percpu_counter *fbc = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ percpu_counter_destroy(fbc);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+ .name = "percpu_counter",
+ .fixup_free = percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+ debug_object_init(fbc, &percpu_counter_debug_descr);
+ debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+ debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
s32 *pcount;
- int cpu = get_cpu();
- pcount = per_cpu_ptr(fbc->counters, cpu);
+ preempt_disable();
+ pcount = this_cpu_ptr(fbc->counters);
count = *pcount + amount;
if (count >= batch || count <= -batch) {
spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
} else {
*pcount = count;
}
- put_cpu();
+ preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
fbc->counters = alloc_percpu(s32);
if (!fbc->counters)
return -ENOMEM;
+
+ debug_percpu_counter_activate(fbc);
+
#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc->list);
mutex_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
if (!fbc->counters)
return;
+ debug_percpu_counter_deactivate(fbc);
+
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
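The new debugobjects hook tracks the init/destroy lifetime of counters. A minimal sketch of the API being guarded, assuming the two-argument percpu_counter_init() of this series; my_events and my_counter_setup() are illustrative names:

#include <linux/percpu_counter.h>

static struct percpu_counter my_events;

static int my_counter_setup(void)
{
	int err = percpu_counter_init(&my_events, 0);
	if (err)
		return err;

	percpu_counter_add(&my_events, 1);        /* cheap per-cpu fast path */
	percpu_counter_sub(&my_events, 1);

	/* Exact (slower) read folds every cpu's delta into the global count. */
	WARN_ON(percpu_counter_sum(&my_events) != 0);

	percpu_counter_destroy(&my_events);       /* must pair with init */
	return 0;
}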
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7af9d841c43b..c150d3dafff4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -988,8 +988,15 @@ static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
- if (!ptr)
+ if (!ptr) {
+ /*
+ * Print (null) with the same width as a pointer so it makes
+ * tabular output look nice.
+ */
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
return string(buf, end, "(null)", spec);
+ }
switch (*fmt) {
case 'F':
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
- spec.field_width = 2*sizeof(void *);
+ spec.field_width = 2 * sizeof(void *);
spec.flags |= ZEROPAD;
}
spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size is == 0 the function returns 0.
*/
int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
i = vsnprintf(buf, size, fmt, args);
va_end(args);
- return (i >= size) ? (size - 1) : i;
+ if (likely(i < size))
+ return i;
+ if (size != 0)
+ return size - 1;
+ return 0;
}
EXPORT_SYMBOL(scnprintf);
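The comment fix above pins down the difference between the two return conventions: snprintf() reports the length the output would have had, scnprintf() reports what actually landed in the buffer, and a zero-sized buffer now explicitly yields 0. A user-space stand-in showing the difference; scnprintf_demo() is written for illustration only:

#include <stdio.h>

static int scnprintf_demo(char *buf, size_t size, const char *fmt, long v)
{
	int i = snprintf(buf, size, fmt, v);

	if (i < (int)size)
		return i;
	if (size != 0)
		return size - 1;   /* truncated: report characters actually stored */
	return 0;
}

int main(void)
{
	char buf[8];

	printf("snprintf: %d\n", snprintf(buf, sizeof(buf), "%ld", 123456789L));       /* 9 */
	printf("scnprintf: %d\n", scnprintf_demo(buf, sizeof(buf), "%ld", 123456789L)); /* 7 */
	return 0;
}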
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 65d420499a61..027100d30227 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -74,11 +74,11 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
nr_wb = nr_dirty = nr_io = nr_more_io = 0;
spin_lock(&inode_lock);
- list_for_each_entry(inode, &wb->b_dirty, i_list)
+ list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
nr_dirty++;
- list_for_each_entry(inode, &wb->b_io, i_list)
+ list_for_each_entry(inode, &wb->b_io, i_wb_list)
nr_io++;
- list_for_each_entry(inode, &wb->b_more_io, i_list)
+ list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
nr_more_io++;
spin_unlock(&inode_lock);
@@ -362,7 +362,7 @@ static int bdi_forker_thread(void *ptr)
{
struct bdi_writeback *me = ptr;
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ current->flags |= PF_SWAPWRITE;
set_freezable();
/*
@@ -729,6 +729,7 @@ static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
+static atomic_t nr_bdi_congested[2];
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
@@ -736,7 +737,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
wait_queue_head_t *wqh = &congestion_wqh[sync];
bit = sync ? BDI_sync_congested : BDI_async_congested;
- clear_bit(bit, &bdi->state);
+ if (test_and_clear_bit(bit, &bdi->state))
+ atomic_dec(&nr_bdi_congested[sync]);
smp_mb__after_clear_bit();
if (waitqueue_active(wqh))
wake_up(wqh);
@@ -748,7 +750,8 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync)
enum bdi_state bit;
bit = sync ? BDI_sync_congested : BDI_async_congested;
- set_bit(bit, &bdi->state);
+ if (!test_and_set_bit(bit, &bdi->state))
+ atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
@@ -764,13 +767,72 @@ EXPORT_SYMBOL(set_bdi_congested);
long congestion_wait(int sync, long timeout)
{
long ret;
+ unsigned long start = jiffies;
DEFINE_WAIT(wait);
wait_queue_head_t *wqh = &congestion_wqh[sync];
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
ret = io_schedule_timeout(timeout);
finish_wait(wqh, &wait);
+
+ trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
+ jiffies_to_usecs(jiffies - start));
+
return ret;
}
EXPORT_SYMBOL(congestion_wait);
+/**
+ * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
+ * @zone: A zone to check if it is heavily congested
+ * @sync: SYNC or ASYNC IO
+ * @timeout: timeout in jiffies
+ *
+ * In the event of a congested backing_dev (any backing_dev) and the given
+ * @zone has experienced recent congestion, this waits for up to @timeout
+ * jiffies for either a BDI to exit congestion of the given @sync queue
+ * or a write to complete.
+ *
+ * In the absence of zone congestion, cond_resched() is called to yield
+ * the processor if necessary but otherwise does not sleep.
+ *
+ * The return value is 0 if the sleep is for the full timeout. Otherwise,
+ * it is the number of jiffies that were still remaining when the function
+ * returned. return_value == timeout implies the function did not sleep.
+ */
+long wait_iff_congested(struct zone *zone, int sync, long timeout)
+{
+ long ret;
+ unsigned long start = jiffies;
+ DEFINE_WAIT(wait);
+ wait_queue_head_t *wqh = &congestion_wqh[sync];
+
+ /*
+ * If there is no congestion, or heavy congestion is not being
+ * encountered in the current zone, yield if necessary instead
+ * of sleeping on the congestion queue
+ */
+ if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
+ !zone_is_reclaim_congested(zone)) {
+ cond_resched();
+
+ /* In case we scheduled, work out time remaining */
+ ret = timeout - (jiffies - start);
+ if (ret < 0)
+ ret = 0;
+
+ goto out;
+ }
+
+ /* Sleep until uncongested or a write happens */
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+ ret = io_schedule_timeout(timeout);
+ finish_wait(wqh, &wait);
+
+out:
+ trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
+ jiffies_to_usecs(jiffies - start));
+
+ return ret;
+}
+EXPORT_SYMBOL(wait_iff_congested);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 3df063706f53..4df2de77e069 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -311,6 +311,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
size_t offset;
void *retval;
+ might_sleep_if(mem_flags & __GFP_WAIT);
+
spin_lock_irqsave(&pool->lock, flags);
restart:
list_for_each_entry(page, &pool->page_list, page_list) {
diff --git a/mm/filemap.c b/mm/filemap.c
index 3d4df44e4221..75572b5f2374 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -612,6 +612,19 @@ void __lock_page_nosync(struct page *page)
TASK_UNINTERRUPTIBLE);
}
+int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ unsigned int flags)
+{
+ if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
+ __lock_page(page);
+ return 1;
+ } else {
+ up_read(&mm->mmap_sem);
+ wait_on_page_locked(page);
+ return 0;
+ }
+}
+
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
@@ -1539,25 +1552,28 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* waiting for the lock.
*/
do_async_mmap_readahead(vma, ra, file, page, offset);
- lock_page(page);
-
- /* Did it get truncated? */
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- put_page(page);
- goto no_cached_page;
- }
} else {
/* No page in the page cache at all */
do_sync_mmap_readahead(vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
- page = find_lock_page(mapping, offset);
+ page = find_get_page(mapping, offset);
if (!page)
goto no_cached_page;
}
+ if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+ return ret | VM_FAULT_RETRY;
+
+ /* Did it get truncated? */
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ put_page(page);
+ goto retry_find;
+ }
+ VM_BUG_ON(page->index != offset);
+
/*
* We have a locked page in the page cache, now we need to check
* that it's up-to-date. If not, it is going to be due to an error.
@@ -2177,12 +2193,12 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
}
if (written > 0) {
- loff_t end = pos + written;
- if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
- i_size_write(inode, end);
+ pos += written;
+ if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
+ i_size_write(inode, pos);
mark_inode_dirty(inode);
}
- *ppos = end;
+ *ppos = pos;
}
out:
return written;
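A caller that passes FAULT_FLAG_ALLOW_RETRY must expect VM_FAULT_RETRY to come back with mmap_sem already dropped by __lock_page_or_retry(). A simplified, hypothetical sketch of the arch fault-handler side (real handlers do more validation and differ per architecture):

#include <linux/mm.h>

static int demo_handle_fault(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/*
		 * __lock_page_or_retry() dropped mmap_sem while waiting for
		 * the page lock; retry once without ALLOW_RETRY so the
		 * second pass cannot bounce forever.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
	return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}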
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a0aa1be4993..693394daa2ed 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -29,6 +29,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
/*
* Virtual_count is not a pure "count".
* 0 means that it is not mapped, and has not been mapped
@@ -42,6 +47,9 @@
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);
+
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
unsigned int nr_free_highpages (void)
{
pg_data_t *pgdat;
@@ -422,61 +430,3 @@ void __init page_address_init(void)
}
#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
- static int warn_count = 10;
-
- if (unlikely(warn_count < 0))
- return;
-
- if (unlikely(in_interrupt())) {
- if (in_nmi()) {
- if (type != KM_NMI && type != KM_NMI_PTE) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (in_irq()) {
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
- type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (!irqs_disabled()) { /* softirq */
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
- type != KM_SKB_SUNRPC_DATA &&
- type != KM_SKB_DATA_SOFTIRQ &&
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- }
- }
-
- if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
- type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
- type == KM_IRQ_PTE || type == KM_NMI ||
- type == KM_NMI_PTE ) {
- if (!irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
- if (irq_count() == 0 && !irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- }
-#ifdef CONFIG_KGDB_KDB
- if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
- WARN_ON(1);
- warn_count--;
- }
-#endif /* CONFIG_KGDB_KDB */
-}
-
-#endif
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c03273807182..c4a3558589ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -423,14 +423,14 @@ static void clear_huge_page(struct page *page,
}
}
-static void copy_gigantic_page(struct page *dst, struct page *src,
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma)
{
int i;
struct hstate *h = hstate_vma(vma);
struct page *dst_base = dst;
struct page *src_base = src;
- might_sleep();
+
for (i = 0; i < pages_per_huge_page(h); ) {
cond_resched();
copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
@@ -440,14 +440,15 @@ static void copy_gigantic_page(struct page *dst, struct page *src,
src = mem_map_next(src, src_base, i);
}
}
-static void copy_huge_page(struct page *dst, struct page *src,
+
+static void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma)
{
int i;
struct hstate *h = hstate_vma(vma);
if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
- copy_gigantic_page(dst, src, addr, vma);
+ copy_user_gigantic_page(dst, src, addr, vma);
return;
}
@@ -458,6 +459,40 @@ static void copy_huge_page(struct page *dst, struct page *src,
}
}
+static void copy_gigantic_page(struct page *dst, struct page *src)
+{
+ int i;
+ struct hstate *h = page_hstate(src);
+ struct page *dst_base = dst;
+ struct page *src_base = src;
+
+ for (i = 0; i < pages_per_huge_page(h); ) {
+ cond_resched();
+ copy_highpage(dst, src);
+
+ i++;
+ dst = mem_map_next(dst, dst_base, i);
+ src = mem_map_next(src, src_base, i);
+ }
+}
+
+void copy_huge_page(struct page *dst, struct page *src)
+{
+ int i;
+ struct hstate *h = page_hstate(src);
+
+ if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+ copy_gigantic_page(dst, src);
+ return;
+ }
+
+ might_sleep();
+ for (i = 0; i < pages_per_huge_page(h); i++) {
+ cond_resched();
+ copy_highpage(dst + i, src + i);
+ }
+}
+
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
@@ -466,11 +501,24 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
h->free_huge_pages_node[nid]++;
}
+static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
+{
+ struct page *page;
+
+ if (list_empty(&h->hugepage_freelists[nid]))
+ return NULL;
+ page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
+ list_del(&page->lru);
+ set_page_refcounted(page);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[nid]--;
+ return page;
+}
+
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
{
- int nid;
struct page *page = NULL;
struct mempolicy *mpol;
nodemask_t *nodemask;
@@ -496,19 +544,13 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- nid = zone_to_nid(zone);
- if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
- !list_empty(&h->hugepage_freelists[nid])) {
- page = list_entry(h->hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
-
- if (!avoid_reserve)
- decrement_hugepage_resv_vma(h, vma);
-
- break;
+ if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+ page = dequeue_huge_page_node(h, zone_to_nid(zone));
+ if (page) {
+ if (!avoid_reserve)
+ decrement_hugepage_resv_vma(h, vma);
+ break;
+ }
}
}
err:
@@ -770,11 +812,10 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
return ret;
}
-static struct page *alloc_buddy_huge_page(struct hstate *h,
- struct vm_area_struct *vma, unsigned long address)
+static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
struct page *page;
- unsigned int nid;
+ unsigned int r_nid;
if (h->order >= MAX_ORDER)
return NULL;
@@ -812,9 +853,14 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
}
spin_unlock(&hugetlb_lock);
- page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
- __GFP_REPEAT|__GFP_NOWARN,
- huge_page_order(h));
+ if (nid == NUMA_NO_NODE)
+ page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+ __GFP_REPEAT|__GFP_NOWARN,
+ huge_page_order(h));
+ else
+ page = alloc_pages_exact_node(nid,
+ htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+ __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
if (page && arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
@@ -823,19 +869,13 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
spin_lock(&hugetlb_lock);
if (page) {
- /*
- * This page is now managed by the hugetlb allocator and has
- * no users -- drop the buddy allocator's reference.
- */
- put_page_testzero(page);
- VM_BUG_ON(page_count(page));
- nid = page_to_nid(page);
+ r_nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
/*
* We incremented the global counters already
*/
- h->nr_huge_pages_node[nid]++;
- h->surplus_huge_pages_node[nid]++;
+ h->nr_huge_pages_node[r_nid]++;
+ h->surplus_huge_pages_node[r_nid]++;
__count_vm_event(HTLB_BUDDY_PGALLOC);
} else {
h->nr_huge_pages--;
@@ -848,6 +888,25 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
}
/*
+ * This allocation function is useful in contexts where the vma is irrelevant.
+ * E.g. soft-offlining uses this function because it only cares about the
+ * physical address of the error page.
+ */
+struct page *alloc_huge_page_node(struct hstate *h, int nid)
+{
+ struct page *page;
+
+ spin_lock(&hugetlb_lock);
+ page = dequeue_huge_page_node(h, nid);
+ spin_unlock(&hugetlb_lock);
+
+ if (!page)
+ page = alloc_buddy_huge_page(h, nid);
+
+ return page;
+}
+
+/*
* Increase the hugetlb pool such that it can accommodate a reservation
* of size 'delta'.
*/
@@ -871,17 +930,14 @@ static int gather_surplus_pages(struct hstate *h, int delta)
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
- page = alloc_buddy_huge_page(h, NULL, 0);
- if (!page) {
+ page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+ if (!page)
/*
* We were not able to allocate enough pages to
* satisfy the entire reservation so we free what
* we've allocated so far.
*/
- spin_lock(&hugetlb_lock);
- needed = 0;
goto free;
- }
list_add(&page->lru, &surplus_list);
}
@@ -908,31 +964,31 @@ retry:
needed += allocated;
h->resv_huge_pages += delta;
ret = 0;
-free:
+
+ spin_unlock(&hugetlb_lock);
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
list_del(&page->lru);
+ /*
+ * This page is now managed by the hugetlb allocator and has
+ * no users -- drop the buddy allocator's reference.
+ */
+ put_page_testzero(page);
+ VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
/* Free unnecessary surplus pages to the buddy allocator */
+free:
if (!list_empty(&surplus_list)) {
- spin_unlock(&hugetlb_lock);
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
list_del(&page->lru);
- /*
- * The page has a reference count of zero already, so
- * call free_huge_page directly instead of using
- * put_page. This must be done with hugetlb_lock
- * unlocked which is safe because free_huge_page takes
- * hugetlb_lock before deciding how to free the page.
- */
- free_huge_page(page);
+ put_page(page);
}
- spin_lock(&hugetlb_lock);
}
+ spin_lock(&hugetlb_lock);
return ret;
}
@@ -1052,14 +1108,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
spin_unlock(&hugetlb_lock);
if (!page) {
- page = alloc_buddy_huge_page(h, vma, addr);
+ page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugetlb_put_quota(inode->i_mapping, chg);
return ERR_PTR(-VM_FAULT_SIGBUS);
}
}
- set_page_refcounted(page);
set_page_private(page, (unsigned long) mapping);
vma_commit_reservation(h, vma, addr);
@@ -2153,6 +2208,19 @@ nomem:
return -ENOMEM;
}
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+ swp_entry_t swp;
+
+ if (huge_pte_none(pte) || pte_present(pte))
+ return 0;
+ swp = pte_to_swp_entry(pte);
+ if (non_swap_entry(swp) && is_migration_entry(swp)) {
+ return 1;
+ } else
+ return 0;
+}
+
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
swp_entry_t swp;
@@ -2380,10 +2448,13 @@ retry_avoidcopy:
* When the original hugepage is shared one, it does not have
* anon_vma prepared.
*/
- if (unlikely(anon_vma_prepare(vma)))
+ if (unlikely(anon_vma_prepare(vma))) {
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
+ }
- copy_huge_page(new_page, old_page, address, vma);
+ copy_user_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
/*
@@ -2515,22 +2586,20 @@ retry:
hugepage_add_new_anon_rmap(page, vma, address);
}
} else {
+ /*
+ * If a memory error occurs between mmap() and fault, some processes don't
+ * have a hwpoisoned swap entry for the errored virtual address. So we need
+ * to block the hugepage fault with a PG_hwpoison bit check.
+ */
+ if (unlikely(PageHWPoison(page))) {
+ ret = VM_FAULT_HWPOISON |
+ VM_FAULT_SET_HINDEX(h - hstates);
+ goto backout_unlocked;
+ }
page_dup_rmap(page);
}
/*
- * Since memory error handler replaces pte into hwpoison swap entry
- * at the time of error handling, a process which reserved but not have
- * the mapping to the error hugepage does not have hwpoison swap entry.
- * So we need to block accesses from such a process by checking
- * PG_hwpoison bit here.
- */
- if (unlikely(PageHWPoison(page))) {
- ret = VM_FAULT_HWPOISON;
- goto backout_unlocked;
- }
-
- /*
* If we are going to COW a private mapping later, we examine the
* pending reservations for this page now. This will ensure that
* any allocations necessary to record that reservation occur outside
@@ -2587,8 +2656,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
- if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
- return VM_FAULT_HWPOISON;
+ if (unlikely(is_hugetlb_entry_migration(entry))) {
+ migration_entry_wait(mm, (pmd_t *)ptep, address);
+ return 0;
+ } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+ return VM_FAULT_HWPOISON_LARGE |
+ VM_FAULT_SET_HINDEX(h - hstates);
}
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
@@ -2878,18 +2951,41 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
hugetlb_acct_memory(h, -(chg - freed));
}
+#ifdef CONFIG_MEMORY_FAILURE
+
+/* Should be called with hugetlb_lock held */
+static int is_hugepage_on_freelist(struct page *hpage)
+{
+ struct page *page;
+ struct page *tmp;
+ struct hstate *h = page_hstate(hpage);
+ int nid = page_to_nid(hpage);
+
+ list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
+ if (page == hpage)
+ return 1;
+ return 0;
+}
+
/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
*/
-void __isolate_hwpoisoned_huge_page(struct page *hpage)
+int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);
+ int ret = -EBUSY;
spin_lock(&hugetlb_lock);
- list_del(&hpage->lru);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
+ if (is_hugepage_on_freelist(hpage)) {
+ list_del(&hpage->lru);
+ set_page_refcounted(hpage);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[nid]--;
+ ret = 0;
+ }
spin_unlock(&hugetlb_lock);
+ return ret;
}
+#endif
diff --git a/mm/internal.h b/mm/internal.h
index 6a697bb97fc5..dedb0aff673f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -62,7 +62,7 @@ extern bool is_free_buddy_page(struct page *page);
*/
static inline unsigned long page_order(struct page *page)
{
- VM_BUG_ON(!PageBuddy(page));
+ /* PageBuddy() must be checked by the caller */
return page_private(page);
}
diff --git a/mm/maccess.c b/mm/maccess.c
index 4e348dbaecd7..e2b6f5634e0d 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -1,9 +1,9 @@
/*
* Access kernel memory without faulting.
*/
-#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/uaccess.h>
/**
* probe_kernel_read(): safely attempt to read from a location
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9be3cf8a5da4..9a99cfaf0a19 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,7 +89,10 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
- MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */
+ MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
+ /* incremented at every pagein/pageout */
+ MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
+ MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */
MEM_CGROUP_STAT_NSTATS,
};
@@ -254,6 +257,12 @@ struct mem_cgroup {
* percpu counter.
*/
struct mem_cgroup_stat_cpu *stat;
+ /*
+ * used when a cpu is offlined or during other synchronizations.
+ * See mem_cgroup_read_stat().
+ */
+ struct mem_cgroup_stat_cpu nocpu_base;
+ spinlock_t pcp_counter_lock;
};
/* Stuffs for move charges at task migration. */
@@ -530,14 +539,40 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
return mz;
}
+/*
+ * Implementation Note: reading percpu statistics for memcg.
+ *
+ * Both vmstat[] and percpu_counter have thresholds and do periodic
+ * synchronization to implement a "quick" read. There is a trade-off between
+ * reading cost and precision of the value. Then, we may have a chance to
+ * implement a periodic synchronization of the counter in memcg's counter.
+ *
+ * But this _read() function is used for the user interface now. The user
+ * accounts memory usage by memory cgroup and _always_ requires an exact
+ * value because memory is being accounted. Even with a quick-and-fuzzy
+ * read, we would always have to visit all online cpus and make the sum.
+ * So, for now, unnecessary synchronization is not implemented (it is only
+ * implemented for cpu hotplug).
+ *
+ * If there are kernel internal actions which can make use of a not-exact
+ * value, and reading all cpu values can be a performance bottleneck in some
+ * common workload, a threshold and synchronization as in vmstat[] should be
+ * implemented.
+ */
static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
enum mem_cgroup_stat_index idx)
{
int cpu;
s64 val = 0;
- for_each_possible_cpu(cpu)
+ get_online_cpus();
+ for_each_online_cpu(cpu)
val += per_cpu(mem->stat->count[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ spin_lock(&mem->pcp_counter_lock);
+ val += mem->nocpu_base.count[idx];
+ spin_unlock(&mem->pcp_counter_lock);
+#endif
+ put_online_cpus();
return val;
}
@@ -659,40 +694,83 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
return mem;
}
-/*
- * Call callback function against all cgroup under hierarchy tree.
- */
-static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
- int (*func)(struct mem_cgroup *, void *))
+/* The caller has to guarantee "mem" exists before calling this */
+static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
- int found, ret, nextid;
struct cgroup_subsys_state *css;
- struct mem_cgroup *mem;
-
- if (!root->use_hierarchy)
- return (*func)(root, data);
+ int found;
- nextid = 1;
- do {
- ret = 0;
+ if (!mem) /* ROOT cgroup has the smallest ID */
+ return root_mem_cgroup; /*css_put/get against root is ignored*/
+ if (!mem->use_hierarchy) {
+ if (css_tryget(&mem->css))
+ return mem;
+ return NULL;
+ }
+ rcu_read_lock();
+ /*
+ * searching for a memory cgroup which has the smallest ID under the given
+ * ROOT cgroup. (ID >= 1)
+ */
+ css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
+ if (css && css_tryget(css))
+ mem = container_of(css, struct mem_cgroup, css);
+ else
mem = NULL;
+ rcu_read_unlock();
+ return mem;
+}
+
+static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
+ struct mem_cgroup *root,
+ bool cond)
+{
+ int nextid = css_id(&iter->css) + 1;
+ int found;
+ int hierarchy_used;
+ struct cgroup_subsys_state *css;
+
+ hierarchy_used = iter->use_hierarchy;
+ css_put(&iter->css);
+ /* If no ROOT, walk all, ignore hierarchy */
+ if (!cond || (root && !hierarchy_used))
+ return NULL;
+
+ if (!root)
+ root = root_mem_cgroup;
+
+ do {
+ iter = NULL;
rcu_read_lock();
- css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
- &found);
+
+ css = css_get_next(&mem_cgroup_subsys, nextid,
+ &root->css, &found);
if (css && css_tryget(css))
- mem = container_of(css, struct mem_cgroup, css);
+ iter = container_of(css, struct mem_cgroup, css);
rcu_read_unlock();
-
- if (mem) {
- ret = (*func)(mem, data);
- css_put(&mem->css);
- }
+ /* If css is NULL, no more cgroups will be found */
nextid = found + 1;
- } while (!ret && css);
+ } while (css && !iter);
- return ret;
+ return iter;
}
+/*
+ * for_each_mem_cgroup_tree() visits all cgroups under the tree. Please be
+ * careful that breaking out of the loop is not allowed, because we hold a
+ * reference count. Instead, set "cond" to false and "continue" to exit the
+ * loop.
+ */
+#define for_each_mem_cgroup_tree_cond(iter, root, cond) \
+ for (iter = mem_cgroup_start_loop(root);\
+ iter != NULL;\
+ iter = mem_cgroup_get_next(iter, root, cond))
+
+#define for_each_mem_cgroup_tree(iter, root) \
+ for_each_mem_cgroup_tree_cond(iter, root, true)
+
+#define for_each_mem_cgroup_all(iter) \
+ for_each_mem_cgroup_tree_cond(iter, NULL, true)
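
As an aside, a minimal sketch (not part of this patch; example_sum_stat and its limit check are hypothetical) of how a walker honours the "no break" rule above by clearing cond and continuing:

static s64 example_sum_stat(struct mem_cgroup *root,
			    enum mem_cgroup_stat_index idx, s64 limit)
{
	struct mem_cgroup *iter;
	bool cond = true;
	s64 total = 0;

	for_each_mem_cgroup_tree_cond(iter, root, cond) {
		total += mem_cgroup_read_stat(iter, idx);
		/* don't "break": clear cond so the iterator drops its css ref */
		if (total > limit) {
			cond = false;
			continue;
		}
	}
	return total;
}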
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
@@ -1051,7 +1129,52 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
return swappiness;
}
-/* A routine for testing mem is not under move_account */
+static void mem_cgroup_start_move(struct mem_cgroup *mem)
+{
+ int cpu;
+
+ get_online_cpus();
+ spin_lock(&mem->pcp_counter_lock);
+ for_each_online_cpu(cpu)
+ per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
+ mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
+ spin_unlock(&mem->pcp_counter_lock);
+ put_online_cpus();
+
+ synchronize_rcu();
+}
+
+static void mem_cgroup_end_move(struct mem_cgroup *mem)
+{
+ int cpu;
+
+ if (!mem)
+ return;
+ get_online_cpus();
+ spin_lock(&mem->pcp_counter_lock);
+ for_each_online_cpu(cpu)
+ per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
+ mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
+ spin_unlock(&mem->pcp_counter_lock);
+ put_online_cpus();
+}
+/*
+ * 2 routines for checking whether "mem" is under move_account() or not.
+ *
+ * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This
+ * is used for avoiding a race in accounting. If
+ * true, pc->mem_cgroup may be overwritten.
+ *
+ * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
+ * under the hierarchy of moving cgroups. This is
+ * for waiting at high-memory pressure caused by "move".
+ */
+
+static bool mem_cgroup_stealed(struct mem_cgroup *mem)
+{
+ VM_BUG_ON(!rcu_read_lock_held());
+ return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+}
static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
@@ -1092,13 +1215,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
return false;
}
-static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
-{
- int *val = data;
- (*val)++;
- return 0;
-}
-
/**
* mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
* @memcg: The memory cgroup that went over limit
@@ -1173,7 +1289,10 @@ done:
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
int num = 0;
- mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
+ struct mem_cgroup *iter;
+
+ for_each_mem_cgroup_tree(iter, mem)
+ num++;
return num;
}
@@ -1322,49 +1441,39 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return total;
}
-static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
-{
- int *val = (int *)data;
- int x;
- /*
- * Logically, we can stop scanning immediately when we find
- * a memcg is already locked. But condidering unlock ops and
- * creation/removal of memcg, scan-all is simple operation.
- */
- x = atomic_inc_return(&mem->oom_lock);
- *val = max(x, *val);
- return 0;
-}
/*
* Check OOM-Killer is already running under our hierarchy.
* If someone is running, return false.
*/
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
- int lock_count = 0;
+ int x, lock_count = 0;
+ struct mem_cgroup *iter;
- mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
+ for_each_mem_cgroup_tree(iter, mem) {
+ x = atomic_inc_return(&iter->oom_lock);
+ lock_count = max(x, lock_count);
+ }
if (lock_count == 1)
return true;
return false;
}
-static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{
+ struct mem_cgroup *iter;
+
/*
* When a new child is created while the hierarchy is under oom,
* mem_cgroup_oom_lock() may not be called. We have to use
* atomic_add_unless() here.
*/
- atomic_add_unless(&mem->oom_lock, -1, 0);
+ for_each_mem_cgroup_tree(iter, mem)
+ atomic_add_unless(&iter->oom_lock, -1, 0);
return 0;
}
-static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
-{
- mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
-}
static DEFINE_MUTEX(memcg_oom_mutex);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
@@ -1462,34 +1571,73 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
/*
* Currently used to update mapped file statistics, but the routine can be
* generalized to update other statistics as well.
+ *
+ * Notes: Race condition
+ *
+ * We usually use page_cgroup_lock() for accessing page_cgroup members, but
+ * it tends to be costly. Under some conditions, however, we don't need to
+ * do so _always_.
+ *
+ * Considering "charge", lock_page_cgroup() is not required because all
+ * file-stat operations happen after a page is attached to the radix-tree.
+ * There is no race with "charge".
+ *
+ * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
+ * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
+ * if there is a race with "uncharge". The statistics themselves are properly
+ * handled by flags.
+ *
+ * Considering "move", this is the only case where we see a race. To make the
+ * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
+ * the possibility of a race condition. If there is one, we take a lock.
*/
-void mem_cgroup_update_file_mapped(struct page *page, int val)
+
+static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
{
struct mem_cgroup *mem;
- struct page_cgroup *pc;
+ struct page_cgroup *pc = lookup_page_cgroup(page);
+ bool need_unlock = false;
- pc = lookup_page_cgroup(page);
if (unlikely(!pc))
return;
- lock_page_cgroup(pc);
+ rcu_read_lock();
mem = pc->mem_cgroup;
- if (!mem || !PageCgroupUsed(pc))
- goto done;
+ if (unlikely(!mem || !PageCgroupUsed(pc)))
+ goto out;
+ /* pc->mem_cgroup is unstable ? */
+ if (unlikely(mem_cgroup_stealed(mem))) {
+ /* take a lock to access pc->mem_cgroup */
+ lock_page_cgroup(pc);
+ need_unlock = true;
+ mem = pc->mem_cgroup;
+ if (!mem || !PageCgroupUsed(pc))
+ goto out;
+ }
- /*
- * Preemption is already disabled. We can use __this_cpu_xxx
- */
- if (val > 0) {
- __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- SetPageCgroupFileMapped(pc);
- } else {
- __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
- ClearPageCgroupFileMapped(pc);
+ this_cpu_add(mem->stat->count[idx], val);
+
+ switch (idx) {
+ case MEM_CGROUP_STAT_FILE_MAPPED:
+ if (val > 0)
+ SetPageCgroupFileMapped(pc);
+ else if (!page_mapped(page))
+ ClearPageCgroupFileMapped(pc);
+ break;
+ default:
+ BUG();
}
-done:
- unlock_page_cgroup(pc);
+out:
+ if (unlikely(need_unlock))
+ unlock_page_cgroup(pc);
+ rcu_read_unlock();
+ return;
+}
+
+void mem_cgroup_update_file_mapped(struct page *page, int val)
+{
+ mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
}
/*
@@ -1605,15 +1753,55 @@ static void drain_all_stock_sync(void)
atomic_dec(&memcg_drain_count);
}
-static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+/*
+ * This function drains the percpu counter values from a DEAD cpu and
+ * moves them to the local cpu. Note that this function can be preempted.
+ */
+static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
+{
+ int i;
+
+ spin_lock(&mem->pcp_counter_lock);
+ for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+ s64 x = per_cpu(mem->stat->count[i], cpu);
+
+ per_cpu(mem->stat->count[i], cpu) = 0;
+ mem->nocpu_base.count[i] += x;
+ }
+ /* need to clear ON_MOVE value, works as a kind of lock. */
+ per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
+ spin_unlock(&mem->pcp_counter_lock);
+}
+
+static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
+{
+ int idx = MEM_CGROUP_ON_MOVE;
+
+ spin_lock(&mem->pcp_counter_lock);
+ per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
+ spin_unlock(&mem->pcp_counter_lock);
+}
+
+static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
int cpu = (unsigned long)hcpu;
struct memcg_stock_pcp *stock;
+ struct mem_cgroup *iter;
+
+ if ((action == CPU_ONLINE)) {
+ for_each_mem_cgroup_all(iter)
+ synchronize_mem_cgroup_on_move(iter, cpu);
+ return NOTIFY_OK;
+ }
- if (action != CPU_DEAD)
+ if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
return NOTIFY_OK;
+
+ for_each_mem_cgroup_all(iter)
+ mem_cgroup_drain_pcp_counter(iter, cpu);
+
stock = &per_cpu(memcg_stock, cpu);
drain_stock(stock);
return NOTIFY_OK;
@@ -3038,6 +3226,7 @@ move_account:
lru_add_drain_all();
drain_all_stock_sync();
ret = 0;
+ mem_cgroup_start_move(mem);
for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
enum lru_list l;
@@ -3051,6 +3240,7 @@ move_account:
if (ret)
break;
}
+ mem_cgroup_end_move(mem);
memcg_oom_recover(mem);
/* it seems parent cgroup doesn't have enough mem */
if (ret == -ENOMEM)
@@ -3137,33 +3327,25 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
return retval;
}
-struct mem_cgroup_idx_data {
- s64 val;
- enum mem_cgroup_stat_index idx;
-};
-static int
-mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx)
{
- struct mem_cgroup_idx_data *d = data;
- d->val += mem_cgroup_read_stat(mem, d->idx);
- return 0;
-}
+ struct mem_cgroup *iter;
+ s64 val = 0;
-static void
-mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
- enum mem_cgroup_stat_index idx, s64 *val)
-{
- struct mem_cgroup_idx_data d;
- d.idx = idx;
- d.val = 0;
- mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
- *val = d.val;
+ /* each per-cpu value can be negative. Then, use s64 */
+ for_each_mem_cgroup_tree(iter, mem)
+ val += mem_cgroup_read_stat(iter, idx);
+
+ if (val < 0) /* race ? */
+ val = 0;
+ return val;
}
static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
{
- u64 idx_val, val;
+ u64 val;
if (!mem_cgroup_is_root(mem)) {
if (!swap)
@@ -3172,16 +3354,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
return res_counter_read_u64(&mem->memsw, RES_USAGE);
}
- mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
- val = idx_val;
- mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
- val += idx_val;
+ val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
+ val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
- if (swap) {
- mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_SWAPOUT, &idx_val);
- val += idx_val;
- }
+ if (swap)
+ val += mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_SWAPOUT);
return val << PAGE_SHIFT;
}
@@ -3389,9 +3567,9 @@ struct {
};
-static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
+static void
+mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
- struct mcs_total_stat *s = data;
s64 val;
/* per cpu stat */
@@ -3421,13 +3599,15 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
- return 0;
}
static void
mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
- mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
+ struct mem_cgroup *iter;
+
+ for_each_mem_cgroup_tree(iter, mem)
+ mem_cgroup_get_local_stat(iter, s);
}
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
@@ -3604,7 +3784,7 @@ static int compare_thresholds(const void *a, const void *b)
return _a->threshold - _b->threshold;
}
-static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
{
struct mem_cgroup_eventfd_list *ev;
@@ -3615,7 +3795,10 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
{
- mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+ struct mem_cgroup *iter;
+
+ for_each_mem_cgroup_tree(iter, mem)
+ mem_cgroup_oom_notify_cb(iter);
}
static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
@@ -4032,6 +4215,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
vfree(mem);
mem = NULL;
}
+ spin_lock_init(&mem->pcp_counter_lock);
return mem;
}
@@ -4158,7 +4342,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
&per_cpu(memcg_stock, cpu);
INIT_WORK(&stock->work, drain_local_stock);
}
- hotcpu_notifier(memcg_stock_cpu_callback, 0);
+ hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
} else {
parent = mem_cgroup_from_cont(cont->parent);
mem->use_hierarchy = parent->use_hierarchy;
@@ -4513,6 +4697,7 @@ static void mem_cgroup_clear_mc(void)
mc.to = NULL;
mc.moving_task = NULL;
spin_unlock(&mc.lock);
+ mem_cgroup_end_move(from);
memcg_oom_recover(from);
memcg_oom_recover(to);
wake_up_all(&mc.waitq);
@@ -4543,6 +4728,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
VM_BUG_ON(mc.moved_charge);
VM_BUG_ON(mc.moved_swap);
VM_BUG_ON(mc.moving_task);
+ mem_cgroup_start_move(from);
spin_lock(&mc.lock);
mc.from = from;
mc.to = mem;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 757f6b0accfe..124324134ff6 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -7,21 +7,26 @@
* Free Software Foundation.
*
* High level machine check handler. Handles pages reported by the
- * hardware as being corrupted usually due to a 2bit ECC memory or cache
+ * hardware as being corrupted usually due to a multi-bit ECC memory or cache
* failure.
+ *
+ * In addition there is a "soft offline" entry point that allows one to stop
+ * using not-yet-corrupted-but-suspicious pages without killing anything.
*
* Handles page cache pages in various states. The tricky part
- * here is that we can access any page asynchronous to other VM
- * users, because memory failures could happen anytime and anywhere,
- * possibly violating some of their assumptions. This is why this code
- * has to be extremely careful. Generally it tries to use normal locking
- * rules, as in get the standard locks, even if that means the
- * error handling takes potentially a long time.
- *
- * The operation to map back from RMAP chains to processes has to walk
- * the complete process list and has non linear complexity with the number
- * mappings. In short it can be quite slow. But since memory corruptions
- * are rare we hope to get away with this.
+ * here is that we can access any page asynchronously with respect to
+ * other VM users, because memory failures could happen anytime and
+ * anywhere. This could violate some of their assumptions. This is why
+ * this code has to be extremely careful. Generally it tries to use
+ * normal locking rules, as in get the standard locks, even if that means
+ * the error handling takes potentially a long time.
+ *
+ * There are several operations here with exponential complexity because
+ * of unsuitable VM data structures. For example the operation to map back
+ * from RMAP chains to processes has to walk the complete process list and
+ * has non-linear complexity with the number of mappings. But since memory
+ * corruptions are rare we hope to get away with this. This avoids impacting
+ * the core VM.
*/
/*
@@ -30,7 +35,6 @@
* - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
* - pass bad pages to kdump next kernel
*/
-#define DEBUG 1 /* remove me in 2.6.34 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
@@ -78,7 +82,7 @@ static int hwpoison_filter_dev(struct page *p)
return 0;
/*
- * page_mapping() does not accept slab page
+ * page_mapping() does not accept slab pages.
*/
if (PageSlab(p))
return -EINVAL;
@@ -268,7 +272,7 @@ struct to_kill {
struct list_head nd;
struct task_struct *tsk;
unsigned long addr;
- unsigned addr_valid:1;
+ char addr_valid;
};
/*
@@ -309,7 +313,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
* a SIGKILL because the error is not contained anymore.
*/
if (tk->addr == -EFAULT) {
- pr_debug("MCE: Unable to find user space address %lx in %s\n",
+ pr_info("MCE: Unable to find user space address %lx in %s\n",
page_to_pfn(p), tsk->comm);
tk->addr_valid = 0;
}
@@ -577,7 +581,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
pfn, err);
} else if (page_has_private(p) &&
!try_to_release_page(p, GFP_NOIO)) {
- pr_debug("MCE %#lx: failed to release buffers\n", pfn);
+ pr_info("MCE %#lx: failed to release buffers\n", pfn);
} else {
ret = RECOVERED;
}
@@ -693,11 +697,10 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn)
* Issues:
* - Error on hugepage is contained in hugepage unit (not in raw page unit.)
* To narrow down kill region to one page, we need to break up pmd.
- * - To support soft-offlining for hugepage, we need to support hugepage
- * migration.
*/
static int me_huge_page(struct page *p, unsigned long pfn)
{
+ int res = 0;
struct page *hpage = compound_head(p);
/*
* We can safely recover from error on free or reserved (i.e.
@@ -710,8 +713,9 @@ static int me_huge_page(struct page *p, unsigned long pfn)
* so there is no race between isolation and mapping/unmapping.
*/
if (!(page_mapping(hpage) || PageAnon(hpage))) {
- __isolate_hwpoisoned_huge_page(hpage);
- return RECOVERED;
+ res = dequeue_hwpoisoned_huge_page(hpage);
+ if (!res)
+ return RECOVERED;
}
return DELAYED;
}
@@ -836,8 +840,6 @@ static int page_action(struct page_state *ps, struct page *p,
return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}
-#define N_UNMAP_TRIES 5
-
/*
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
@@ -849,7 +851,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct address_space *mapping;
LIST_HEAD(tokill);
int ret;
- int i;
int kill = 1;
struct page *hpage = compound_head(p);
@@ -903,17 +904,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (kill)
collect_procs(hpage, &tokill);
- /*
- * try_to_unmap can fail temporarily due to races.
- * Try a few times (RED-PEN better strategy?)
- */
- for (i = 0; i < N_UNMAP_TRIES; i++) {
- ret = try_to_unmap(hpage, ttu);
- if (ret == SWAP_SUCCESS)
- break;
- pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
- }
-
+ ret = try_to_unmap(hpage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
@@ -981,7 +972,10 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* We need/can do nothing about count=0 pages.
* 1) it's a free page, and therefore in safe hand:
* prep_new_page() will be the gate keeper.
- * 2) it's part of a non-compound high order page.
+ * 2) it's a free hugepage, which is also safe:
+ * an affected hugepage will be dequeued from hugepage freelist,
+ * so there's no concern about reusing it ever after.
+ * 3) it's part of a non-compound high order page.
* Implies some kernel user: cannot stop them from
* R/W the page; let's pray that the page has been
* used and will be freed some time later.
@@ -993,6 +987,24 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
if (is_free_buddy_page(p)) {
action_result(pfn, "free buddy", DELAYED);
return 0;
+ } else if (PageHuge(hpage)) {
+ /*
+ * Check "just unpoisoned", "filter hit", and
+ * "race with other subpage."
+ */
+ lock_page_nosync(hpage);
+ if (!PageHWPoison(hpage)
+ || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+ atomic_long_sub(nr_pages, &mce_bad_pages);
+ unlock_page(hpage);
+ return 0;
+ }
+ set_page_hwpoison_huge_page(hpage);
+ res = dequeue_hwpoisoned_huge_page(hpage);
+ action_result(pfn, "free huge",
+ res ? IGNORED : DELAYED);
+ unlock_page(hpage);
+ return res;
} else {
action_result(pfn, "high order kernel", IGNORED);
return -EBUSY;
@@ -1147,16 +1159,26 @@ int unpoison_memory(unsigned long pfn)
page = compound_head(p);
if (!PageHWPoison(p)) {
- pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
+ pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
return 0;
}
nr_pages = 1 << compound_order(page);
if (!get_page_unless_zero(page)) {
+ /*
+ * Since a HWPoisoned hugepage should have a non-zero refcount,
+ * a race between memory failure and unpoison seems to have
+ * happened. In such a case unpoison fails and memory failure
+ * runs to the end.
+ */
+ if (PageHuge(page)) {
+ pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
+ return 0;
+ }
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &mce_bad_pages);
- pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
@@ -1168,12 +1190,12 @@ int unpoison_memory(unsigned long pfn)
* the free buddy page pool.
*/
if (TestClearPageHWPoison(page)) {
- pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
+ pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
atomic_long_sub(nr_pages, &mce_bad_pages);
freeit = 1;
+ if (PageHuge(page))
+ clear_page_hwpoison_huge_page(page);
}
- if (PageHuge(p))
- clear_page_hwpoison_huge_page(page);
unlock_page(page);
put_page(page);
@@ -1187,7 +1209,11 @@ EXPORT_SYMBOL(unpoison_memory);
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
int nid = page_to_nid(p);
- return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+ if (PageHuge(p))
+ return alloc_huge_page_node(page_hstate(compound_head(p)),
+ nid);
+ else
+ return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
/*
@@ -1215,14 +1241,21 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
* was free.
*/
set_migratetype_isolate(p);
+ /*
+ * When the target page is a free hugepage, just remove it
+ * from the free hugepage list.
+ */
if (!get_page_unless_zero(compound_head(p))) {
- if (is_free_buddy_page(p)) {
- pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+ if (PageHuge(p)) {
+ pr_info("get_any_page: %#lx free huge page\n", pfn);
+ ret = dequeue_hwpoisoned_huge_page(compound_head(p));
+ } else if (is_free_buddy_page(p)) {
+ pr_info("get_any_page: %#lx free buddy page\n", pfn);
/* Set hwpoison bit while page is still isolated */
SetPageHWPoison(p);
ret = 0;
} else {
- pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+ pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
pfn, p->flags);
ret = -EIO;
}
@@ -1235,6 +1268,46 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
return ret;
}
+static int soft_offline_huge_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+ struct page *hpage = compound_head(page);
+ LIST_HEAD(pagelist);
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+
+ if (PageHWPoison(hpage)) {
+ put_page(hpage);
+ pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
+ return -EBUSY;
+ }
+
+ /* Keep page count to indicate a given hugepage is isolated. */
+
+ list_add(&hpage->lru, &pagelist);
+ ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+ if (ret) {
+ putback_lru_pages(&pagelist);
+ pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+ pfn, ret, page->flags);
+ if (ret > 0)
+ ret = -EIO;
+ return ret;
+ }
+done:
+ if (!PageHWPoison(hpage))
+ atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ /* keep elevated page count for bad page */
+ return ret;
+}
+
/**
* soft_offline_page - Soft offline a page.
* @page: page to offline
@@ -1262,6 +1335,9 @@ int soft_offline_page(struct page *page, int flags)
int ret;
unsigned long pfn = page_to_pfn(page);
+ if (PageHuge(page))
+ return soft_offline_huge_page(page, flags);
+
ret = get_any_page(page, pfn, flags);
if (ret < 0)
return ret;
@@ -1288,7 +1364,7 @@ int soft_offline_page(struct page *page, int flags)
goto done;
}
if (!PageLRU(page)) {
- pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
}
@@ -1302,7 +1378,7 @@ int soft_offline_page(struct page *page, int flags)
if (PageHWPoison(page)) {
unlock_page(page);
put_page(page);
- pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+ pr_info("soft offline: %#lx page already poisoned\n", pfn);
return -EBUSY;
}
@@ -1323,7 +1399,7 @@ int soft_offline_page(struct page *page, int flags)
put_page(page);
if (ret == 1) {
ret = 0;
- pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+ pr_info("soft_offline: %#lx: invalidated\n", pfn);
goto done;
}
@@ -1339,13 +1415,13 @@ int soft_offline_page(struct page *page, int flags)
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
if (ret) {
- pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+ pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
ret = -EIO;
}
} else {
- pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+ pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
pfn, ret, page_count(page), page->flags);
}
if (ret)
diff --git a/mm/memory.c b/mm/memory.c
index 98b58fecedef..02e48aa0ed13 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -736,7 +736,7 @@ again:
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
if (!dst_pte)
return -ENOMEM;
- src_pte = pte_offset_map_nested(src_pmd, addr);
+ src_pte = pte_offset_map(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
@@ -767,7 +767,7 @@ again:
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
- pte_unmap_nested(orig_src_pte);
+ pte_unmap(orig_src_pte);
add_mm_rss_vec(dst_mm, rss);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
@@ -1450,7 +1450,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
if (ret &
- (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
+ (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
+ VM_FAULT_SIGBUS))
return i ? i : -EFAULT;
BUG();
}
@@ -1590,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr)
}
#endif /* CONFIG_ELF_CORE */
-pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
@@ -2079,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
- memset(kaddr, 0, PAGE_SIZE);
+ clear_page(kaddr);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(dst);
} else
@@ -2107,6 +2108,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)
+ __releases(ptl)
{
struct page *old_page, *new_page;
pte_t entry;
@@ -2626,6 +2628,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, *swapcache = NULL;
swp_entry_t entry;
pte_t pte;
+ int locked;
struct mem_cgroup *ptr = NULL;
int exclusive = 0;
int ret = 0;
@@ -2676,8 +2679,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_release;
}
- lock_page(page);
+ locked = lock_page_or_retry(page, mm, flags);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ if (!locked) {
+ ret |= VM_FAULT_RETRY;
+ goto out_release;
+ }
/*
* Make sure try_to_free_swap or reuse_swap_page or swapoff did not
@@ -2926,7 +2933,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.page = NULL;
ret = vma->vm_ops->fault(vma, &vmf);
- if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+ VM_FAULT_RETRY)))
return ret;
if (unlikely(PageHWPoison(vmf.page))) {
@@ -3343,7 +3351,7 @@ int in_gate_area_no_task(unsigned long addr)
#endif /* __HAVE_ARCH_GATE_AREA */
-static int follow_pte(struct mm_struct *mm, unsigned long address,
+static int __follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
pgd_t *pgd;
@@ -3380,6 +3388,17 @@ out:
return -EINVAL;
}
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp)
+{
+ int res;
+
+ /* (void) is needed to make gcc happy */
+ (void) __cond_lock(*ptlp,
+ !(res = __follow_pte(mm, address, ptepp, ptlp)));
+ return res;
+}
+
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index d4e940a26945..9260314a221e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -602,27 +602,14 @@ static struct page *next_active_pageblock(struct page *page)
/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
- int type;
struct page *page = pfn_to_page(start_pfn);
struct page *end_page = page + nr_pages;
/* Check the starting page of each pageblock within the range */
for (; page < end_page; page = next_active_pageblock(page)) {
- type = get_pageblock_migratetype(page);
-
- /*
- * A pageblock containing MOVABLE or free pages is considered
- * removable
- */
- if (type != MIGRATE_MOVABLE && !pageblock_free(page))
- return 0;
-
- /*
- * A pageblock starting with a PageReserved page is not
- * considered removable.
- */
- if (PageReserved(page))
+ if (!is_pageblock_removable_nolock(page))
return 0;
+ cond_resched();
}
/* All pageblocks in the memory block are likely to be hot-removable */
@@ -659,7 +646,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
* Scanning pfn is much easier than scanning lru list.
* Scan pfn from start to end and Find LRU page.
*/
-int scan_lru_pages(unsigned long start, unsigned long end)
+static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
unsigned long pfn;
struct page *page;
@@ -709,29 +696,30 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
page_is_file_cache(page));
} else {
- /* Becasue we don't have big zone->lock. we should
- check this again here. */
- if (page_count(page))
- not_managed++;
#ifdef CONFIG_DEBUG_VM
printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
pfn);
dump_page(page);
#endif
+ /* Because we don't have the big zone->lock, we should
+ check this again here. */
+ if (page_count(page)) {
+ not_managed++;
+ ret = -EBUSY;
+ break;
+ }
}
}
- ret = -EBUSY;
- if (not_managed) {
- if (!list_empty(&source))
+ if (!list_empty(&source)) {
+ if (not_managed) {
+ putback_lru_pages(&source);
+ goto out;
+ }
+ /* this function returns # of failed pages */
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+ if (ret)
putback_lru_pages(&source);
- goto out;
}
- ret = 0;
- if (list_empty(&source))
- goto out;
- /* this function returns # of failed pages */
- ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
-
out:
return ret;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f969da5dd8a2..4a57f135b76e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -924,15 +924,21 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
nodemask_t nmask;
LIST_HEAD(pagelist);
int err = 0;
+ struct vm_area_struct *vma;
nodes_clear(nmask);
node_set(source, nmask);
- check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- if (!list_empty(&pagelist))
+ if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest, 0);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
return err;
}
@@ -1147,9 +1153,12 @@ static long do_mbind(unsigned long start, unsigned long len,
err = mbind_range(mm, start, end, new);
- if (!list_empty(&pagelist))
+ if (!list_empty(&pagelist)) {
nr_failed = migrate_pages(&pagelist, new_vma_page,
(unsigned long)vma, 0);
+ if (nr_failed)
+ putback_lru_pages(&pagelist);
+ }
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
@@ -1588,7 +1597,7 @@ unsigned slab_node(struct mempolicy *policy)
(void)first_zones_zonelist(zonelist, highest_zoneidx,
&policy->v.nodes,
&zone);
- return zone->node;
+ return zone ? zone->node : numa_node_id();
}
default:
diff --git a/mm/migrate.c b/mm/migrate.c
index 38e7cad782f4..fe5a3c6a5426 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
+#include <linux/hugetlb.h>
#include <linux/gfp.h>
#include "internal.h"
@@ -95,26 +96,34 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
pte_t *ptep, pte;
spinlock_t *ptl;
- pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
- goto out;
+ if (unlikely(PageHuge(new))) {
+ ptep = huge_pte_offset(mm, addr);
+ if (!ptep)
+ goto out;
+ ptl = &mm->page_table_lock;
+ } else {
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto out;
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- goto out;
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ goto out;
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- goto out;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ goto out;
- ptep = pte_offset_map(pmd, addr);
+ ptep = pte_offset_map(pmd, addr);
- if (!is_swap_pte(*ptep)) {
- pte_unmap(ptep);
- goto out;
- }
+ if (!is_swap_pte(*ptep)) {
+ pte_unmap(ptep);
+ goto out;
+ }
+
+ ptl = pte_lockptr(mm, pmd);
+ }
- ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
@@ -130,10 +139,19 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
if (is_write_migration_entry(entry))
pte = pte_mkwrite(pte);
+#ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(new))
+ pte = pte_mkhuge(pte);
+#endif
flush_cache_page(vma, addr, pte_pfn(pte));
set_pte_at(mm, addr, ptep, pte);
- if (PageAnon(new))
+ if (PageHuge(new)) {
+ if (PageAnon(new))
+ hugepage_add_anon_rmap(new, vma, addr);
+ else
+ page_dup_rmap(new);
+ } else if (PageAnon(new))
page_add_anon_rmap(new, vma, addr);
else
page_add_file_rmap(new);
@@ -276,11 +294,59 @@ static int migrate_page_move_mapping(struct address_space *mapping,
}
/*
+ * The expected number of remaining references is the same as that
+ * of migrate_page_move_mapping().
+ */
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ int expected_count;
+ void **pslot;
+
+ if (!mapping) {
+ if (page_count(page) != 1)
+ return -EAGAIN;
+ return 0;
+ }
+
+ spin_lock_irq(&mapping->tree_lock);
+
+ pslot = radix_tree_lookup_slot(&mapping->page_tree,
+ page_index(page));
+
+ expected_count = 2 + page_has_private(page);
+ if (page_count(page) != expected_count ||
+ (struct page *)radix_tree_deref_slot(pslot) != page) {
+ spin_unlock_irq(&mapping->tree_lock);
+ return -EAGAIN;
+ }
+
+ if (!page_freeze_refs(page, expected_count)) {
+ spin_unlock_irq(&mapping->tree_lock);
+ return -EAGAIN;
+ }
+
+ get_page(newpage);
+
+ radix_tree_replace_slot(pslot, newpage);
+
+ page_unfreeze_refs(page, expected_count);
+
+ __put_page(page);
+
+ spin_unlock_irq(&mapping->tree_lock);
+ return 0;
+}
+
+/*
* Copy the page to its new location
*/
-static void migrate_page_copy(struct page *newpage, struct page *page)
+void migrate_page_copy(struct page *newpage, struct page *page)
{
- copy_highpage(newpage, page);
+ if (PageHuge(page))
+ copy_huge_page(newpage, page);
+ else
+ copy_highpage(newpage, page);
if (PageError(page))
SetPageError(newpage);
@@ -431,7 +497,6 @@ static int writeout(struct address_space *mapping, struct page *page)
.nr_to_write = 1,
.range_start = 0,
.range_end = LLONG_MAX,
- .nonblocking = 1,
.for_reclaim = 1
};
int rc;
@@ -724,6 +789,92 @@ move_newpage:
}
/*
+ * Counterpart of unmap_and_move_page() for hugepage migration.
+ *
+ * This function doesn't wait for the completion of hugepage I/O
+ * because there is no race between I/O and migration for hugepages.
+ * Note that currently hugepage I/O occurs only in direct I/O
+ * where no lock is held and PG_writeback is irrelevant,
+ * and the writeback status of all subpages is counted in the reference
+ * count of the head page (i.e. if all subpages of a 2MB hugepage are
+ * under direct I/O, the reference count of the head page is 512 and a bit
+ * more). This means that when we try to migrate a hugepage whose subpages
+ * are doing direct I/O, some references remain after try_to_unmap() and
+ * hugepage migration fails without data corruption.
+ *
+ * There is also no race when direct I/O is issued on the page under migration,
+ * because then the pte is replaced with a migration swap entry and the
+ * direct I/O code will wait in the page fault for migration to complete.
+ */
+static int unmap_and_move_huge_page(new_page_t get_new_page,
+ unsigned long private, struct page *hpage,
+ int force, int offlining)
+{
+ int rc = 0;
+ int *result = NULL;
+ struct page *new_hpage = get_new_page(hpage, private, &result);
+ int rcu_locked = 0;
+ struct anon_vma *anon_vma = NULL;
+
+ if (!new_hpage)
+ return -ENOMEM;
+
+ rc = -EAGAIN;
+
+ if (!trylock_page(hpage)) {
+ if (!force)
+ goto out;
+ lock_page(hpage);
+ }
+
+ if (PageAnon(hpage)) {
+ rcu_read_lock();
+ rcu_locked = 1;
+
+ if (page_mapped(hpage)) {
+ anon_vma = page_anon_vma(hpage);
+ atomic_inc(&anon_vma->external_refcount);
+ }
+ }
+
+ try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+
+ if (!page_mapped(hpage))
+ rc = move_to_new_page(new_hpage, hpage, 1);
+
+ if (rc)
+ remove_migration_ptes(hpage, hpage);
+
+ if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
+ &anon_vma->lock)) {
+ int empty = list_empty(&anon_vma->head);
+ spin_unlock(&anon_vma->lock);
+ if (empty)
+ anon_vma_free(anon_vma);
+ }
+
+ if (rcu_locked)
+ rcu_read_unlock();
+out:
+ unlock_page(hpage);
+
+ if (rc != -EAGAIN) {
+ list_del(&hpage->lru);
+ put_page(hpage);
+ }
+
+ put_page(new_hpage);
+
+ if (result) {
+ if (rc)
+ *result = rc;
+ else
+ *result = page_to_nid(new_hpage);
+ }
+ return rc;
+}
+
+/*
* migrate_pages
*
* The function takes one list of pages to migrate and a function
@@ -732,8 +883,9 @@ move_newpage:
*
* The function returns after 10 attempts or if no pages
* are movable anymore because to has become empty
- * or no retryable pages exist anymore. All pages will be
- * returned to the LRU or freed.
+ * or no retryable pages exist anymore.
+ * Caller should call putback_lru_pages to return pages to the LRU
+ * or free list.
*
* Return: Number of pages not migrated or error code.
*/
@@ -780,7 +932,51 @@ out:
if (!swapwrite)
current->flags &= ~PF_SWAPWRITE;
- putback_lru_pages(from);
+ if (rc)
+ return rc;
+
+ return nr_failed + retry;
+}
+
+int migrate_huge_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private, int offlining)
+{
+ int retry = 1;
+ int nr_failed = 0;
+ int pass = 0;
+ struct page *page;
+ struct page *page2;
+ int rc;
+
+ for (pass = 0; pass < 10 && retry; pass++) {
+ retry = 0;
+
+ list_for_each_entry_safe(page, page2, from, lru) {
+ cond_resched();
+
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, page, pass > 2, offlining);
+
+ switch(rc) {
+ case -ENOMEM:
+ goto out;
+ case -EAGAIN:
+ retry++;
+ break;
+ case 0:
+ break;
+ default:
+ /* Permanent failure */
+ nr_failed++;
+ break;
+ }
+ }
+ }
+ rc = 0;
+out:
+
+ list_for_each_entry_safe(page, page2, from, lru)
+ put_page(page);
if (rc)
return rc;
@@ -841,7 +1037,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
err = -EFAULT;
vma = find_vma(mm, pp->addr);
- if (!vma || !vma_migratable(vma))
+ if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
goto set_status;
page = follow_page(vma, pp->addr, FOLL_GET);
@@ -890,9 +1086,12 @@ set_status:
}
err = 0;
- if (!list_empty(&pagelist))
+ if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node,
(unsigned long)pm, 0);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
up_read(&mm->mmap_sem);
return err;
@@ -1005,7 +1204,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
int err = -EFAULT;
vma = find_vma(mm, addr);
- if (!vma)
+ if (!vma || addr < vma->vm_start)
goto set_status;
page = follow_page(vma, addr, 0);
diff --git a/mm/mremap.c b/mm/mremap.c
index cde56ee51ef7..563fbdd6293a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
* pte locks because exclusive mmap_sem prevents deadlock.
*/
old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
- new_pte = pte_offset_map_nested(new_pmd, new_addr);
+ new_pte = pte_offset_map(new_pmd, new_addr);
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
arch_leave_lazy_mmu_mode();
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
- pte_unmap_nested(new_pte - 1);
+ pte_unmap(new_pte - 1);
pte_unmap_unlock(old_pte - 1, old_ptl);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
diff --git a/mm/nommu.c b/mm/nommu.c
index 88ff091eb07a..30b5c20eec15 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -293,11 +293,58 @@ void *vmalloc(unsigned long size)
}
EXPORT_SYMBOL(vmalloc);
+/*
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ *
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
+}
+EXPORT_SYMBOL(vzalloc);
+
+/**
+ * vmalloc_node - allocate memory on a specific node
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
void *vmalloc_node(unsigned long size, int node)
{
return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);
+
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+ return vzalloc(size);
+}
+EXPORT_SYMBOL(vzalloc_node);
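
For illustration, typical use of the new helpers (the struct and function names are hypothetical): vzalloc() replaces the open-coded vmalloc() + memset(0) pattern and is freed with vfree() as usual:

struct example_table {
	unsigned long entries[8192];	/* likely too large for kmalloc() */
};

static struct example_table *example_table_alloc(void)
{
	return vzalloc(sizeof(struct example_table));
}

static void example_table_free(struct example_table *t)
{
	vfree(t);
}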
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4029583a1024..7dcca55ede7c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
return 0;
/*
- * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
- * need to be executed for something that cannot be killed.
+ * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
+ * so the entire heuristic doesn't need to be executed for something
+ * that cannot be killed.
*/
- if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+ if (atomic_read(&p->mm->oom_disable_count)) {
task_unlock(p);
return 0;
}
@@ -403,16 +404,40 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
+ struct task_struct *q;
+ struct mm_struct *mm;
+
p = find_lock_task_mm(p);
if (!p)
return 1;
+ /* mm cannot be safely dereferenced after task_unlock(p) */
+ mm = p->mm;
+
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
task_pid_nr(p), p->comm, K(p->mm->total_vm),
K(get_mm_counter(p->mm, MM_ANONPAGES)),
K(get_mm_counter(p->mm, MM_FILEPAGES)));
task_unlock(p);
+ /*
+ * Kill all processes sharing p->mm in other thread groups, if any.
+ * They don't get access to memory reserves or a higher scheduler
+ * priority, though, to avoid depletion of all memory or task
+ * starvation. This prevents mm->mmap_sem livelock when an oom killed
+ * task cannot exit because it requires the semaphore and it's contended
+ * by another thread trying to allocate memory itself. That thread will
+ * now get access to memory reserves since it has a pending fatal
+ * signal.
+ */
+ for_each_process(q)
+ if (q->mm == mm && !same_thread_group(q, p)) {
+ task_lock(q); /* Protect ->comm from prctl() */
+ pr_err("Kill process %d (%s) sharing same memory\n",
+ task_pid_nr(q), q->comm);
+ task_unlock(q);
+ force_sig(SIGKILL, q);
+ }
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
@@ -680,7 +705,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
!oom_unkillable_task(current, NULL, nodemask) &&
- (current->signal->oom_adj != OOM_DISABLE)) {
+ current->mm && !atomic_read(&current->mm->oom_disable_count)) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e3bccac1f025..b840afa89761 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
if (vm_dirty_bytes)
dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
- else {
- int dirty_ratio;
-
- dirty_ratio = vm_dirty_ratio;
- if (dirty_ratio < 5)
- dirty_ratio = 5;
- dirty = (dirty_ratio * available_memory) / 100;
- }
+ else
+ dirty = (vm_dirty_ratio * available_memory) / 100;
if (dirty_background_bytes)
background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
@@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping,
* catch-up. This avoids (excessively) small writeouts
* when the bdi limits are ramping up.
*/
- if (nr_reclaimable + nr_writeback <
+ if (nr_reclaimable + nr_writeback <=
(background_thresh + dirty_thresh) / 2)
break;
@@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping,
* the last resort safeguard.
*/
dirty_exceeded =
- (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
- || (nr_reclaimable + nr_writeback >= dirty_thresh);
+ (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+ || (nr_reclaimable + nr_writeback > dirty_thresh);
if (!dirty_exceeded)
break;
@@ -1121,6 +1115,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
{
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
+ __inc_zone_page_state(page, NR_DIRTIED);
__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
@@ -1129,6 +1124,18 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
EXPORT_SYMBOL(account_page_dirtied);
/*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+ inc_zone_page_state(page, NR_WRITEBACK);
+ inc_zone_page_state(page, NR_WRITTEN);
+}
+EXPORT_SYMBOL(account_page_writeback);
+
+/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
@@ -1366,7 +1373,7 @@ int test_set_page_writeback(struct page *page)
ret = TestSetPageWriteback(page);
}
if (!ret)
- inc_zone_page_state(page, NR_WRITEBACK);
+ account_page_writeback(page);
return ret;
}
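
The new account_page_writeback() helper is exported so that filesystems which manage PageWriteback themselves can keep NR_WRITEBACK and NR_WRITTEN accurate. A minimal sketch of the intended call pattern, mirroring test_set_page_writeback() above; the function name is illustrative and the declaration is assumed to come from the header change in this series.

/* Sketch only: a filesystem setting the writeback bit under its own
 * locking accounts the page via the new exported helper. */
#include <linux/mm.h>
#include <linux/page-flags.h>

static void example_mark_page_writeback(struct page *page)
{
        if (!TestSetPageWriteback(page))
                account_page_writeback(page);
}
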
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a362c52fdf4..07a654486f75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -531,7 +531,7 @@ static inline void __free_one_page(struct page *page,
* so it's less likely to be used soon and more likely to be merged
* as a higher order page
*/
- if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
struct page *higher_page, *higher_buddy;
combined_idx = __find_combined_index(page_idx, order);
higher_page = page + combined_idx - page_idx;
@@ -1907,7 +1907,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
preferred_zone, migratetype);
if (!page && gfp_mask & __GFP_NOFAIL)
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
} while (!page && (gfp_mask & __GFP_NOFAIL));
return page;
@@ -1932,7 +1932,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
const gfp_t wait = gfp_mask & __GFP_WAIT;
/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
- BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
+ BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
/*
* The caller may dip into page reserves a bit more if the caller
@@ -1940,7 +1940,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
* set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
*/
- alloc_flags |= (gfp_mask & __GFP_HIGH);
+ alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
if (!wait) {
alloc_flags |= ALLOC_HARDER;
@@ -2095,7 +2095,7 @@ rebalance:
pages_reclaimed += did_some_progress;
if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
/* Wait for some write requests to complete then retry */
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
goto rebalance;
}
@@ -5297,12 +5297,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 * page allocator never allocates memory from ISOLATE blocks.
*/
+static int
+__count_immobile_pages(struct zone *zone, struct page *page, int count)
+{
+ unsigned long pfn, iter, found;
+ /*
+ * To avoid noisy data, lru_add_drain_all() should be called first.
+ * If the zone is ZONE_MOVABLE, it never contains immobile pages.
+ */
+ if (zone_idx(zone) == ZONE_MOVABLE)
+ return true;
+
+ if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+ return true;
+
+ pfn = page_to_pfn(page);
+ for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+ unsigned long check = pfn + iter;
+
+ if (!pfn_valid_within(check)) {
+ iter++;
+ continue;
+ }
+ page = pfn_to_page(check);
+ if (!page_count(page)) {
+ if (PageBuddy(page))
+ iter += (1 << page_order(page)) - 1;
+ continue;
+ }
+ if (!PageLRU(page))
+ found++;
+ /*
+ * If there are RECLAIMABLE pages, we need to check them.
+ * But for now, memory offline itself doesn't call shrink_slab()
+ * and this still needs to be fixed.
+ */
+ /*
+ * If the page is not RAM, page_count() should be 0, so we don't
+ * need any further check. This is a _used_, non-movable page.
+ *
+ * The problematic case is PG_reserved pages: PG_reserved is set
+ * on both memory hole pages and _used_ kernel pages at boot.
+ */
+ if (found > count)
+ return false;
+ }
+ return true;
+}
+
+bool is_pageblock_removable_nolock(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ return __count_immobile_pages(zone, page, 0);
+}
+
int set_migratetype_isolate(struct page *page)
{
struct zone *zone;
- struct page *curr_page;
- unsigned long flags, pfn, iter;
- unsigned long immobile = 0;
+ unsigned long flags, pfn;
struct memory_isolate_notify arg;
int notifier_ret;
int ret = -EBUSY;
@@ -5312,11 +5365,6 @@ int set_migratetype_isolate(struct page *page)
zone_idx = zone_idx(zone);
spin_lock_irqsave(&zone->lock, flags);
- if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
- zone_idx == ZONE_MOVABLE) {
- ret = 0;
- goto out;
- }
pfn = page_to_pfn(page);
arg.start_pfn = pfn;
@@ -5336,23 +5384,20 @@ int set_migratetype_isolate(struct page *page)
*/
notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
notifier_ret = notifier_to_errno(notifier_ret);
- if (notifier_ret || !arg.pages_found)
+ if (notifier_ret)
goto out;
-
- for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
- if (!pfn_valid_within(pfn))
- continue;
-
- curr_page = pfn_to_page(iter);
- if (!page_count(curr_page) || PageLRU(curr_page))
- continue;
-
- immobile++;
- }
-
- if (arg.pages_found == immobile)
+ /*
+ * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+ * We just check MOVABLE pages.
+ */
+ if (__count_immobile_pages(zone, page, arg.pages_found))
ret = 0;
+ /*
+ * immobile means "not-on-lru" pages. If immobile is larger than
+ * removable-by-driver pages reported by notifier, we'll fail.
+ */
+
out:
if (!ret) {
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5e0ffd967452..4ae42bb40892 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -86,7 +86,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
* all pages in [start_pfn...end_pfn) must be in the same zone.
* zone->lock must be held before call this.
*
- * Returns 0 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
*/
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
@@ -119,7 +119,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
struct zone *zone;
int ret;
- pfn = start_pfn;
/*
* Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
* are not necessarily aligned to pageblock_nr_pages.
diff --git a/mm/rmap.c b/mm/rmap.c
index 5f17fad1bee8..1a8bf76bfd03 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -80,7 +80,7 @@ static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}
-void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
@@ -314,7 +314,7 @@ void __init anon_vma_init(void)
* Getting a lock on a stable anon_vma from a page off the LRU is
* tricky: page_lock_anon_vma rely on RCU to guard against the races.
*/
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *__page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma, *root_anon_vma;
unsigned long anon_mapping;
@@ -348,6 +348,8 @@ out:
}
void page_unlock_anon_vma(struct anon_vma *anon_vma)
+ __releases(&anon_vma->root->lock)
+ __releases(RCU)
{
anon_vma_unlock(anon_vma);
rcu_read_unlock();
@@ -407,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
*
* On success returns with pte mapped and locked.
*/
-pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
unsigned long address, spinlock_t **ptlp, int sync)
{
pgd_t *pgd;
@@ -780,10 +782,10 @@ void page_move_anon_rmap(struct page *page,
}
/**
- * __page_set_anon_rmap - setup new anonymous rmap
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
+ * __page_set_anon_rmap - set up new anonymous rmap
+ * @page: Page to add to rmap
+ * @vma: VM area to add page to.
+ * @address: User virtual address of the mapping
* @exclusive: the page is exclusively owned by the current process
*/
static void __page_set_anon_rmap(struct page *page,
@@ -793,25 +795,16 @@ static void __page_set_anon_rmap(struct page *page,
BUG_ON(!anon_vma);
+ if (PageAnon(page))
+ return;
+
/*
* If the page isn't exclusively mapped into this vma,
* we must use the _oldest_ possible anon_vma for the
* page mapping!
*/
- if (!exclusive) {
- if (PageAnon(page))
- return;
+ if (!exclusive)
anon_vma = anon_vma->root;
- } else {
- /*
- * In this case, swapped-out-but-not-discarded swap-cache
- * is remapped. So, no need to update page->mapping here.
- * We convice anon_vma poitned by page->mapping is not obsolete
- * because vma->anon_vma is necessary to be a family of it.
- */
- if (PageAnon(page))
- return;
- }
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
diff --git a/mm/shmem.c b/mm/shmem.c
index 080b09a57a8f..f6d350e8adc5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1586,6 +1586,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
@@ -1903,7 +1904,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
inc_nlink(inode);
- atomic_inc(&inode->i_count); /* New dentry reference */
+ ihold(inode); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
out:
@@ -2146,7 +2147,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
if (*len < 3)
return 255;
- if (hlist_unhashed(&inode->i_hash)) {
+ if (inode_unhashed(inode)) {
/* Unfortunately insert_inode_hash is not idempotent,
* so as we hash inodes here rather than at creation
* time, we need a lock to ensure we only try
@@ -2154,7 +2155,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
*/
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
- if (hlist_unhashed(&inode->i_hash))
+ if (inode_unhashed(inode))
__insert_inode_hash(inode,
inode->i_ino + inode->i_generation);
spin_unlock(&lock);
diff --git a/mm/slab.c b/mm/slab.c
index fcae9815d3b3..b1e40dafbab3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -901,7 +901,7 @@ static int transfer_objects(struct array_cache *to,
struct array_cache *from, unsigned int max)
{
/* Figure out how many entries to transfer */
- int nr = min(min(from->avail, max), to->limit - to->avail);
+ int nr = min3(from->avail, max, to->limit - to->avail);
if (!nr)
return 0;
diff --git a/mm/swap.c b/mm/swap.c
index 3ce7bc373a52..3f4854205b16 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -378,6 +378,7 @@ void release_pages(struct page **pages, int nr, int cold)
pagevec_free(&pages_to_free);
}
+EXPORT_SYMBOL(release_pages);
/*
* The pages which we're about to release may be in the deferred lru-addition
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9fc7bac7db0c..67ddaaf98c74 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -30,6 +30,7 @@
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
+#include <linux/poll.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -58,6 +59,10 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+/* Activity counter to indicate that a swapon or swapoff has occurred */
+static atomic_t proc_poll_event = ATOMIC_INIT(0);
+
static inline unsigned char swap_count(unsigned char ent)
{
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
@@ -1680,6 +1685,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
}
filp_close(swap_file, NULL);
err = 0;
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
out_dput:
filp_close(victim, NULL);
@@ -1688,6 +1695,25 @@ out:
}
#ifdef CONFIG_PROC_FS
+struct proc_swaps {
+ struct seq_file seq;
+ int event;
+};
+
+static unsigned swaps_poll(struct file *file, poll_table *wait)
+{
+ struct proc_swaps *s = file->private_data;
+
+ poll_wait(file, &proc_poll_wait, wait);
+
+ if (s->event != atomic_read(&proc_poll_event)) {
+ s->event = atomic_read(&proc_poll_event);
+ return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+ }
+
+ return POLLIN | POLLRDNORM;
+}
+
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
@@ -1771,7 +1797,24 @@ static const struct seq_operations swaps_op = {
static int swaps_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &swaps_op);
+ struct proc_swaps *s;
+ int ret;
+
+ s = kmalloc(sizeof(struct proc_swaps), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ file->private_data = s;
+
+ ret = seq_open(file, &swaps_op);
+ if (ret) {
+ kfree(s);
+ return ret;
+ }
+
+ s->seq.private = s;
+ s->event = atomic_read(&proc_poll_event);
+ return ret;
}
static const struct file_operations proc_swaps_operations = {
@@ -1779,6 +1822,7 @@ static const struct file_operations proc_swaps_operations = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
+ .poll = swaps_poll,
};
static int __init procswaps_init(void)
@@ -2084,6 +2128,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
swap_info[prev]->next = type;
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
error = 0;
goto out;
bad_swap:
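
With the new .poll hook and the proc_poll_event counter above, userspace can now block on /proc/swaps and be woken with POLLERR | POLLPRI whenever swapon or swapoff changes the swap configuration. A minimal userspace monitor built on that interface, not part of the patch itself:

/* Illustrative userspace monitor for swapon/swapoff events via the new
 * poll support on /proc/swaps. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct pollfd pfd;

        pfd.fd = open("/proc/swaps", O_RDONLY);
        if (pfd.fd < 0)
                return 1;
        pfd.events = POLLERR | POLLPRI;

        for (;;) {
                if (poll(&pfd, 1, -1) < 0)
                        return 1;
                /* configuration changed: re-read the table from offset 0 */
                lseek(pfd.fd, 0, SEEK_SET);
                ssize_t n = read(pfd.fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        fputs(buf, stdout);
                }
        }
}
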
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9f909622a25e..a3d66b3dc5cb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -293,13 +293,13 @@ static void __insert_vmap_area(struct vmap_area *va)
struct rb_node *tmp;
while (*p) {
- struct vmap_area *tmp;
+ struct vmap_area *tmp_va;
parent = *p;
- tmp = rb_entry(parent, struct vmap_area, rb_node);
- if (va->va_start < tmp->va_end)
+ tmp_va = rb_entry(parent, struct vmap_area, rb_node);
+ if (va->va_start < tmp_va->va_end)
p = &(*p)->rb_left;
- else if (va->va_end > tmp->va_start)
+ else if (va->va_end > tmp_va->va_start)
p = &(*p)->rb_right;
else
BUG();
@@ -1596,6 +1596,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
}
EXPORT_SYMBOL(__vmalloc);
+static inline void *__vmalloc_node_flags(unsigned long size,
+ int node, gfp_t flags)
+{
+ return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
+ node, __builtin_return_address(0));
+}
+
/**
* vmalloc - allocate virtually contiguous memory
* @size: allocation size
@@ -1607,12 +1614,28 @@ EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
- -1, __builtin_return_address(0));
+ return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
/**
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ * @size: allocation size
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+ return __vmalloc_node_flags(size, -1,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc);
+
+/**
* vmalloc_user - allocate zeroed virtually contiguous memory for userspace
* @size: allocation size
*
@@ -1653,6 +1676,25 @@ void *vmalloc_node(unsigned long size, int node)
}
EXPORT_SYMBOL(vmalloc_node);
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc_node() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+ return __vmalloc_node_flags(size, node,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc_node);
+
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
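
vzalloc() and vzalloc_node() exist so callers can stop open-coding vmalloc plus memset. A minimal before/after sketch of a caller conversion; the helper names and the allocation itself are illustrative, not taken from this patch.

/* Sketch of a caller conversion, not from this patch. */
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *alloc_table_old(unsigned long size)
{
        void *p = vmalloc(size);

        if (p)
                memset(p, 0, size);
        return p;
}

static void *alloc_table_new(unsigned long size)
{
        /* zeroed, same GFP_KERNEL | __GFP_HIGHMEM allocation as above */
        return vzalloc(size);
}
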
@@ -2350,6 +2392,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
+ __acquires(&vmlist_lock)
{
loff_t n = *pos;
struct vm_struct *v;
@@ -2376,6 +2419,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
}
static void s_stop(struct seq_file *m, void *p)
+ __releases(&vmlist_lock)
{
read_unlock(&vmlist_lock);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b94c9464f262..b8a6fdc21312 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -51,6 +51,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
+enum lumpy_mode {
+ LUMPY_MODE_NONE,
+ LUMPY_MODE_ASYNC,
+ LUMPY_MODE_SYNC,
+};
+
struct scan_control {
/* Incremented by the number of inactive pages that were scanned */
unsigned long nr_scanned;
@@ -82,7 +88,7 @@ struct scan_control {
* Intend to reclaim enough continuous memory rather than reclaim
* enough amount of memory. i.e, mode for high order allocation.
*/
- bool lumpy_reclaim_mode;
+ enum lumpy_mode lumpy_reclaim_mode;
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
@@ -265,6 +271,36 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
return ret;
}
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
+ bool sync)
+{
+ enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+
+ /*
+ * Some reclaim attempts have already failed, so it is not worth
+ * trying synchronous lumpy reclaim.
+ */
+ if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+ return;
+
+ /*
+ * If we need a large contiguous chunk of memory, or have
+ * trouble getting a small set of contiguous pages, we
+ * will reclaim both active and inactive pages.
+ */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+ sc->lumpy_reclaim_mode = mode;
+ else if (sc->order && priority < DEF_PRIORITY - 2)
+ sc->lumpy_reclaim_mode = mode;
+ else
+ sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+}
+
+static void disable_lumpy_reclaim_mode(struct scan_control *sc)
+{
+ sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+}
+
static inline int is_page_cache_freeable(struct page *page)
{
/*
@@ -275,7 +311,8 @@ static inline int is_page_cache_freeable(struct page *page)
return page_count(page) - page_has_private(page) == 2;
}
-static int may_write_to_queue(struct backing_dev_info *bdi)
+static int may_write_to_queue(struct backing_dev_info *bdi,
+ struct scan_control *sc)
{
if (current->flags & PF_SWAPWRITE)
return 1;
@@ -283,6 +320,10 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
return 1;
if (bdi == current->backing_dev_info)
return 1;
+
+ /* lumpy reclaim for hugepages often needs a lot of writes */
+ if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+ return 1;
return 0;
}
@@ -307,12 +348,6 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
-/* Request for sync pageout. */
-enum pageout_io {
- PAGEOUT_IO_ASYNC,
- PAGEOUT_IO_SYNC,
-};
-
/* possible outcome of pageout() */
typedef enum {
/* failed to write page out, page is locked */
@@ -330,7 +365,7 @@ typedef enum {
* Calls ->writepage().
*/
static pageout_t pageout(struct page *page, struct address_space *mapping,
- enum pageout_io sync_writeback)
+ struct scan_control *sc)
{
/*
* If the page is dirty, only perform writeback if that write
@@ -366,7 +401,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
}
if (mapping->a_ops->writepage == NULL)
return PAGE_ACTIVATE;
- if (!may_write_to_queue(mapping->backing_dev_info))
+ if (!may_write_to_queue(mapping->backing_dev_info, sc))
return PAGE_KEEP;
if (clear_page_dirty_for_io(page)) {
@@ -376,7 +411,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
.nr_to_write = SWAP_CLUSTER_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
- .nonblocking = 1,
.for_reclaim = 1,
};
@@ -394,7 +428,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* direct reclaiming a large contiguous area and the
* first attempt to free a range of pages fails.
*/
- if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+ if (PageWriteback(page) &&
+ sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
wait_on_page_writeback(page);
if (!PageWriteback(page)) {
@@ -402,7 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
ClearPageReclaim(page);
}
trace_mm_vmscan_writepage(page,
- trace_reclaim_flags(page, sync_writeback));
+ trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
inc_zone_page_state(page, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
@@ -580,7 +615,7 @@ static enum page_references page_check_references(struct page *page,
referenced_page = TestClearPageReferenced(page);
/* Lumpy reclaim - ignore references */
- if (sc->lumpy_reclaim_mode)
+ if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
return PAGEREF_RECLAIM;
/*
@@ -616,7 +651,7 @@ static enum page_references page_check_references(struct page *page,
}
/* Reclaim if clean, defer dirty pages to writeback */
- if (referenced_page)
+ if (referenced_page && !PageSwapBacked(page))
return PAGEREF_RECLAIM_CLEAN;
return PAGEREF_RECLAIM;
@@ -644,12 +679,14 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
* shrink_page_list() returns the number of reclaimed pages
*/
static unsigned long shrink_page_list(struct list_head *page_list,
- struct scan_control *sc,
- enum pageout_io sync_writeback)
+ struct zone *zone,
+ struct scan_control *sc)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
int pgactivate = 0;
+ unsigned long nr_dirty = 0;
+ unsigned long nr_congested = 0;
unsigned long nr_reclaimed = 0;
cond_resched();
@@ -669,6 +706,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto keep;
VM_BUG_ON(PageActive(page));
+ VM_BUG_ON(page_zone(page) != zone);
sc->nr_scanned++;
@@ -694,10 +732,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* for any page for which writeback has already
* started.
*/
- if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+ if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+ may_enter_fs)
wait_on_page_writeback(page);
- else
- goto keep_locked;
+ else {
+ unlock_page(page);
+ goto keep_lumpy;
+ }
}
references = page_check_references(page, sc);
@@ -743,6 +784,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
if (PageDirty(page)) {
+ nr_dirty++;
+
if (references == PAGEREF_RECLAIM_CLEAN)
goto keep_locked;
if (!may_enter_fs)
@@ -751,14 +794,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto keep_locked;
/* Page is dirty, try to write it out here */
- switch (pageout(page, mapping, sync_writeback)) {
+ switch (pageout(page, mapping, sc)) {
case PAGE_KEEP:
+ nr_congested++;
goto keep_locked;
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
- if (PageWriteback(page) || PageDirty(page))
+ if (PageWriteback(page))
+ goto keep_lumpy;
+ if (PageDirty(page))
goto keep;
+
/*
* A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the page.
@@ -841,6 +888,7 @@ cull_mlocked:
try_to_free_swap(page);
unlock_page(page);
putback_lru_page(page);
+ disable_lumpy_reclaim_mode(sc);
continue;
activate_locked:
@@ -853,10 +901,21 @@ activate_locked:
keep_locked:
unlock_page(page);
keep:
+ disable_lumpy_reclaim_mode(sc);
+keep_lumpy:
list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
}
+ /*
+ * Tag a zone as congested if all the dirty pages encountered were
+ * backed by a congested BDI. In this case, reclaimers should just
+ * back off and wait for congestion to clear because further reclaim
+ * will encounter the same problem
+ */
+ if (nr_dirty == nr_congested)
+ zone_set_flag(zone, ZONE_CONGESTED);
+
free_page_list(&free_pages);
list_splice(&ret_pages, page_list);
@@ -1006,7 +1065,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
/* Check that we have not crossed a zone boundary. */
if (unlikely(page_zone_id(cursor_page) != zone_id))
- continue;
+ break;
/*
* If we don't have enough swap space, reclaiming of
@@ -1014,8 +1073,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* pointless.
*/
if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
- !PageSwapCache(cursor_page))
- continue;
+ !PageSwapCache(cursor_page))
+ break;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
@@ -1026,11 +1085,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_lumpy_dirty++;
scan++;
} else {
- if (mode == ISOLATE_BOTH &&
- page_count(cursor_page))
- nr_lumpy_failed++;
+ /* the page is freed already. */
+ if (!page_count(cursor_page))
+ continue;
+ break;
}
}
+
+ /* If we break out of the loop above, lumpy reclaim failed */
+ if (pfn < end_pfn)
+ nr_lumpy_failed++;
}
*scanned = scan;
@@ -1253,7 +1317,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
return false;
/* Only stall on lumpy reclaim */
- if (!sc->lumpy_reclaim_mode)
+ if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
return false;
/* If we have reclaimed everything on the isolated list, no stall */
@@ -1286,7 +1350,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- unsigned long nr_active;
unsigned long nr_anon;
unsigned long nr_file;
@@ -1298,15 +1361,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
return SWAP_CLUSTER_MAX;
}
-
+ set_lumpy_reclaim_mode(priority, sc, false);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(sc)) {
nr_taken = isolate_pages_global(nr_to_scan,
&page_list, &nr_scanned, sc->order,
- sc->lumpy_reclaim_mode ?
- ISOLATE_BOTH : ISOLATE_INACTIVE,
+ sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+ ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, 0, file);
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
@@ -1318,8 +1381,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
} else {
nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
&page_list, &nr_scanned, sc->order,
- sc->lumpy_reclaim_mode ?
- ISOLATE_BOTH : ISOLATE_INACTIVE,
+ sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+ ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, sc->mem_cgroup,
0, file);
/*
@@ -1337,20 +1400,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
spin_unlock_irq(&zone->lru_lock);
- nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+ nr_reclaimed = shrink_page_list(&page_list, zone, sc);
/* Check if we should synchronously wait for writeback */
if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
- congestion_wait(BLK_RW_ASYNC, HZ/10);
-
- /*
- * The attempt at page out may have made some
- * of the pages active, mark them inactive again.
- */
- nr_active = clear_active_flags(&page_list, NULL);
- count_vm_events(PGDEACTIVATE, nr_active);
-
- nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
+ set_lumpy_reclaim_mode(priority, sc, true);
+ nr_reclaimed += shrink_page_list(&page_list, zone, sc);
}
local_irq_disable();
@@ -1359,6 +1414,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+
+ trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
+ zone_idx(zone),
+ nr_scanned, nr_reclaimed,
+ priority,
+ trace_shrink_flags(file, sc->lumpy_reclaim_mode));
return nr_reclaimed;
}
@@ -1506,6 +1567,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
spin_unlock_irq(&zone->lru_lock);
}
+#ifdef CONFIG_SWAP
static int inactive_anon_is_low_global(struct zone *zone)
{
unsigned long active, inactive;
@@ -1531,12 +1593,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
{
int low;
+ /*
+ * If we don't have swap space, anonymous page deactivation
+ * is pointless.
+ */
+ if (!total_swap_pages)
+ return 0;
+
if (scanning_global_lru(sc))
low = inactive_anon_is_low_global(zone);
else
low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
return low;
}
+#else
+static inline int inactive_anon_is_low(struct zone *zone,
+ struct scan_control *sc)
+{
+ return 0;
+}
+#endif
static int inactive_file_is_low_global(struct zone *zone)
{
@@ -1721,21 +1797,6 @@ out:
}
}
-static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
-{
- /*
- * If we need a large contiguous chunk of memory, or have
- * trouble getting a small set of contiguous pages, we
- * will reclaim both active and inactive pages.
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- sc->lumpy_reclaim_mode = 1;
- else if (sc->order && priority < DEF_PRIORITY - 2)
- sc->lumpy_reclaim_mode = 1;
- else
- sc->lumpy_reclaim_mode = 0;
-}
-
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
@@ -1750,8 +1811,6 @@ static void shrink_zone(int priority, struct zone *zone,
get_scan_count(zone, sc, nr, priority);
- set_lumpy_reclaim_mode(priority, sc);
-
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(l) {
@@ -1782,7 +1841,7 @@ static void shrink_zone(int priority, struct zone *zone,
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
+ if (inactive_anon_is_low(zone, sc))
shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
throttle_vm_writeout(sc->gfp_mask);
@@ -1937,21 +1996,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
/* Take a nap, wait for some writeback to complete */
if (!sc->hibernation_mode && sc->nr_scanned &&
- priority < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ priority < DEF_PRIORITY - 2) {
+ struct zone *preferred_zone;
+
+ first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
+ NULL, &preferred_zone);
+ wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
+ }
}
out:
- /*
- * Now that we've scanned all the zones at this priority level, note
- * that level within the zone so that the next thread which performs
- * scanning of this zone will immediately start out at this priority
- * level. This affects only the decision whether or not to bring
- * mapped pages onto the inactive list.
- */
- if (priority < 0)
- priority = 0;
-
delayacct_freepages_end();
put_mems_allowed();
@@ -2247,6 +2301,15 @@ loop_again:
if (!zone_watermark_ok(zone, order,
min_wmark_pages(zone), end_zone, 0))
has_under_min_watermark_zone = 1;
+ } else {
+ /*
+ * If a zone reaches its high watermark,
+ * consider it to be no longer congested. It's
+ * possible there are dirty pages backed by
+ * congested BDIs but as pressure is relieved,
+ * speculatively avoid congestion waits
+ */
+ zone_clear_flag(zone, ZONE_CONGESTED);
}
}
@@ -2987,6 +3050,7 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
return 0;
}
+#ifdef CONFIG_NUMA
/*
* per node 'scan_unevictable_pages' attribute. On demand re-scan of
* a specified node's per zone unevictable lists for evictable pages.
@@ -3033,4 +3097,4 @@ void scan_unevictable_unregister_node(struct node *node)
{
sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}
-
+#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 355a9e669aaa..cd2e42be7b68 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -17,6 +17,8 @@
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
+#include <linux/writeback.h>
+#include <linux/compaction.h>
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -394,6 +396,7 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)
#endif
#ifdef CONFIG_COMPACTION
+
struct contig_page_info {
unsigned long free_pages;
unsigned long free_blocks_total;
@@ -745,6 +748,11 @@ static const char * const vmstat_text[] = {
"nr_isolated_anon",
"nr_isolated_file",
"nr_shmem",
+ "nr_dirtied",
+ "nr_written",
+ "nr_dirty_threshold",
+ "nr_dirty_background_threshold",
+
#ifdef CONFIG_NUMA
"numa_hit",
"numa_miss",
@@ -904,36 +912,44 @@ static const struct file_operations proc_zoneinfo_file_operations = {
.release = seq_release,
};
+enum writeback_stat_item {
+ NR_DIRTY_THRESHOLD,
+ NR_DIRTY_BG_THRESHOLD,
+ NR_VM_WRITEBACK_STAT_ITEMS,
+};
+
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
unsigned long *v;
-#ifdef CONFIG_VM_EVENT_COUNTERS
- unsigned long *e;
-#endif
- int i;
+ int i, stat_items_size;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
+ stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
+ NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
#ifdef CONFIG_VM_EVENT_COUNTERS
- v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
- + sizeof(struct vm_event_state), GFP_KERNEL);
-#else
- v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
- GFP_KERNEL);
+ stat_items_size += sizeof(struct vm_event_state);
#endif
+
+ v = kmalloc(stat_items_size, GFP_KERNEL);
m->private = v;
if (!v)
return ERR_PTR(-ENOMEM);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
v[i] = global_page_state(i);
+ v += NR_VM_ZONE_STAT_ITEMS;
+
+ global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
+ v + NR_DIRTY_THRESHOLD);
+ v += NR_VM_WRITEBACK_STAT_ITEMS;
+
#ifdef CONFIG_VM_EVENT_COUNTERS
- e = v + NR_VM_ZONE_STAT_ITEMS;
- all_vm_events(e);
- e[PGPGIN] /= 2; /* sectors -> kbytes */
- e[PGPGOUT] /= 2;
+ all_vm_events(v);
+ v[PGPGIN] /= 2; /* sectors -> kbytes */
+ v[PGPGOUT] /= 2;
#endif
- return v + *pos;
+ return m->private + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
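
The vmstat_text additions above surface four new lines in /proc/vmstat: nr_dirtied, nr_written, nr_dirty_threshold and nr_dirty_background_threshold, the last two filled in by global_dirty_limits() in vmstat_start(). A small userspace sketch, not part of the patch, that picks them out:

/* Illustrative reader for the new /proc/vmstat fields. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "nr_dirtied", 10) ||
                    !strncmp(line, "nr_written", 10) ||
                    !strncmp(line, "nr_dirty_threshold", 18) ||
                    !strncmp(line, "nr_dirty_background_threshold", 29))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}
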
diff --git a/net/802/garp.c b/net/802/garp.c
index 941f2a324d3a..c1df2dad8c6b 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
static void garp_release_port(struct net_device *dev)
{
- struct garp_port *port = dev->garp_port;
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
unsigned int i;
for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
- if (port->applicants[i])
+ if (rtnl_dereference(port->applicants[i]))
return;
}
rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
ASSERT_RTNL();
- if (!dev->garp_port) {
+ if (!rtnl_dereference(dev->garp_port)) {
err = garp_init_port(dev);
if (err < 0)
goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
ASSERT_RTNL();
diff --git a/net/802/stp.c b/net/802/stp.c
index 53c8f77f0ccd..978c30b1b36b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
#define GARP_ADDR_MAX 0x2F
#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
static struct llc_sap *sap __read_mostly;
static unsigned int sap_registered;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 05b867e43757..52077ca22072 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
ASSERT_RTNL();
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
BUG_ON(!grp);
/* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
struct vlan_group *grp, *ngrp = NULL;
int err;
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
if (!grp) {
ngrp = grp = vlan_group_alloc(real_dev);
if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
}
- grp = dev->vlgrp;
+ grp = rtnl_dereference(dev->vlgrp);
if (!grp)
goto out;
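
The garp, stp and vlan hunks above are part of the sparse __rcu annotation work: RCU-managed pointers are declared __rcu, writers publish them with rcu_assign_pointer(), lockless readers use rcu_dereference(), and RTNL-protected paths use rtnl_dereference(). A condensed sketch of the pattern; the struct and field names below are illustrative, not code from this patch.

/* Pattern sketch only. */
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct example_grp;

struct example_dev {
        struct example_grp __rcu *grp;          /* written under RTNL */
};

static void example_set(struct example_dev *dev, struct example_grp *g)
{
        ASSERT_RTNL();
        rcu_assign_pointer(dev->grp, g);        /* publish */
}

static struct example_grp *example_get_rtnl(struct example_dev *dev)
{
        return rtnl_dereference(dev->grp);      /* caller holds RTNL */
}

static struct example_grp *example_get_reader(struct example_dev *dev)
{
        return rcu_dereference(dev->grp);       /* inside rcu_read_lock() */
}
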
diff --git a/net/9p/client.c b/net/9p/client.c
index 83bf0541d66f..a848bca9fbff 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -450,32 +450,43 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
return err;
}
- if (type == P9_RERROR) {
+ if (type == P9_RERROR || type == P9_RLERROR) {
int ecode;
- char *ename;
- err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
- if (err) {
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n",
- err);
- return err;
- }
+ if (!p9_is_proto_dotl(c)) {
+ char *ename;
- if (p9_is_proto_dotu(c) ||
- p9_is_proto_dotl(c))
- err = -ecode;
+ err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+ if (err)
+ goto out_err;
+
+ if (p9_is_proto_dotu(c))
+ err = -ecode;
+
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
- if (!err || !IS_ERR_VALUE(err))
- err = p9_errstr2errno(ename, strlen(ename));
+ kfree(ename);
+ }
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
- kfree(ename);
} else
err = 0;
return err;
+
+out_err:
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+
+ return err;
}
/**
@@ -568,11 +579,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
va_start(ap, fmt);
err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
va_end(ap);
+ if (err)
+ goto reterr;
p9pdu_finalize(req->tc);
err = c->trans_mod->request(c, req);
if (err < 0) {
- c->status = Disconnected;
+ if (err != -ERESTARTSYS)
+ c->status = Disconnected;
goto reterr;
}
@@ -1151,12 +1165,44 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
}
EXPORT_SYMBOL(p9_client_link);
+int p9_client_fsync(struct p9_fid *fid, int datasync)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
+ fid->fid, datasync);
+ err = 0;
+ clnt = fid->clnt;
+
+ req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
+
+ p9_free_req(clnt, req);
+
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_fsync);
+
int p9_client_clunk(struct p9_fid *fid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
+ if (!fid) {
+ P9_EPRINTK(KERN_WARNING, "Trying to clunk with NULL fid\n");
+ dump_stack();
+ return 0;
+ }
+
P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
err = 0;
clnt = fid->clnt;
@@ -1240,16 +1286,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
if (data) {
memmove(data, dataptr, count);
- }
-
- if (udata) {
+ } else {
err = copy_to_user(udata, dataptr, count);
if (err) {
err = -EFAULT;
goto free_and_error;
}
}
-
p9_free_req(clnt, req);
return count;
@@ -1761,3 +1804,96 @@ error:
}
EXPORT_SYMBOL(p9_client_mkdir_dotl);
+
+int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
+ "start %lld length %lld proc_id %d client_id %s\n",
+ fid->fid, flock->type, flock->flags, flock->start,
+ flock->length, flock->proc_id, flock->client_id);
+
+ req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type,
+ flock->flags, flock->start, flock->length,
+ flock->proc_id, flock->client_id);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
+error:
+ p9_free_req(clnt, req);
+ return err;
+
+}
+EXPORT_SYMBOL(p9_client_lock_dotl);
+
+int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
+ "length %lld proc_id %d client_id %s\n", fid->fid, glock->type,
+ glock->start, glock->length, glock->proc_id, glock->client_id);
+
+ req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type,
+ glock->start, glock->length, glock->proc_id, glock->client_id);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type,
+ &glock->start, &glock->length, &glock->proc_id,
+ &glock->client_id);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
+ "proc_id %d client_id %s\n", glock->type, glock->start,
+ glock->length, glock->proc_id, glock->client_id);
+error:
+ p9_free_req(clnt, req);
+ return err;
+}
+EXPORT_SYMBOL(p9_client_getlock_dotl);
+
+int p9_client_readlink(struct p9_fid *fid, char **target)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
+
+ req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
+error:
+ p9_free_req(clnt, req);
+ return err;
+}
+EXPORT_SYMBOL(p9_client_readlink);
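
p9_client_fsync() issues the new 9P2000.L TFSYNC RPC for a fid. A hedged sketch of how a filesystem ->fsync path could drive it; the function name, the fid being stashed in file->private_data, and the prototype coming from <net/9p/client.h> are assumptions for the sketch, not part of this patch.

/* Illustrative sketch only. */
#include <linux/fs.h>
#include <net/9p/client.h>

static int example_9p_fsync(struct file *filp, int datasync)
{
        struct p9_fid *fid = filp->private_data;  /* assumption for the sketch */

        return p9_client_fsync(fid, datasync);
}
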
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3acd3afb20c8..45c15f491401 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -122,9 +122,8 @@ static size_t
pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
{
size_t len = MIN(pdu->capacity - pdu->size, size);
- int err = copy_from_user(&pdu->sdata[pdu->size], udata, len);
- if (err)
- printk(KERN_WARNING "pdu_write_u returning: %d\n", err);
+ if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
+ len = 0;
pdu->size += len;
return size - len;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b88515936e4b..c8f3f72ab20e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -75,6 +75,8 @@ struct virtio_chan {
struct p9_client *client;
struct virtio_device *vdev;
struct virtqueue *vq;
+ int ring_bufs_avail;
+ wait_queue_head_t *vc_wq;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
@@ -134,16 +136,30 @@ static void req_done(struct virtqueue *vq)
struct p9_fcall *rc;
unsigned int len;
struct p9_req_t *req;
+ unsigned long flags;
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
- P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
- P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
- req = p9_tag_lookup(chan->client, rc->tag);
- req->status = REQ_STATUS_RCVD;
- p9_client_cb(chan->client, req);
- }
+ do {
+ spin_lock_irqsave(&chan->lock, flags);
+ rc = virtqueue_get_buf(chan->vq, &len);
+
+ if (rc != NULL) {
+ if (!chan->ring_bufs_avail) {
+ chan->ring_bufs_avail = 1;
+ wake_up(chan->vc_wq);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
+ rc->tag);
+ req = p9_tag_lookup(chan->client, rc->tag);
+ req->status = REQ_STATUS_RCVD;
+ p9_client_cb(chan->client, req);
+ } else {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+ } while (rc != NULL);
}
/**
@@ -199,23 +215,43 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
int in, out;
struct virtio_chan *chan = client->trans;
char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
+ unsigned long flags;
+ int err;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+req_retry:
+ req->status = REQ_STATUS_SENT;
+
+ spin_lock_irqsave(&chan->lock, flags);
out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
req->tc->size);
in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
client->msize);
- req->status = REQ_STATUS_SENT;
-
- if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
- P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: virtio rpc add_buf returned failure");
- return -EIO;
+ err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ chan->ring_bufs_avail = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ err = wait_event_interruptible(*chan->vc_wq,
+ chan->ring_bufs_avail);
+ if (err == -ERESTARTSYS)
+ return err;
+
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ goto req_retry;
+ } else {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ return -EIO;
+ }
}
virtqueue_kick(chan->vq);
+ spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
return 0;
@@ -290,14 +326,23 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->tag_len = tag_len;
err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
if (err) {
- kfree(tag);
- goto out_free_vq;
+ goto out_free_tag;
}
+ chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
+ if (!chan->vc_wq) {
+ err = -ENOMEM;
+ goto out_free_tag;
+ }
+ init_waitqueue_head(chan->vc_wq);
+ chan->ring_bufs_avail = 1;
+
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
mutex_unlock(&virtio_9p_lock);
return 0;
+out_free_tag:
+ kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
kfree(chan);
@@ -371,6 +416,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
mutex_unlock(&virtio_9p_lock);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
kfree(chan->tag);
+ kfree(chan->vc_wq);
kfree(chan);
}
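
The virtio transport above now parks a requester on chan->vc_wq when virtqueue_add_buf() returns -ENOSPC and wakes it from req_done() once a buffer is reclaimed. A generic sketch of that full-ring flow-control pattern; every name below is illustrative and example_ring_add() is a hypothetical helper, not the driver's code.

/* Pattern sketch only: block the producer on a full ring, wake it from
 * the completion path. */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static DEFINE_SPINLOCK(example_lock);
static int example_space_avail = 1;

static int example_ring_add(void *req);         /* hypothetical helper */

static int example_submit(void *req)
{
        unsigned long flags;

retry:
        spin_lock_irqsave(&example_lock, flags);
        if (example_ring_add(req) == -ENOSPC) {
                example_space_avail = 0;
                spin_unlock_irqrestore(&example_lock, flags);
                if (wait_event_interruptible(example_wq, example_space_avail))
                        return -ERESTARTSYS;
                goto retry;
        }
        spin_unlock_irqrestore(&example_lock, flags);
        return 0;
}

static void example_complete(void)              /* completion callback */
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_space_avail = 1;
        spin_unlock_irqrestore(&example_lock, flags);
        wake_up(&example_wq);
}
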
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89b0f40..35dfb8318483 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_IP_CSUM) &&
+ return ((features & NETIF_F_NO_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_IPV6_CSUM) &&
+ ((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
+ __be16 protocol = skb->protocol;
int features = dev->features;
- if (vlan_tx_tag_present(skb))
+ if (vlan_tx_tag_present(skb)) {
features &= dev->vlan_features;
-
- if (can_checksum_protocol(features, skb->protocol))
- return true;
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- if (can_checksum_protocol(dev->features & dev->vlan_features,
- veh->h_vlan_encapsulated_proto))
- return true;
+ protocol = veh->h_vlan_encapsulated_proto;
+ features &= dev->vlan_features;
}
- return false;
+ return can_checksum_protocol(features, protocol);
}
/**
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
}
static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
/**
* dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
/*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
struct netdev_rx_queue *rxqueue;
- struct rps_map *map = NULL;
+ struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
} else
rxqueue = dev->_rx;
- if (rxqueue->rps_map) {
- map = rcu_dereference(rxqueue->rps_map);
- if (map && map->len == 1) {
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map) {
+ if (map->len == 1) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
- } else if (!rxqueue->rps_flow_table) {
+ } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
goto done;
}
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
WARN_ON(rcu_dereference_raw(dev->ip_ptr));
- WARN_ON(dev->ip6_ptr);
+ WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f253ba6c..82a4369ae150 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_for_each_entry(r, &ops->rules_list, list) {
if (r->pref == rule->target) {
- rule->ctarget = r;
+ RCU_INIT_POINTER(rule->ctarget, r);
break;
}
}
- if (rule->ctarget == NULL)
+ if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
unresolved = 1;
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
fib_rule_get(rule);
+ if (last)
+ list_add_rcu(&rule->list, &last->list);
+ else
+ list_add_rcu(&rule->list, &ops->rules_list);
+
if (ops->unresolved_rules) {
/*
* There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_for_each_entry(r, &ops->rules_list, list) {
if (r->action == FR_ACT_GOTO &&
r->target == rule->pref) {
- BUG_ON(r->ctarget != NULL);
+ BUG_ON(rtnl_dereference(r->ctarget) != NULL);
rcu_assign_pointer(r->ctarget, rule);
if (--ops->unresolved_rules == 0)
break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (unresolved)
ops->unresolved_rules++;
- if (last)
- list_add_rcu(&rule->list, &last->list);
- else
- list_add_rcu(&rule->list, &ops->rules_list);
-
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
*/
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
- if (tmp->ctarget == rule) {
+ if (rtnl_dereference(tmp->ctarget) == rule) {
rcu_assign_pointer(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->action = rule->action;
frh->flags = rule->flags;
- if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+ if (rule->action == FR_ACT_GOTO &&
+ rcu_dereference_raw(rule->ctarget) == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 7adf50352918..7beaec36b541 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
rcu_read_lock_bh();
filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
- unsigned int pkt_len = sk_run_filter(skb, filter->insns,
- filter->len);
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
rcu_read_unlock_bh();
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173e3eb2..a5ff5a89f376 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
}
spin_lock(&rps_map_lock);
- old_map = queue->rps_map;
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
table = NULL;
spin_lock(&rps_dev_flow_lock);
- old_table = queue->rps_flow_table;
+ old_table = rcu_dereference_protected(queue->rps_flow_table,
+ lockdep_is_held(&rps_dev_flow_lock));
rcu_assign_pointer(queue->rps_flow_table, table);
spin_unlock(&rps_dev_flow_lock);
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
struct netdev_rx_queue *first = queue->first;
+ struct rps_map *map;
+ struct rps_dev_flow_table *flow_table;
- if (queue->rps_map)
- call_rcu(&queue->rps_map->rcu, rps_map_release);
- if (queue->rps_flow_table)
- call_rcu(&queue->rps_flow_table->rcu,
- rps_dev_flow_table_release);
+ map = rcu_dereference_raw(queue->rps_map);
+ if (map)
+ call_rcu(&map->rcu, rps_map_release);
+
+ flow_table = rcu_dereference_raw(queue->rps_flow_table);
+ if (flow_table)
+ call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
if (atomic_dec_and_test(&first->count))
kfree(first);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c988e685433a..3f860261c5ee 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id == 0);
- ng = old_ng = net->gen;
+ old_ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&net_mutex));
+ ng = old_ng;
if (old_ng->len >= id)
goto assign;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2c0df0f95b3d..679b797d06b1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@ done:
static unsigned long num_arg(const char __user * user_buffer,
unsigned long maxlen, unsigned long *num)
{
- int i = 0;
+ int i;
*num = 0;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
static int strn_len(const char __user * user_buffer, unsigned int maxlen)
{
- int i = 0;
+ int i;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
{
struct seq_file *seq = file->private_data;
struct pktgen_dev *pkt_dev = seq->private;
- int i = 0, max, len;
+ int i, max, len;
char name[16], valstr[32];
unsigned long value = 0;
char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
return -EINVAL;
}
- max = count - i;
- tmp = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ tmp = count_trail_chars(user_buffer, max);
if (tmp < 0) {
pr_warning("illegal format\n");
return tmp;
}
- i += tmp;
+ i = tmp;
/* Read variable name */
@@ -1764,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file,
{
struct seq_file *seq = file->private_data;
struct pktgen_thread *t = seq->private;
- int i = 0, max, len, ret;
+ int i, max, len, ret;
char name[40];
char *pg_result;
@@ -1773,12 +1773,12 @@ static ssize_t pktgen_thread_write(struct file *file,
return -EINVAL;
}
- max = count - i;
- len = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ len = count_trail_chars(user_buffer, max);
if (len < 0)
return len;
- i += len;
+ i = len;
/* Read variable name */
@@ -1975,7 +1975,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
const char *ifname)
{
char b[IFNAMSIZ+5];
- int i = 0;
+ int i;
for (i = 0; ifname[i] != '@'; i++) {
if (i == IFNAMSIZ)
@@ -2519,8 +2519,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
{
if (pkt_dev->cflows) {
/* let go of the SAs if we have them */
- int i = 0;
- for (; i < pkt_dev->cflows; i++) {
+ int i;
+ for (i = 0; i < pkt_dev->cflows; i++) {
struct xfrm_state *x = pkt_dev->flows[i].x;
if (x) {
xfrm_state_put(x);
diff --git a/net/core/sock.c b/net/core/sock.c
index 11db43632df8..3eed5424e659 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sock_reset_flag(newsk, SOCK_DONE);
skb_queue_head_init(&newsk->sk_error_queue);
- filter = newsk->sk_filter;
+ filter = rcu_dereference_protected(newsk->sk_filter, 1);
if (filter != NULL)
sk_filter_charge(newsk, filter);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d984be..385b6095fdc4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rps_sock_flow_table;
+ orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 43e1c594ce8f..b232375a0b75 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
struct fib_node *f;
hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head __rcu *new_head;
+ struct hlist_head *new_head;
hlist_del_rcu(&f->fn_hash);
- new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ new_head = rcu_dereference_protected(fz->fz_hash, 1) +
+ fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, new_head);
}
}
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz)
memcpy(&nfz, fz, sizeof(nfz));
write_seqlock_bh(&fz->fz_lock);
- old_ht = fz->fz_hash;
- nfz.fz_hash = ht;
+ old_ht = rcu_dereference_protected(fz->fz_hash, 1);
+ RCU_INIT_POINTER(nfz.fz_hash, ht);
nfz.fz_hashmask = new_hashmask;
nfz.fz_divisor = new_divisor;
fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z)
seqlock_init(&fz->fz_lock);
fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
fz->fz_hashmask = fz->fz_divisor - 1;
- fz->fz_hash = fz->fz_embedded_hash;
+ RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
fz->fz_order = z;
fz->fz_revorder = 32 - z;
fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb,
for (fz = rcu_dereference(t->fn_zone_list);
fz != NULL;
fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head __rcu *head;
+ struct hlist_head *head;
struct hlist_node *node;
struct fib_node *f;
__be32 k;
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb,
seq = read_seqbegin(&fz->fz_lock);
k = fz_key(flp->fl4_dst, fz);
- head = &fz->fz_hash[fn_hash(k, fz)];
+ head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
hlist_for_each_entry_rcu(f, node, head, fn_hash) {
if (f->fn_key != k)
continue;
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb,
struct fib_info *last_resort;
struct fn_hash *t = (struct fn_hash *)tb->tb_data;
struct fn_zone *fz = t->fn_zones[0];
+ struct hlist_head *head;
if (fz == NULL)
return;
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb,
order = -1;
rcu_read_lock();
- hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
+ head = rcu_dereference(fz->fz_hash);
+ hlist_for_each_entry_rcu(f, node, head, fn_hash) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@ out:
/* Insert node F to FZ. */
static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, head);
}
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
/* Return the node in FZ matching KEY. */
static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
struct hlist_node *node;
struct fib_node *f;
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
static int fn_flush_list(struct fn_zone *fz, int idx)
{
- struct hlist_head *head = &fz->fz_hash[idx];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
struct hlist_node *node, *n;
struct fib_node *f;
int found = 0;
@@ -761,14 +764,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
struct fn_zone *fz)
{
int h, s_h;
+ struct hlist_head *head = rcu_dereference(fz->fz_hash);
- if (fz->fz_hash == NULL)
+ if (head == NULL)
return skb->len;
s_h = cb->args[3];
for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(&fz->fz_hash[h]))
+ if (hlist_empty(head + h))
continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
+ if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
cb->args[3] = h;
return -1;
}
@@ -872,7 +876,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
if (!iter->zone->fz_nent)
continue;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
maxslot = iter->zone->fz_divisor;
for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +961,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
goto out;
iter->bucket = 0;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
list_for_each_entry(fa, &fn->fn_alias, fa_list) {
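
Most of the fib_hash churn is about picking the dereference flavour that matches the calling context once fz_hash is marked __rcu: rcu_dereference() inside an RCU read-side critical section, rtnl_dereference() on configuration paths that already hold the RTNL lock, and rcu_dereference_protected(ptr, 1) where the caller knows nobody else can update the pointer. A schematic with a hypothetical my_table pointer (not part of the patch):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

static struct hlist_head __rcu *my_table;	/* hypothetical RCU-managed bucket array */

/* Fast path: reader under rcu_read_lock(). */
static bool my_first_bucket_empty(void)
{
	struct hlist_head *head;
	bool ret;

	rcu_read_lock();
	head = rcu_dereference(my_table);
	ret = !head || hlist_empty(head);
	rcu_read_unlock();
	return ret;
}

/* Configuration path: the caller holds RTNL, so no read lock is needed. */
static struct hlist_head *my_table_under_rtnl(void)
{
	return rtnl_dereference(my_table);
}

/* Init/teardown path: exclusion is guaranteed by construction, hence "1". */
static struct hlist_head *my_table_exclusive(void)
{
	return rcu_dereference_protected(my_table, 1);
}
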
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index caea6885fdbd..c6933f2ea310 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -22,7 +22,7 @@
#include <net/gre.h>
-static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static DEFINE_SPINLOCK(gre_proto_lock);
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
goto err_out;
spin_lock(&gre_proto_lock);
- if (gre_proto[version] != proto)
+ if (rcu_dereference_protected(gre_proto[version],
+ lockdep_is_held(&gre_proto_lock)) != proto)
goto err_out_unlock;
rcu_assign_pointer(gre_proto[version], NULL);
spin_unlock(&gre_proto_lock);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9ffa24b9a804..9e94d7cf4f8a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
- .avl_left = peer_avl_empty,
- .avl_right = peer_avl_empty,
+ .avl_left = peer_avl_empty_rcu,
+ .avl_right = peer_avl_empty_rcu,
.avl_height = 0
};
static struct {
- struct inet_peer *root;
+ struct inet_peer __rcu *root;
spinlock_t lock;
int total;
} peers = {
- .root = peer_avl_empty,
+ .root = peer_avl_empty_rcu,
.lock = __SPIN_LOCK_UNLOCKED(peers.lock),
.total = 0,
};
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p)
*/
#define lookup(_daddr, _stack) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
\
stackptr = _stack; \
*stackptr++ = &peers.root; \
- for (u = peers.root; u != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(peers.root, \
+ lockdep_is_held(&peers.lock)); \
+ u != peer_avl_empty; ) { \
if (_daddr == u->v4daddr) \
break; \
if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p)
else \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
- for (u = *v; u->avl_right != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
+ u->avl_right != peer_avl_empty_rcu; ) { \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
* Variable names are the proof of operation correctness.
* Look into mm/map_avl.c for more detail description of the ideas.
*/
-static void peer_avl_rebalance(struct inet_peer **stack[],
- struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+ struct inet_peer __rcu ***stackend)
{
- struct inet_peer **nodep, *node, *l, *r;
+ struct inet_peer __rcu **nodep;
+ struct inet_peer *node, *l, *r;
int lh, rh;
while (stackend > stack) {
nodep = *--stackend;
- node = *nodep;
- l = node->avl_left;
- r = node->avl_right;
+ node = rcu_dereference_protected(*nodep,
+ lockdep_is_held(&peers.lock));
+ l = rcu_dereference_protected(node->avl_left,
+ lockdep_is_held(&peers.lock));
+ r = rcu_dereference_protected(node->avl_right,
+ lockdep_is_held(&peers.lock));
lh = node_height(l);
rh = node_height(r);
if (lh > rh + 1) { /* l: RH+2 */
struct inet_peer *ll, *lr, *lrl, *lrr;
int lrh;
- ll = l->avl_left;
- lr = l->avl_right;
+ ll = rcu_dereference_protected(l->avl_left,
+ lockdep_is_held(&peers.lock));
+ lr = rcu_dereference_protected(l->avl_right,
+ lockdep_is_held(&peers.lock));
lrh = node_height(lr);
if (lrh <= node_height(ll)) { /* ll: RH+1 */
- node->avl_left = lr; /* lr: RH or RH+1 */
- node->avl_right = r; /* r: RH */
+ RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = lrh + 1; /* RH+1 or RH+2 */
- l->avl_left = ll; /* ll: RH+1 */
- l->avl_right = node; /* node: RH+1 or RH+2 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */
+ RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */
l->avl_height = node->avl_height + 1;
- *nodep = l;
+ RCU_INIT_POINTER(*nodep, l);
} else { /* ll: RH, lr: RH+1 */
- lrl = lr->avl_left; /* lrl: RH or RH-1 */
- lrr = lr->avl_right; /* lrr: RH or RH-1 */
- node->avl_left = lrr; /* lrr: RH or RH-1 */
- node->avl_right = r; /* r: RH */
+ lrl = rcu_dereference_protected(lr->avl_left,
+ lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */
+ lrr = rcu_dereference_protected(lr->avl_right,
+ lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = rh + 1; /* node: RH+1 */
- l->avl_left = ll; /* ll: RH */
- l->avl_right = lrl; /* lrl: RH or RH-1 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */
+ RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */
l->avl_height = rh + 1; /* l: RH+1 */
- lr->avl_left = l; /* l: RH+1 */
- lr->avl_right = node; /* node: RH+1 */
+ RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */
+ RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */
lr->avl_height = rh + 2;
- *nodep = lr;
+ RCU_INIT_POINTER(*nodep, lr);
}
} else if (rh > lh + 1) { /* r: LH+2 */
struct inet_peer *rr, *rl, *rlr, *rll;
int rlh;
- rr = r->avl_right;
- rl = r->avl_left;
+ rr = rcu_dereference_protected(r->avl_right,
+ lockdep_is_held(&peers.lock));
+ rl = rcu_dereference_protected(r->avl_left,
+ lockdep_is_held(&peers.lock));
rlh = node_height(rl);
if (rlh <= node_height(rr)) { /* rr: LH+1 */
- node->avl_right = rl; /* rl: LH or LH+1 */
- node->avl_left = l; /* l: LH */
+ RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = rlh + 1; /* LH+1 or LH+2 */
- r->avl_right = rr; /* rr: LH+1 */
- r->avl_left = node; /* node: LH+1 or LH+2 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */
+ RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */
r->avl_height = node->avl_height + 1;
- *nodep = r;
+ RCU_INIT_POINTER(*nodep, r);
} else { /* rr: RH, rl: RH+1 */
- rlr = rl->avl_right; /* rlr: LH or LH-1 */
- rll = rl->avl_left; /* rll: LH or LH-1 */
- node->avl_right = rll; /* rll: LH or LH-1 */
- node->avl_left = l; /* l: LH */
+ rlr = rcu_dereference_protected(rl->avl_right,
+ lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */
+ rll = rcu_dereference_protected(rl->avl_left,
+ lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = lh + 1; /* node: LH+1 */
- r->avl_right = rr; /* rr: LH */
- r->avl_left = rlr; /* rlr: LH or LH-1 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */
+ RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */
r->avl_height = lh + 1; /* r: LH+1 */
- rl->avl_right = r; /* r: LH+1 */
- rl->avl_left = node; /* node: LH+1 */
+ RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */
+ RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */
rl->avl_height = lh + 2;
- *nodep = rl;
+ RCU_INIT_POINTER(*nodep, rl);
}
} else {
node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
#define link_to_pool(n) \
do { \
n->avl_height = 1; \
- n->avl_left = peer_avl_empty; \
- n->avl_right = peer_avl_empty; \
- smp_wmb(); /* lockless readers can catch us now */ \
- **--stackptr = n; \
+ n->avl_left = peer_avl_empty_rcu; \
+ n->avl_right = peer_avl_empty_rcu; \
+ /* lockless readers can catch us now */ \
+ rcu_assign_pointer(**--stackptr, n); \
peer_avl_rebalance(stack, stackptr); \
} while (0)
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer **stack[PEER_MAXDEPTH];
- struct inet_peer ***stackptr, ***delp;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+ struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(p->v4daddr, stack) != p)
BUG();
delp = stackptr - 1; /* *delp[0] == p */
- if (p->avl_left == peer_avl_empty) {
+ if (p->avl_left == peer_avl_empty_rcu) {
*delp[0] = p->avl_right;
--stackptr;
} else {
/* look for a node to insert instead of p */
struct inet_peer *t;
t = lookup_rightempty(p);
- BUG_ON(*stackptr[-1] != t);
+ BUG_ON(rcu_dereference_protected(*stackptr[-1],
+ lockdep_is_held(&peers.lock)) != t);
**--stackptr = t->avl_left;
/* t is removed, t->v4daddr > x->v4daddr for any
* x in p->avl_left subtree.
* Put t in the old place of p. */
- *delp[0] = t;
+ RCU_INIT_POINTER(*delp[0], t);
t->avl_left = p->avl_left;
t->avl_right = p->avl_right;
t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
struct inet_peer *p;
- struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
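
In the inetpeer rework above there is exactly one point where a freshly initialised node becomes reachable, and it uses rcu_assign_pointer() (link_to_pool), which orders the node's initialisation before publication; the pointer shuffling inside peer_avl_rebalance() only relinks nodes that lockless readers could already see, so the cheaper RCU_INIT_POINTER() suffices there. A reduced sketch of that distinction on a hypothetical singly linked list (writers serialised externally, as peers.lock does in the patch):

#include <linux/rcupdate.h>

struct my_node {
	int key;
	struct my_node __rcu *next;
};

static struct my_node __rcu *my_head;

static void my_push(struct my_node *n, int key)
{
	n->key = key;
	/* n is not reachable yet, so a plain initialisation is enough */
	RCU_INIT_POINTER(n->next, rcu_dereference_protected(my_head, 1));
	/* publication point: pairs with rcu_dereference() in readers */
	rcu_assign_pointer(my_head, n);
}

/* Relink an already-visible node: its contents were published long ago,
 * so no ordering barrier is required (this mirrors the AVL rotations). */
static void my_drop_first(void)
{
	struct my_node *first = rcu_dereference_protected(my_head, 1);

	RCU_INIT_POINTER(my_head, rcu_dereference_protected(first->next, 1));
	/* first may still only be freed after a grace period */
}
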
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ffcbe369b7..01087e035b7d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
ipgre_tunnel_unlink(ign, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
t->parms.i_key = p.i_key;
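
The one-line synchronize_net() added to the GRE ioctl (and to ipip, ip6_tnl and sit below) covers the window between unhashing a tunnel and rewriting the addresses it was hashed on: lockless receive-path lookups that already found the tunnel on its old chain must drain before the keys change. A schematic of the idiom with a hypothetical RCU-protected hash (my_hash and my_obj are not from the patch; writers are assumed to be serialised, e.g. by RTNL):

#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/types.h>

struct my_obj {
	u32 key;
	struct hlist_node node;
};

static struct hlist_head my_hash[16];

static void my_rekey(struct my_obj *obj, u32 new_key)
{
	hlist_del_init_rcu(&obj->node);	/* unlink from the RCU-protected chain */
	synchronize_net();		/* wait out readers still walking the old chain */
	obj->key = new_key;		/* safe: nobody can still be looking at obj */
	hlist_add_head_rcu(&obj->node, &my_hash[new_key & 15]);
}
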
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64b70ad162e3..3948c86e59ca 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
but receiver should be enough clever f.e. to forward mtrace requests,
sent to multicast group to reach destination designated router.
*/
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head)
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
- struct ip_ra_chain *ra, *new_ra, **rap;
+ struct ip_ra_chain *ra, *new_ra;
+ struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
- for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+ for (rap = &ip_ra_chain;
+ (ra = rcu_dereference_protected(*rap,
+ lockdep_is_held(&ip_ra_lock))) != NULL;
+ rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e9b816e6cd73..cd300aaee78f 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
}
t = netdev_priv(dev);
ipip_tunnel_unlink(ipn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 65699c24411c..9ae5c01cd0b2 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,7 +28,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
/*
* Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
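
inet_add_protocol()/inet_del_protocol() have always registered handlers locklessly with an atomic cmpxchg() on the array slot; once the slots gain the __rcu annotation the compare-and-swap needs a cast back to a plain pointer type, since cmpxchg() operates on ordinary values and sparse would otherwise warn. A small sketch of that lockless registration scheme with a hypothetical handler table:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

#define MY_MAX_HANDLERS 16

struct my_handler {
	void (*handler)(void);
};

static const struct my_handler __rcu *my_handlers[MY_MAX_HANDLERS];

static int my_register(const struct my_handler *h, unsigned int idx)
{
	/* succeeds only if the slot was empty; the full barrier in cmpxchg()
	 * publishes h much like rcu_assign_pointer() would */
	return !cmpxchg((const struct my_handler **)&my_handlers[idx],
			NULL, h) ? 0 : -EEXIST;
}

static int my_unregister(const struct my_handler *h, unsigned int idx)
{
	int ret = (cmpxchg((const struct my_handler **)&my_handlers[idx],
			   h, NULL) == h) ? 0 : -ENOENT;

	synchronize_net();	/* readers are done with h before the caller frees it */
	return ret;
}
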
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d6cb2bfcd8e1..987bf9adb318 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
*/
struct rt_hash_bucket {
- struct rtable *chain;
+ struct rtable __rcu *chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rt_hash_table[st->bucket].chain)
+ if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
continue;
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
{
struct rt_cache_iter_state *st = seq->private;
- r = r->dst.rt_next;
+ r = rcu_dereference_bh(r->dst.rt_next);
while (!r) {
rcu_read_unlock_bh();
do {
if (--st->bucket < 0)
return NULL;
- } while (!rt_hash_table[st->bucket].chain);
+ } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
rcu_read_lock_bh();
- r = rt_hash_table[st->bucket].chain;
+ r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
}
- return rcu_dereference_bh(r);
+ return r;
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
for (i = 0; i <= rt_hash_mask; i++) {
if (process_context && need_resched())
cond_resched();
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_raw(rt_hash_table[i].chain);
if (!rth)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
{
- struct rtable ** prev, * p;
+ struct rtable __rcu **prev;
+ struct rtable *p;
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
/* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail; tail = tail->dst.rt_next)
+ for (tail = rth; tail;
+ tail = rcu_dereference_protected(tail->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i))))
if (!rt_is_expired(tail))
break;
if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)
/* call rt_free on entries after the tail requiring flush */
prev = &rt_hash_table[i].chain;
- for (p = *prev; p; p = next) {
- next = p->dst.rt_next;
+ for (p = rcu_dereference_protected(*prev,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ p != NULL;
+ p = next) {
+ next = rcu_dereference_protected(p->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i)));
if (!rt_is_expired(p)) {
prev = &p->dst.rt_next;
} else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
}
}
#else
- rth = rt_hash_table[i].chain;
- rt_hash_table[i].chain = NULL;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ rcu_assign_pointer(rt_hash_table[i].chain, NULL);
tail = NULL;
#endif
spin_unlock_bh(rt_hash_lock_addr(i));
for (; rth != tail; rth = next) {
- next = rth->dst.rt_next;
+ next = rcu_dereference_protected(rth->dst.rt_next, 1);
rt_free(rth);
}
}
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
while (aux != rth) {
if (compare_hash_inputs(&aux->fl, &rth->fl))
return 0;
- aux = aux->dst.rt_next;
+ aux = rcu_dereference_protected(aux->dst.rt_next, 1);
}
return ONE;
}
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long samples = 0;
unsigned long sum = 0, sum2 = 0;
unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)
samples++;
- if (*rthp == NULL)
+ if (rcu_dereference_raw(*rthp) == NULL)
continue;
length = 0;
spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
prefetch(rth->dst.rt_next);
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
static unsigned long last_gc;
static int rover;
static int equilibrium;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long now = jiffies;
int goal;
int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
k = (k + 1) & rt_hash_mask;
rthp = &rt_hash_table[k].chain;
spin_lock_bh(rt_hash_lock_addr(k));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
if (!rt_is_expired(rth) &&
!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)
while (rth) {
length += has_noalias(head, rth);
- rth = rth->dst.rt_next;
+ rth = rcu_dereference_protected(rth->dst.rt_next, 1);
}
return length >> FRACT_BITS;
}
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
static int rt_intern_hash(unsigned hash, struct rtable *rt,
struct rtable **rp, struct sk_buff *skb, int ifindex)
{
- struct rtable *rth, **rthp;
+ struct rtable *rth, *cand;
+ struct rtable __rcu **rthp, **candp;
unsigned long now;
- struct rtable *cand, **candp;
u32 min_score;
int chain_length;
int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);
static void rt_del(unsigned hash, struct rtable *rt)
{
- struct rtable **rthp, *aux;
+ struct rtable __rcu **rthp;
+ struct rtable *aux;
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
ip_rt_put(rt);
- while ((aux = *rthp) != NULL) {
+ while ((aux = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (aux == rt || rt_is_expired(aux)) {
*rthp = aux->dst.rt_next;
rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
{
int i, k;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
__be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
rt_genid(net));
- rthp=&rt_hash_table[hash].chain;
+ rthp = &rt_hash_table[hash].chain;
while ((rth = rcu_dereference(*rthp)) != NULL) {
struct rtable *rt;
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 9a17bd2a0a37..ac3b3ee4b07c 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,27 +14,32 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b3f7e8cf18ac..28cb2d733a3c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ec7a91d9e865..e048ec62d109 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
@@ -886,12 +886,13 @@ retry:
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
+ age = (jiffies - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
- idev->cnf.temp_valid_lft);
+ idev->cnf.temp_valid_lft + age);
tmp_prefered_lft = min_t(__u32,
ifp->prefered_lft,
- idev->cnf.temp_prefered_lft -
+ idev->cnf.temp_prefered_lft + age -
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
- if (addrconf_dad_end(ifp))
+ if (addrconf_dad_end(ifp)) {
+ in6_ifa_put(ifp);
return;
+ }
if (net_ratelimit())
printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@ ok:
ipv6_ifa_notify(0, ift);
}
- if (create && in6_dev->cnf.use_tempaddr > 0) {
+ if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
* When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address.
+ * also create a new temporary address. Also create a temporary
+ * address if it's enabled but no temporary address currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
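
The lifetime arithmetic fix is easiest to see with numbers: ifp->valid_lft and ifp->prefered_lft are measured from the public address's tstamp, while temp_valid_lft/temp_prefered_lft are meant to apply from the moment the temporary address is created, so the public address's age has to be added to the sysctl bound before taking the minimum. A toy illustration with made-up values (not from the patch):

#include <linux/kernel.h>

/* e.g. my_tmp_valid_lft(86000, 86400, 600) == 86000: the RA-derived lifetime
 * still wins instead of being clipped by a bound measured on the wrong clock. */
static unsigned long my_tmp_valid_lft(unsigned long ifp_valid_lft,
				      unsigned long cnf_temp_valid_lft,
				      unsigned long age)
{
	/* both terms are now relative to the public address's tstamp */
	return min_t(unsigned long, ifp_valid_lft, cnf_temp_valid_lft + age);
}
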
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c2c0f89397b1..2a59610c2a58 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
t = netdev_priv(dev);
ip6_tnl_unlink(ip6n, t);
+ synchronize_net();
err = ip6_tnl_change(t, &p);
ip6_tnl_link(ip6n, t);
netdev_state_change(dev);
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0553867a317f..d1770e061c08 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
+ if (!capable(CAP_NET_ADMIN)) {
+ retv = -EPERM;
+ break;
+ }
if (optlen < sizeof(int))
goto e_inval;
/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 44d2eeac089b..448464844a25 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -5,10 +5,15 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+config NF_DEFRAG_IPV6
+ tristate
+ default n
+
config NF_CONNTRACK_IPV6
tristate "IPv6 connection tracking support"
depends on INET && IPV6 && NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
+ select NF_DEFRAG_IPV6
---help---
Connection tracking keeps a record of what packets have passed
through your machine, in order to figure out how they are related
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 3f8e4a3d83ce..0a432c9b0795 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
# l3 independent conntrack
obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 489d71b844ac..3a3f129a44cb 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void)
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);
+#ifdef CONFIG_SYSCTL
nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
nf_ct_frag6_sysctl_table);
if (!nf_ct_frag6_sysctl_header) {
inet_frags_fini(&nf_frags);
return -ENOMEM;
}
+#endif
return 0;
}
void nf_ct_frag6_cleanup(void)
{
+#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nf_ct_frag6_sysctl_header);
nf_ct_frag6_sysctl_header = NULL;
-
+#endif
inet_frags_fini(&nf_frags);
nf_init_frags.low_thresh = 0;
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9bb936ae2452..9a7978fdc02a 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,13 +25,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 45e6efb7f171..86c39526ba5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
- if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+ if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 367a6cc584cc..d6bfaec3bbbf 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
}
t = netdev_priv(dev);
ipip6_tunnel_unlink(sitn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index d9864725d0c6..4f3cec12aa85 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,23 +30,26 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c84dad432114..91def93bec85 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1712af1c7b3f..c64ce0a0bb03 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,10 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
return net_generic(net, l2tp_net_id);
}
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+ } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+ } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common);
* Returns 1 if the packet was not a good data packet and could not be
* forwarded. All such packets are passed up to userspace to deal with.
*/
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
- int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ int (*payload_hook)(struct sk_buff *skb))
{
struct l2tp_session *session = NULL;
unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@ error:
return 1;
}
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
* Return codes:
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
return bufp - optr;
}
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+ size_t data_len)
{
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int len = skb->len;
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat
return 0;
}
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
/* Automatically called when the skb is freed.
*/
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
* The tunnel context is deleted only when all session sockets have been
* closed.
*/
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
{
struct l2tp_tunnel *tunnel;
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk)
end:
return;
}
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
/* When the tunnel is closed, all the attached sessions need to go too.
*/
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
int hash;
struct hlist_node *walk;
@@ -1193,12 +1223,11 @@ again:
}
write_unlock_bh(&tunnel->hlist_lock);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Really kill the tunnel.
* Come here only when all sessions have been cleared from the tunnel.
*/
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
atomic_dec(&l2tp_tunnel_count);
kfree(tunnel);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
/* Create a socket for the tunnel, if one isn't set up by
* userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_len parameters are set.
*/
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
}
}
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
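
The tunnel refcount helpers that move from l2tp_core.h into l2tp_core.c (and become static, since nothing outside the core uses them any more) are the usual atomic_t ownership pattern: every user takes a reference, and whoever drops the count to zero frees the object. A minimal sketch of the pattern with a hypothetical object:

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_ref_obj {
	atomic_t ref_count;
	/* ... payload ... */
};

static void my_obj_free(struct my_ref_obj *obj)
{
	kfree(obj);
}

static inline void my_obj_hold(struct my_ref_obj *obj)
{
	atomic_inc(&obj->ref_count);
}

static inline void my_obj_put(struct my_ref_obj *obj)
{
	/* atomic_dec_and_test() returns true only for the final reference */
	if (atomic_dec_and_test(&obj->ref_count))
		my_obj_free(obj);
}
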
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index f0f318edd3f1..a16a48e79fab 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i
extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
extern void l2tp_session_free(struct l2tp_session *session);
extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
- } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
- } while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 1c770c0644d1..0bf6a59545ab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -576,7 +576,7 @@ out:
return copied;
}
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ff60c022f51d..239c4836a946 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -456,6 +456,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
+ sta->last_rx = jiffies;
set_sta_flags(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22bc42b18991..6b322fa681f5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -748,7 +748,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
hw->queues = IEEE80211_MAX_QUEUES;
local->workqueue =
- create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
+ alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
if (!local->workqueue) {
result = -ENOMEM;
goto fail_workqueue;
@@ -962,12 +962,6 @@ static void __exit ieee80211_exit(void)
rc80211_minstrel_ht_exit();
rc80211_minstrel_exit();
- /*
- * For key todo, it'll be empty by now but the work
- * might still be scheduled.
- */
- flush_scheduled_work();
-
if (mesh_allocated)
ieee80211s_stop();
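
The mac80211 hunk swaps create_singlethread_workqueue() for alloc_ordered_workqueue(), which gives the same one-work-item-at-a-time guarantee on top of the concurrency-managed workqueue infrastructure, and drops the flush_scheduled_work() call that is no longer needed on exit. A minimal usage sketch with hypothetical work items (not from the patch):

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs strictly serialised with any other work queued on my_wq */
}

static int my_setup(void)
{
	my_wq = alloc_ordered_workqueue("my_wq", 0);
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}

static void my_teardown(void)
{
	destroy_workqueue(my_wq);	/* flushes pending work, then frees the wq */
}
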
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 809cf230d251..33f76993da08 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -329,6 +329,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
* if needed.
*/
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ /* Skip invalid rates */
+ if (info->control.rates[i].idx < 0)
+ break;
/* Rate masking supports only legacy rates for now */
if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
continue;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 43288259f4a1..1534f2b44caf 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `TPROXY' target, which is somewhat similar to
REDIRECT. It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `socket' match, which can be used to match
packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 19c482caf30b..640678f47a2a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -21,7 +21,9 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
static inline const struct in6_addr *
tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
{
.name = "TPROXY",
.family = NFPROTO_IPV6,
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
static int __init tproxy_tg_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 2dbd4c857735..d94a858dc52a 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -14,7 +14,6 @@
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>
@@ -22,7 +21,12 @@
#include <net/inet_sock.h>
#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
#include <linux/netfilter/xt_socket.h>
@@ -186,7 +190,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
return socket_match(skb, par, par->matchinfo);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
static int
extract_icmp6_fields(const struct sk_buff *skb,
@@ -331,7 +335,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
{
.name = "socket",
.revision = 1,
@@ -348,7 +352,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
static int __init socket_mt_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
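
Both xt_TPROXY and xt_socket stop keying their IPv6 support on CONFIG_IPV6 and instead define a local HAVE_IPV6 macro from CONFIG_IP6_NF_IPTABLES, because the defrag hooks they call only exist when ip6_tables is built; testing both the builtin and the _MODULE symbol covers =y and =m configurations. A compressed illustration of the idiom with a hypothetical dependency:

/* Hypothetical feature gated on another subsystem being built (y or m). */
#if defined(CONFIG_MY_DEP) || defined(CONFIG_MY_DEP_MODULE)
#define MY_HAVE_DEP 1
#endif

#ifdef MY_HAVE_DEP
static void my_dep_setup(void)
{
	/* call into the optional subsystem here */
}
#else
static inline void my_dep_setup(void) { }
#endif
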
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index cd96ed3ccee4..478181d53c55 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,9 +83,9 @@ struct netlink_sock {
struct module *module;
};
-struct listeners_rcu_head {
- struct rcu_head rcu_head;
- void *ptr;
+struct listeners {
+ struct rcu_head rcu;
+ unsigned long masks[0];
};
#define NETLINK_KERNEL_SOCKET 0x1
@@ -119,7 +119,7 @@ struct nl_pid_hash {
struct netlink_table {
struct nl_pid_hash hash;
struct hlist_head mc_list;
- unsigned long *listeners;
+ struct listeners __rcu *listeners;
unsigned int nl_nonroot;
unsigned int groups;
struct mutex *cb_mutex;
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk)
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
- tbl->listeners[i] = mask;
+ tbl->listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
- unsigned long *listeners;
+ struct listeners *listeners;
BUG_ON(!netlink_is_kernel(sk));
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (group - 1 < nl_table[sk->sk_protocol].groups)
- res = test_bit(group - 1, listeners);
+ res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
- unsigned long *listeners = NULL;
+ struct listeners *listeners = NULL;
BUG_ON(!nl_table);
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
if (groups < 32)
groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
netlink_table_grab();
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
- nl_table[unit].listeners = listeners;
+ rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk)
EXPORT_SYMBOL(netlink_kernel_release);
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
{
- struct listeners_rcu_head *lrh;
-
- lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
- kfree(lrh->ptr);
+ kfree(container_of(head, struct listeners, rcu));
}
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
- unsigned long *listeners, *old = NULL;
- struct listeners_rcu_head *old_rcu_head;
+ struct listeners *new, *old;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
if (groups < 32)
groups = 32;
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
- listeners = kzalloc(NLGRPSZ(groups) +
- sizeof(struct listeners_rcu_head),
- GFP_ATOMIC);
- if (!listeners)
+ new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+ if (!new)
return -ENOMEM;
- old = tbl->listeners;
- memcpy(listeners, old, NLGRPSZ(tbl->groups));
- rcu_assign_pointer(tbl->listeners, listeners);
- /*
- * Free the old memory after an RCU grace period so we
- * don't leak it. We use call_rcu() here in order to be
- * able to call this function from atomic contexts. The
- * allocation of this memory will have reserved enough
- * space for struct listeners_rcu_head at the end.
- */
- old_rcu_head = (void *)(tbl->listeners +
- NLGRPLONGS(tbl->groups));
- old_rcu_head->ptr = old;
- call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+ old = rcu_dereference_raw(tbl->listeners);
+ memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, new);
+
+ call_rcu(&old->rcu, listeners_free_rcu);
}
tbl->groups = groups;
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net)
static void __init netlink_add_usersock_entry(void)
{
- unsigned long *listeners;
+ struct listeners *listeners;
int groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
- panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+ panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
netlink_table_grab();
nl_table[NETLINK_USERSOCK].groups = groups;
- nl_table[NETLINK_USERSOCK].listeners = listeners;
+ rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
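
The af_netlink rework replaces the old trick of tacking a listeners_rcu_head onto the end of the bitmap with a struct that embeds the rcu_head ahead of a flexible array, so the whole object can be handed to call_rcu() directly. A condensed sketch of allocating, growing and retiring such an object, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_masks {
	struct rcu_head rcu;
	unsigned long masks[0];		/* old-style flexible array, as in the patch */
};

static struct my_masks __rcu *cur_masks;
static size_t cur_words;

static void my_masks_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_masks, rcu));
}

/* Grow the bitmap; the caller serialises writers (the patch holds the
 * netlink table lock), so a raw dereference of the old copy is fine. */
static int my_masks_grow(size_t words)
{
	struct my_masks *new, *old;

	new = kzalloc(sizeof(*new) + words * sizeof(unsigned long), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_raw(cur_masks);
	if (old)
		memcpy(new->masks, old->masks, cur_words * sizeof(unsigned long));
	rcu_assign_pointer(cur_masks, new);
	cur_words = words;

	if (old)
		call_rcu(&old->rcu, my_masks_free_rcu);
	return 0;
}

Readers then dereference the struct once and index ->masks, which is what netlink_has_listeners() does after the conversion.
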
diff --git a/net/socket.c b/net/socket.c
index abf3e2561521..ee3cd280c76e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -377,7 +377,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
&socket_file_ops);
if (unlikely(!file)) {
/* drop dentry, keep inode */
- atomic_inc(&path.dentry->d_inode->i_count);
+ ihold(path.dentry->d_inode);
path_put(&path);
put_unused_fd(fd);
return -ENFILE;
@@ -480,6 +480,7 @@ static struct socket *sock_alloc(void)
sock = SOCKET_I(inode);
kmemcheck_annotate_bitfield(sock, type);
+ inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -1145,7 +1146,7 @@ call_kill:
}
EXPORT_SYMBOL(sock_wake_async);
-static int __sock_create(struct net *net, int family, int type, int protocol,
+int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
@@ -1257,6 +1258,7 @@ out_release:
rcu_read_unlock();
goto out_sock_release;
}
+EXPORT_SYMBOL(__sock_create);
int sock_create(int family, int type, int protocol, struct socket **res)
{
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 3376d7657185..8873fd8ddacd 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -36,22 +36,3 @@ config RPCSEC_GSS_KRB5
Kerberos support should be installed.
If unsure, say Y.
-
-config RPCSEC_GSS_SPKM3
- tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
- select SUNRPC_GSS
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_DES
- select CRYPTO_CAST5
- select CRYPTO_CBC
- help
- Choose Y here to enable Secure RPC using the SPKM3 public key
- GSS-API mechanism (RFC 2025).
-
- Secure RPC calls with SPKM3 require an auxiliary userspace
- daemon which may be found in the Linux nfs-utils package
- available from http://linux-nfs.org/.
-
- If unsure, say N.
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 68192e562749..afe67849269f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -658,7 +658,7 @@ out1:
return err;
}
-void __exit rpcauth_remove_module(void)
+void rpcauth_remove_module(void)
{
rpc_destroy_authunix();
rpc_destroy_generic_auth();
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 43162bb3b78f..e010a015d996 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -158,7 +158,7 @@ int __init rpc_init_generic_auth(void)
return rpcauth_init_credcache(&generic_auth);
}
-void __exit rpc_destroy_generic_auth(void)
+void rpc_destroy_generic_auth(void)
{
rpcauth_destroy_credcache(&generic_auth);
}
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 74a231735f67..7350d86a32ee 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -11,8 +11,3 @@ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
-
-obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
-
-rpcsec_gss_spkm3-objs := gss_spkm3_mech.o gss_spkm3_seal.o gss_spkm3_unseal.o \
- gss_spkm3_token.o
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 778e5dfc5144..f375decc024b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
- char sigkeyconstant[] = "signaturekey";
+ static const char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
deleted file mode 100644
index adade3d313f2..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_mech.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- * J. Bruce Fields <bfields@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/in.h>
-#include <linux/sunrpc/svcauth_gss.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
- const void *q;
- unsigned int len;
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- res->len = len;
- if (len == 0) {
- res->data = NULL;
- return p;
- }
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- res->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(res->data == NULL))
- return ERR_PTR(-ENOMEM);
- return q;
-}
-
-static int
-gss_import_sec_context_spkm3(const void *p, size_t len,
- struct gss_ctx *ctx_id,
- gfp_t gfp_mask)
-{
- const void *end = (const void *)((const char *)p + len);
- struct spkm3_ctx *ctx;
- int version;
-
- if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask)))
- goto out_err;
-
- p = simple_get_bytes(p, end, &version, sizeof(version));
- if (IS_ERR(p))
- goto out_err_free_ctx;
- if (version != 1) {
- dprintk("RPC: unknown spkm3 token format: "
- "obsolete nfs-utils?\n");
- p = ERR_PTR(-EINVAL);
- goto out_err_free_ctx;
- }
-
- p = simple_get_netobj(p, end, &ctx->ctx_id);
- if (IS_ERR(p))
- goto out_err_free_ctx;
-
- p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
- if (IS_ERR(p))
- goto out_err_free_ctx_id;
-
- p = simple_get_netobj(p, end, &ctx->mech_used);
- if (IS_ERR(p))
- goto out_err_free_ctx_id;
-
- p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
- if (IS_ERR(p))
- goto out_err_free_mech;
-
- p = simple_get_netobj(p, end, &ctx->conf_alg);
- if (IS_ERR(p))
- goto out_err_free_mech;
-
- p = simple_get_netobj(p, end, &ctx->derived_conf_key);
- if (IS_ERR(p))
- goto out_err_free_conf_alg;
-
- p = simple_get_netobj(p, end, &ctx->intg_alg);
- if (IS_ERR(p))
- goto out_err_free_conf_key;
-
- p = simple_get_netobj(p, end, &ctx->derived_integ_key);
- if (IS_ERR(p))
- goto out_err_free_intg_alg;
-
- if (p != end) {
- p = ERR_PTR(-EFAULT);
- goto out_err_free_intg_key;
- }
-
- ctx_id->internal_ctx_id = ctx;
-
- dprintk("RPC: Successfully imported new spkm context.\n");
- return 0;
-
-out_err_free_intg_key:
- kfree(ctx->derived_integ_key.data);
-out_err_free_intg_alg:
- kfree(ctx->intg_alg.data);
-out_err_free_conf_key:
- kfree(ctx->derived_conf_key.data);
-out_err_free_conf_alg:
- kfree(ctx->conf_alg.data);
-out_err_free_mech:
- kfree(ctx->mech_used.data);
-out_err_free_ctx_id:
- kfree(ctx->ctx_id.data);
-out_err_free_ctx:
- kfree(ctx);
-out_err:
- return PTR_ERR(p);
-}
-
-static void
-gss_delete_sec_context_spkm3(void *internal_ctx)
-{
- struct spkm3_ctx *sctx = internal_ctx;
-
- kfree(sctx->derived_integ_key.data);
- kfree(sctx->intg_alg.data);
- kfree(sctx->derived_conf_key.data);
- kfree(sctx->conf_alg.data);
- kfree(sctx->mech_used.data);
- kfree(sctx->ctx_id.data);
- kfree(sctx);
-}
-
-static u32
-gss_verify_mic_spkm3(struct gss_ctx *ctx,
- struct xdr_buf *signbuf,
- struct xdr_netobj *checksum)
-{
- u32 maj_stat = 0;
- struct spkm3_ctx *sctx = ctx->internal_ctx_id;
-
- maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
-
- dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
- return maj_stat;
-}
-
-static u32
-gss_get_mic_spkm3(struct gss_ctx *ctx,
- struct xdr_buf *message_buffer,
- struct xdr_netobj *message_token)
-{
- u32 err = 0;
- struct spkm3_ctx *sctx = ctx->internal_ctx_id;
-
- err = spkm3_make_token(sctx, message_buffer,
- message_token, SPKM_MIC_TOK);
- dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
- return err;
-}
-
-static const struct gss_api_ops gss_spkm3_ops = {
- .gss_import_sec_context = gss_import_sec_context_spkm3,
- .gss_get_mic = gss_get_mic_spkm3,
- .gss_verify_mic = gss_verify_mic_spkm3,
- .gss_delete_sec_context = gss_delete_sec_context_spkm3,
-};
-
-static struct pf_desc gss_spkm3_pfs[] = {
- {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"},
- {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
-};
-
-static struct gss_api_mech gss_spkm3_mech = {
- .gm_name = "spkm3",
- .gm_owner = THIS_MODULE,
- .gm_oid = {7, "\053\006\001\005\005\001\003"},
- .gm_ops = &gss_spkm3_ops,
- .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs),
- .gm_pfs = gss_spkm3_pfs,
-};
-
-static int __init init_spkm3_module(void)
-{
- int status;
-
- status = gss_mech_register(&gss_spkm3_mech);
- if (status)
- printk("Failed to register spkm3 gss mechanism!\n");
- return status;
-}
-
-static void __exit cleanup_spkm3_module(void)
-{
- gss_mech_unregister(&gss_spkm3_mech);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_spkm3_module);
-module_exit(cleanup_spkm3_module);
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
deleted file mode 100644
index 5a3a65a0e2b4..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_seal.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/random.h>
-#include <linux/crypto.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <linux/sunrpc/xdr.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
-const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
-
-/*
- * spkm3_make_token()
- *
- * Only SPKM_MIC_TOK with md5 intg-alg is supported
- */
-
-u32
-spkm3_make_token(struct spkm3_ctx *ctx,
- struct xdr_buf * text, struct xdr_netobj * token,
- int toktype)
-{
- s32 checksum_type;
- char tokhdrbuf[25];
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
- int tokenlen = 0;
- unsigned char *ptr;
- s32 now;
- int ctxelen = 0, ctxzbit = 0;
- int md5elen = 0, md5zbit = 0;
-
- now = jiffies;
-
- if (ctx->ctx_id.len != 16) {
- dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
- ctx->ctx_id.len);
- goto out_err;
- }
-
- if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported I-ALG "
- "algorithm. only support hmac-md5 I-ALG.\n");
- goto out_err;
- } else
- checksum_type = CKSUMTYPE_HMAC_MD5;
-
- if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported C-ALG "
- "algorithm\n");
- goto out_err;
- }
-
- if (toktype == SPKM_MIC_TOK) {
- /* Calculate checksum over the mic-header */
- asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
- spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
- ctxelen, ctxzbit);
- if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
- (char *)mic_hdr.data, mic_hdr.len,
- text, 0, &md5cksum))
- goto out_err;
-
- asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
- tokenlen = 10 + ctxelen + 1 + md5elen + 1;
-
- /* Create token header using generic routines */
- token->len = g_token_size(&ctx->mech_used, tokenlen + 2);
-
- ptr = token->data;
- g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr);
-
- spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
- } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
- dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK "
- "not supported\n");
- goto out_err;
- }
-
- /* XXX need to implement sequence numbers, and ctx->expired */
-
- return GSS_S_COMPLETE;
-out_err:
- token->data = NULL;
- token->len = 0;
- return GSS_S_FAILURE;
-}
-
-static int
-spkm3_checksummer(struct scatterlist *sg, void *data)
-{
- struct hash_desc *desc = data;
-
- return crypto_hash_update(desc, sg, sg->length);
-}
-
-/* checksum the plaintext data and hdrlen bytes of the token header */
-s32
-make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
- unsigned int hdrlen, struct xdr_buf *body,
- unsigned int body_offset, struct xdr_netobj *cksum)
-{
- char *cksumname;
- struct hash_desc desc; /* XXX add to ctx? */
- struct scatterlist sg[1];
- int err;
-
- switch (cksumtype) {
- case CKSUMTYPE_HMAC_MD5:
- cksumname = "hmac(md5)";
- break;
- default:
- dprintk("RPC: spkm3_make_checksum:"
- " unsupported checksum %d", cksumtype);
- return GSS_S_FAILURE;
- }
-
- if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;
-
- desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(desc.tfm))
- return GSS_S_FAILURE;
- cksum->len = crypto_hash_digestsize(desc.tfm);
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_setkey(desc.tfm, key->data, key->len);
- if (err)
- goto out;
-
- err = crypto_hash_init(&desc);
- if (err)
- goto out;
-
- sg_init_one(sg, header, hdrlen);
- crypto_hash_update(&desc, sg, sg->length);
-
- xdr_process_buf(body, body_offset, body->len - body_offset,
- spkm3_checksummer, &desc);
- crypto_hash_final(&desc, cksum->data);
-
-out:
- crypto_free_hash(desc.tfm);
-
- return err ? GSS_S_FAILURE : 0;
-}
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
deleted file mode 100644
index a99825d7caa0..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_token.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/random.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-/*
- * asn1_bitstring_len()
- *
- * calculate the asn1 bitstring length of the xdr_netobject
- */
-void
-asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
-{
- int i, zbit = 0,elen = in->len;
- char *ptr;
-
- ptr = &in->data[in->len -1];
-
- /* count trailing 0's */
- for(i = in->len; i > 0; i--) {
- if (*ptr == 0) {
- ptr--;
- elen--;
- } else
- break;
- }
-
- /* count number of 0 bits in final octet */
- ptr = &in->data[elen - 1];
- for(i = 0; i < 8; i++) {
- short mask = 0x01;
-
- if (!((mask << i) & *ptr))
- zbit++;
- else
- break;
- }
- *enclen = elen;
- *zerobits = zbit;
-}
-
-/*
- * decode_asn1_bitstring()
- *
- * decode a bitstring into a buffer of the expected length.
- * enclen = bit string length
- * explen = expected length (define in rfc)
- */
-int
-decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
-{
- if (!(out->data = kzalloc(explen,GFP_NOFS)))
- return 0;
- out->len = explen;
- memcpy(out->data, in, enclen);
- return 1;
-}
-
-/*
- * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
- *
- * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
- *
- * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
- *
- * pos value
- * ----------
- * [0] a4 SPKM-MIC tag
- * [1] ?? innertoken length (max 44)
- *
- *
- * tok_hdr piece of checksum data starts here
- *
- * the maximum mic-header len = 9 + 17 = 26
- * mic-header
- * ----------
- * [2] 30 SEQUENCE tag
- * [3] ?? mic-header length: (max 23) = TokenID + ContextID
- *
- * TokenID - all fields constant and can be hardcoded
- * -------
- * [4] 02 Type 2
- * [5] 02 Length 2
- * [6][7] 01 01 TokenID (SPKM_MIC_TOK)
- *
- * ContextID - encoded length not constant, calculated
- * ---------
- * [8] 03 Type 3
- * [9] ?? encoded length
- * [10] ?? ctxzbit
- * [11] contextid
- *
- * mic_header piece of checksum data ends here.
- *
- * int-cksum - encoded length not constant, calculated
- * ---------
- * [??] 03 Type 3
- * [??] ?? encoded length
- * [??] ?? md5zbit
- * [??] int-cksum (NID_md5 = 16)
- *
- * maximum SPKM-MIC innercontext token length =
- * 10 + encoded contextid_size(17 max) + 2 + encoded
- * cksum_size (17 maxfor NID_md5) = 46
- */
-
-/*
- * spkm3_mic_header()
- *
- * Prepare the SPKM_MIC_TOK mic-header for check-sum calculation
- * elen: 16 byte context id asn1 bitstring encoded length
- */
-void
-spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ctxdata, int elen, int zbit)
-{
- char *hptr = *hdrbuf;
- char *top = *hdrbuf;
-
- *(u8 *)hptr++ = 0x30;
- *(u8 *)hptr++ = elen + 7; /* on the wire header length */
-
- /* tokenid */
- *(u8 *)hptr++ = 0x02;
- *(u8 *)hptr++ = 0x02;
- *(u8 *)hptr++ = 0x01;
- *(u8 *)hptr++ = 0x01;
-
- /* coniextid */
- *(u8 *)hptr++ = 0x03;
- *(u8 *)hptr++ = elen + 1; /* add 1 to include zbit */
- *(u8 *)hptr++ = zbit;
- memcpy(hptr, ctxdata, elen);
- hptr += elen;
- *hdrlen = hptr - top;
-}
-
-/*
- * spkm3_mic_innercontext_token()
- *
- * *tokp points to the beginning of the SPKM_MIC token described
- * in rfc 2025, section 3.2.1:
- *
- * toklen is the inner token length
- */
-void
-spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit)
-{
- unsigned char *ict = *tokp;
-
- *(u8 *)ict++ = 0xa4;
- *(u8 *)ict++ = toklen;
- memcpy(ict, mic_hdr->data, mic_hdr->len);
- ict += mic_hdr->len;
-
- *(u8 *)ict++ = 0x03;
- *(u8 *)ict++ = md5elen + 1; /* add 1 to include zbit */
- *(u8 *)ict++ = md5zbit;
- memcpy(ict, md5cksum->data, md5elen);
-}
-
-u32
-spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **cksum)
-{
- struct xdr_netobj spkm3_ctx_id = {.len =0, .data = NULL};
- unsigned char *ptr = *tokp;
- int ctxelen;
- u32 ret = GSS_S_DEFECTIVE_TOKEN;
-
- /* spkm3 innercontext token preamble */
- if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
- dprintk("RPC: BAD SPKM ictoken preamble\n");
- goto out;
- }
-
- *mic_hdrlen = ptr[3];
-
- /* token type */
- if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) {
- dprintk("RPC: BAD asn1 SPKM3 token type\n");
- goto out;
- }
-
- /* only support SPKM_MIC_TOK */
- if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
- dprintk("RPC: ERROR unsupported SPKM3 token\n");
- goto out;
- }
-
- /* contextid */
- if (ptr[8] != 0x03) {
- dprintk("RPC: BAD SPKM3 asn1 context-id type\n");
- goto out;
- }
-
- ctxelen = ptr[9];
- if (ctxelen > 17) { /* length includes asn1 zbit octet */
- dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen);
- goto out;
- }
-
- /* ignore ptr[10] */
-
- if(!decode_asn1_bitstring(&spkm3_ctx_id, &ptr[11], ctxelen - 1, 16))
- goto out;
-
- /*
- * in the current implementation: the optional int-alg is not present
- * so the default int-alg (md5) is used the optional snd-seq field is
- * also not present
- */
-
- if (*mic_hdrlen != 6 + ctxelen) {
- dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only "
- "support default int-alg (should be absent) "
- "and do not support snd-seq\n", *mic_hdrlen);
- goto out;
- }
- /* checksum */
- *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
-
- ret = GSS_S_COMPLETE;
-out:
- kfree(spkm3_ctx_id.data);
- return ret;
-}
-
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
deleted file mode 100644
index cc21ee860bb6..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_unseal.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-/*
- * spkm3_read_token()
- *
- * only SPKM_MIC_TOK with md5 intg-alg is supported
- */
-u32
-spkm3_read_token(struct spkm3_ctx *ctx,
- struct xdr_netobj *read_token, /* checksum */
- struct xdr_buf *message_buffer, /* signbuf */
- int toktype)
-{
- s32 checksum_type;
- s32 code;
- struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- unsigned char *ptr = (unsigned char *)read_token->data;
- unsigned char *cksum;
- int bodysize, md5elen;
- int mic_hdrlen;
- u32 ret = GSS_S_DEFECTIVE_TOKEN;
-
- if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
- &bodysize, &ptr, read_token->len))
- goto out;
-
- /* decode the token */
-
- if (toktype != SPKM_MIC_TOK) {
- dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
- goto out;
- }
-
- if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
- goto out;
-
- if (*cksum++ != 0x03) {
- dprintk("RPC: spkm3_read_token BAD checksum type\n");
- goto out;
- }
- md5elen = *cksum++;
- cksum++; /* move past the zbit */
-
- if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
- goto out;
-
- /* HARD CODED FOR MD5 */
-
- /* compute the checksum of the message.
- * ptr + 2 = start of header piece of checksum
- * mic_hdrlen + 2 = length of header piece of checksum
- */
- ret = GSS_S_DEFECTIVE_TOKEN;
- if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported I-ALG "
- "algorithm\n");
- goto out;
- }
-
- checksum_type = CKSUMTYPE_HMAC_MD5;
-
- code = make_spkm3_checksum(checksum_type,
- &ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
- message_buffer, 0, &md5cksum);
-
- if (code)
- goto out;
-
- ret = GSS_S_BAD_SIG;
- code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
- if (code) {
- dprintk("RPC: bad MIC checksum\n");
- goto out;
- }
-
-
- /* XXX: need to add expiration and sequencing */
- ret = GSS_S_COMPLETE;
-out:
- kfree(wire_cksum.data);
- return ret;
-}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index cc385b3a59c2..dec2a6fc7c12 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -964,7 +964,7 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
if (rqstp->rq_gssclient == NULL)
return SVC_DENIED;
stat = svcauth_unix_set_client(rqstp);
- if (stat == SVC_DROP)
+ if (stat == SVC_DROP || stat == SVC_CLOSE)
return stat;
return SVC_OK;
}
@@ -1018,7 +1018,7 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
return SVC_DENIED;
memset(&rsikey, 0, sizeof(rsikey));
if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
- return SVC_DROP;
+ return SVC_CLOSE;
*authp = rpc_autherr_badverf;
if (svc_safe_getnetobj(argv, &tmpobj)) {
kfree(rsikey.in_handle.data);
@@ -1026,38 +1026,35 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
}
if (dup_netobj(&rsikey.in_token, &tmpobj)) {
kfree(rsikey.in_handle.data);
- return SVC_DROP;
+ return SVC_CLOSE;
}
/* Perform upcall, or find upcall result: */
rsip = rsi_lookup(&rsikey);
rsi_free(&rsikey);
if (!rsip)
- return SVC_DROP;
- switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
- case -EAGAIN:
- case -ETIMEDOUT:
- case -ENOENT:
+ return SVC_CLOSE;
+ if (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
/* No upcall result: */
- return SVC_DROP;
- case 0:
- ret = SVC_DROP;
- /* Got an answer to the upcall; use it: */
- if (gss_write_init_verf(rqstp, rsip))
- goto out;
- if (resv->iov_len + 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, RPC_SUCCESS);
- if (svc_safe_putnetobj(resv, &rsip->out_handle))
- goto out;
- if (resv->iov_len + 3 * 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, rsip->major_status);
- svc_putnl(resv, rsip->minor_status);
- svc_putnl(resv, GSS_SEQ_WIN);
- if (svc_safe_putnetobj(resv, &rsip->out_token))
- goto out;
- }
+ return SVC_CLOSE;
+
+ ret = SVC_CLOSE;
+ /* Got an answer to the upcall; use it: */
+ if (gss_write_init_verf(rqstp, rsip))
+ goto out;
+ if (resv->iov_len + 4 > PAGE_SIZE)
+ goto out;
+ svc_putnl(resv, RPC_SUCCESS);
+ if (svc_safe_putnetobj(resv, &rsip->out_handle))
+ goto out;
+ if (resv->iov_len + 3 * 4 > PAGE_SIZE)
+ goto out;
+ svc_putnl(resv, rsip->major_status);
+ svc_putnl(resv, rsip->minor_status);
+ svc_putnl(resv, GSS_SEQ_WIN);
+ if (svc_safe_putnetobj(resv, &rsip->out_token))
+ goto out;
+
ret = SVC_COMPLETE;
out:
cache_put(&rsip->h, &rsi_cache);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7dce81a926c5..e433e7580e27 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -33,15 +33,16 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include "netns.h"
#define RPCDBG_FACILITY RPCDBG_CACHE
-static int cache_defer_req(struct cache_req *req, struct cache_head *item);
+static void cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
- time_t now = get_seconds();
+ time_t now = seconds_since_boot();
h->next = NULL;
h->flags = 0;
kref_init(&h->ref);
@@ -51,7 +52,7 @@ static void cache_init(struct cache_head *h)
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
- return (h->expiry_time < get_seconds()) ||
+ return (h->expiry_time < seconds_since_boot()) ||
(detail->flush_time > h->last_refresh);
}
@@ -126,7 +127,7 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
head->expiry_time = expiry;
- head->last_refresh = get_seconds();
+ head->last_refresh = seconds_since_boot();
set_bit(CACHE_VALID, &head->flags);
}
@@ -237,7 +238,7 @@ int cache_check(struct cache_detail *detail,
/* now see if we want to start an upcall */
refresh_age = (h->expiry_time - h->last_refresh);
- age = get_seconds() - h->last_refresh;
+ age = seconds_since_boot() - h->last_refresh;
if (rqstp == NULL) {
if (rv == -EAGAIN)
@@ -252,7 +253,7 @@ int cache_check(struct cache_detail *detail,
cache_revisit_request(h);
if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags);
- cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
+ cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
cache_fresh_unlocked(h, detail);
rv = -ENOENT;
}
@@ -267,7 +268,8 @@ int cache_check(struct cache_detail *detail,
}
if (rv == -EAGAIN) {
- if (cache_defer_req(rqstp, h) < 0) {
+ cache_defer_req(rqstp, h);
+ if (!test_bit(CACHE_PENDING, &h->flags)) {
/* Request is not deferred */
rv = cache_is_valid(detail, h);
if (rv == -EAGAIN)
@@ -387,11 +389,11 @@ static int cache_clean(void)
return -1;
}
current_detail = list_entry(next, struct cache_detail, others);
- if (current_detail->nextcheck > get_seconds())
+ if (current_detail->nextcheck > seconds_since_boot())
current_index = current_detail->hash_size;
else {
current_index = 0;
- current_detail->nextcheck = get_seconds()+30*60;
+ current_detail->nextcheck = seconds_since_boot()+30*60;
}
}
@@ -476,7 +478,7 @@ EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
detail->flush_time = LONG_MAX;
- detail->nextcheck = get_seconds();
+ detail->nextcheck = seconds_since_boot();
cache_flush();
detail->flush_time = 1;
}
@@ -505,81 +507,155 @@ EXPORT_SYMBOL_GPL(cache_purge);
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+ hlist_del_init(&dreq->hash);
+ if (!list_empty(&dreq->recent)) {
+ list_del_init(&dreq->recent);
+ cache_defer_cnt--;
+ }
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
- struct cache_deferred_req *dreq, *discard;
int hash = DFR_HASH(item);
- if (cache_defer_cnt >= DFR_MAX) {
- /* too much in the cache, randomly drop this one,
- * or continue and drop the oldest below
- */
- if (net_random()&1)
- return -ENOMEM;
- }
- dreq = req->defer(req);
- if (dreq == NULL)
- return -ENOMEM;
+ INIT_LIST_HEAD(&dreq->recent);
+ hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
+}
+
+static void setup_deferral(struct cache_deferred_req *dreq,
+ struct cache_head *item,
+ int count_me)
+{
dreq->item = item;
spin_lock(&cache_defer_lock);
- list_add(&dreq->recent, &cache_defer_list);
-
- if (cache_defer_hash[hash].next == NULL)
- INIT_LIST_HEAD(&cache_defer_hash[hash]);
- list_add(&dreq->hash, &cache_defer_hash[hash]);
+ __hash_deferred_req(dreq, item);
- /* it is in, now maybe clean up */
- discard = NULL;
- if (++cache_defer_cnt > DFR_MAX) {
- discard = list_entry(cache_defer_list.prev,
- struct cache_deferred_req, recent);
- list_del_init(&discard->recent);
- list_del_init(&discard->hash);
- cache_defer_cnt--;
+ if (count_me) {
+ cache_defer_cnt++;
+ list_add(&dreq->recent, &cache_defer_list);
}
+
spin_unlock(&cache_defer_lock);
+}
+
+struct thread_deferred_req {
+ struct cache_deferred_req handle;
+ struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+ struct thread_deferred_req *dr =
+ container_of(dreq, struct thread_deferred_req, handle);
+ complete(&dr->completion);
+}
+
+static void cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+ struct thread_deferred_req sleeper;
+ struct cache_deferred_req *dreq = &sleeper.handle;
+
+ sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+ dreq->revisit = cache_restart_thread;
+
+ setup_deferral(dreq, item, 0);
+
+ if (!test_bit(CACHE_PENDING, &item->flags) ||
+ wait_for_completion_interruptible_timeout(
+ &sleeper.completion, req->thread_wait) <= 0) {
+ /* The completion wasn't completed, so we need
+ * to clean up
+ */
+ spin_lock(&cache_defer_lock);
+ if (!hlist_unhashed(&sleeper.handle.hash)) {
+ __unhash_deferred_req(&sleeper.handle);
+ spin_unlock(&cache_defer_lock);
+ } else {
+ /* cache_revisit_request already removed
+ * this from the hash table, but hasn't
+ * called ->revisit yet. It will very soon
+ * and we need to wait for it.
+ */
+ spin_unlock(&cache_defer_lock);
+ wait_for_completion(&sleeper.completion);
+ }
+ }
+}
+
+static void cache_limit_defers(void)
+{
+ /* Make sure we haven't exceeded the limit of allowed deferred
+ * requests.
+ */
+ struct cache_deferred_req *discard = NULL;
+
+ if (cache_defer_cnt <= DFR_MAX)
+ return;
+
+ spin_lock(&cache_defer_lock);
+
+ /* Consider removing either the first or the last */
+ if (cache_defer_cnt > DFR_MAX) {
+ if (net_random() & 1)
+ discard = list_entry(cache_defer_list.next,
+ struct cache_deferred_req, recent);
+ else
+ discard = list_entry(cache_defer_list.prev,
+ struct cache_deferred_req, recent);
+ __unhash_deferred_req(discard);
+ }
+ spin_unlock(&cache_defer_lock);
if (discard)
- /* there was one too many */
discard->revisit(discard, 1);
+}
- if (!test_bit(CACHE_PENDING, &item->flags)) {
- /* must have just been validated... */
- cache_revisit_request(item);
- return -EAGAIN;
+static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+ struct cache_deferred_req *dreq;
+
+ if (req->thread_wait) {
+ cache_wait_req(req, item);
+ if (!test_bit(CACHE_PENDING, &item->flags))
+ return;
}
- return 0;
+ dreq = req->defer(req);
+ if (dreq == NULL)
+ return;
+ setup_deferral(dreq, item, 1);
+ if (!test_bit(CACHE_PENDING, &item->flags))
+ /* Bit could have been cleared before we managed to
+ * set up the deferral, so we need to revisit just in case
+ */
+ cache_revisit_request(item);
+
+ cache_limit_defers();
}
static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
struct list_head pending;
-
- struct list_head *lp;
+ struct hlist_node *lp, *tmp;
int hash = DFR_HASH(item);
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
- lp = cache_defer_hash[hash].next;
- if (lp) {
- while (lp != &cache_defer_hash[hash]) {
- dreq = list_entry(lp, struct cache_deferred_req, hash);
- lp = lp->next;
- if (dreq->item == item) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
- }
+ hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+ if (dreq->item == item) {
+ __unhash_deferred_req(dreq);
+ list_add(&dreq->recent, &pending);
}
- }
+
spin_unlock(&cache_defer_lock);
while (!list_empty(&pending)) {
@@ -600,9 +676,8 @@ void cache_clean_deferred(void *owner)
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
+ __unhash_deferred_req(dreq);
+ list_add(&dreq->recent, &pending);
}
}
spin_unlock(&cache_defer_lock);
@@ -901,7 +976,7 @@ static int cache_release(struct inode *inode, struct file *filp,
filp->private_data = NULL;
kfree(rp);
- cd->last_close = get_seconds();
+ cd->last_close = seconds_since_boot();
atomic_dec(&cd->readers);
}
module_put(cd->owner);
@@ -1014,6 +1089,23 @@ static void warn_no_listener(struct cache_detail *detail)
}
}
+static bool cache_listeners_exist(struct cache_detail *detail)
+{
+ if (atomic_read(&detail->readers))
+ return true;
+ if (detail->last_close == 0)
+ /* This cache was never opened */
+ return false;
+ if (detail->last_close < seconds_since_boot() - 30)
+ /*
+ * We allow for the possibility that someone might
+ * restart a userspace daemon without restarting the
+ * server; but after 30 seconds, we give up.
+ */
+ return false;
+ return true;
+}
+
/*
* register an upcall request to user-space and queue it up for read() by the
* upcall daemon.
@@ -1032,10 +1124,9 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
char *bp;
int len;
- if (atomic_read(&detail->readers) == 0 &&
- detail->last_close < get_seconds() - 30) {
- warn_no_listener(detail);
- return -EINVAL;
+ if (!cache_listeners_exist(detail)) {
+ warn_no_listener(detail);
+ return -EINVAL;
}
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -1094,13 +1185,19 @@ int qword_get(char **bpp, char *dest, int bufsize)
if (bp[0] == '\\' && bp[1] == 'x') {
/* HEX STRING */
bp += 2;
- while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
- int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
- bp++;
- byte <<= 4;
- byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
- *dest++ = byte;
- bp++;
+ while (len < bufsize) {
+ int h, l;
+
+ h = hex_to_bin(bp[0]);
+ if (h < 0)
+ break;
+
+ l = hex_to_bin(bp[1]);
+ if (l < 0)
+ break;
+
+ *dest++ = (h << 4) | l;
+ bp += 2;
len++;
}
} else {
@@ -1218,7 +1315,8 @@ static int c_show(struct seq_file *m, void *p)
ifdebug(CACHE)
seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
- cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
+ convert_to_wallclock(cp->expiry_time),
+ atomic_read(&cp->ref.refcount), cp->flags);
cache_get(cp);
if (cache_check(cd, cp, NULL))
/* cache_check does a cache_put on failure */
@@ -1284,7 +1382,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
unsigned long p = *ppos;
size_t len;
- sprintf(tbuf, "%lu\n", cd->flush_time);
+ sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
len = strlen(tbuf);
if (p >= len)
return 0;
@@ -1302,19 +1400,20 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
struct cache_detail *cd)
{
char tbuf[20];
- char *ep;
- long flushtime;
+ char *bp, *ep;
+
if (*ppos || count > sizeof(tbuf)-1)
return -EINVAL;
if (copy_from_user(tbuf, buf, count))
return -EFAULT;
tbuf[count] = 0;
- flushtime = simple_strtoul(tbuf, &ep, 0);
+ simple_strtoul(tbuf, &ep, 0);
if (*ep && *ep != '\n')
return -EINVAL;
- cd->flush_time = flushtime;
- cd->nextcheck = get_seconds();
+ bp = tbuf;
+ cd->flush_time = get_expiry(&bp);
+ cd->nextcheck = seconds_since_boot();
cache_flush();
*ppos += count;
@@ -1438,8 +1537,10 @@ static const struct file_operations cache_flush_operations_procfs = {
.llseek = no_llseek,
};
-static void remove_cache_proc_entries(struct cache_detail *cd)
+static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
+ struct sunrpc_net *sn;
+
if (cd->u.procfs.proc_ent == NULL)
return;
if (cd->u.procfs.flush_ent)
@@ -1449,15 +1550,18 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
if (cd->u.procfs.content_ent)
remove_proc_entry("content", cd->u.procfs.proc_ent);
cd->u.procfs.proc_ent = NULL;
- remove_proc_entry(cd->name, proc_net_rpc);
+ sn = net_generic(net, sunrpc_net_id);
+ remove_proc_entry(cd->name, sn->proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
+ struct sunrpc_net *sn;
- cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
+ sn = net_generic(net, sunrpc_net_id);
+ cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->u.procfs.proc_ent == NULL)
goto out_nomem;
cd->u.procfs.channel_ent = NULL;
@@ -1488,11 +1592,11 @@ static int create_cache_proc_entries(struct cache_detail *cd)
}
return 0;
out_nomem:
- remove_cache_proc_entries(cd);
+ remove_cache_proc_entries(cd, net);
return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
return 0;
}
@@ -1503,23 +1607,33 @@ void __init cache_initialize(void)
INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}
-int cache_register(struct cache_detail *cd)
+int cache_register_net(struct cache_detail *cd, struct net *net)
{
int ret;
sunrpc_init_cache_detail(cd);
- ret = create_cache_proc_entries(cd);
+ ret = create_cache_proc_entries(cd, net);
if (ret)
sunrpc_destroy_cache_detail(cd);
return ret;
}
+
+int cache_register(struct cache_detail *cd)
+{
+ return cache_register_net(cd, &init_net);
+}
EXPORT_SYMBOL_GPL(cache_register);
-void cache_unregister(struct cache_detail *cd)
+void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
- remove_cache_proc_entries(cd);
+ remove_cache_proc_entries(cd, net);
sunrpc_destroy_cache_detail(cd);
}
+
+void cache_unregister(struct cache_detail *cd)
+{
+ cache_unregister_net(cd, &init_net);
+}
EXPORT_SYMBOL_GPL(cache_unregister);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc5b8ccc8be..9dab9573be41 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -284,6 +284,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
struct rpc_xprt *xprt;
struct rpc_clnt *clnt;
struct xprt_create xprtargs = {
+ .net = args->net,
.ident = args->protocol,
.srcaddr = args->saddress,
.dstaddr = args->address,
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
new file mode 100644
index 000000000000..d013bf211cae
--- /dev/null
+++ b/net/sunrpc/netns.h
@@ -0,0 +1,19 @@
+#ifndef __SUNRPC_NETNS_H__
+#define __SUNRPC_NETNS_H__
+
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+struct cache_detail;
+
+struct sunrpc_net {
+ struct proc_dir_entry *proc_net_rpc;
+ struct cache_detail *ip_map_cache;
+};
+
+extern int sunrpc_net_id;
+
+int ip_map_cache_create(struct net *);
+void ip_map_cache_destroy(struct net *);
+
+#endif
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 52f252432144..7df92d237cb8 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -445,6 +445,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
struct inode *inode = new_inode(sb);
if (!inode)
return NULL;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch(mode & S_IFMT) {
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 63ec116b4dd4..fa6d7ca2c851 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -177,6 +177,7 @@ static DEFINE_MUTEX(rpcb_create_local_mutex);
static int rpcb_create_local(void)
{
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = XPRT_TRANSPORT_TCP,
.address = (struct sockaddr *)&rpcb_inaddr_loopback,
.addrsize = sizeof(rpcb_inaddr_loopback),
@@ -229,6 +230,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
size_t salen, int proto, u32 version)
{
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = proto,
.address = srvaddr,
.addrsize = salen,
@@ -248,7 +250,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT);
break;
default:
- return NULL;
+ return ERR_PTR(-EAFNOSUPPORT);
}
return rpc_create(&args);
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index ea1046f3f9a3..f71a73107ae9 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -22,11 +22,10 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
-#include <net/net_namespace.h>
-#define RPCDBG_FACILITY RPCDBG_MISC
+#include "netns.h"
-struct proc_dir_entry *proc_net_rpc = NULL;
+#define RPCDBG_FACILITY RPCDBG_MISC
/*
* Get RPC client stats
@@ -218,10 +217,11 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats);
static inline struct proc_dir_entry *
do_register(const char *name, void *data, const struct file_operations *fops)
{
- rpc_proc_init();
- dprintk("RPC: registering /proc/net/rpc/%s\n", name);
+ struct sunrpc_net *sn;
- return proc_create_data(name, 0, proc_net_rpc, fops, data);
+ dprintk("RPC: registering /proc/net/rpc/%s\n", name);
+ sn = net_generic(&init_net, sunrpc_net_id);
+ return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
}
struct proc_dir_entry *
@@ -234,7 +234,10 @@ EXPORT_SYMBOL_GPL(rpc_proc_register);
void
rpc_proc_unregister(const char *name)
{
- remove_proc_entry(name, proc_net_rpc);
+ struct sunrpc_net *sn;
+
+ sn = net_generic(&init_net, sunrpc_net_id);
+ remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);
@@ -248,25 +251,29 @@ EXPORT_SYMBOL_GPL(svc_proc_register);
void
svc_proc_unregister(const char *name)
{
- remove_proc_entry(name, proc_net_rpc);
+ struct sunrpc_net *sn;
+
+ sn = net_generic(&init_net, sunrpc_net_id);
+ remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);
-void
-rpc_proc_init(void)
+int rpc_proc_init(struct net *net)
{
+ struct sunrpc_net *sn;
+
dprintk("RPC: registering /proc/net/rpc\n");
- if (!proc_net_rpc)
- proc_net_rpc = proc_mkdir("rpc", init_net.proc_net);
+ sn = net_generic(net, sunrpc_net_id);
+ sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
+ if (sn->proc_net_rpc == NULL)
+ return -ENOMEM;
+
+ return 0;
}
-void
-rpc_proc_exit(void)
+void rpc_proc_exit(struct net *net)
{
dprintk("RPC: unregistering /proc/net/rpc\n");
- if (proc_net_rpc) {
- proc_net_rpc = NULL;
- remove_proc_entry("rpc", init_net.proc_net);
- }
+ remove_proc_entry("rpc", net->proc_net);
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index c0d085013a2b..9d0809160994 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -22,7 +22,44 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/xprtsock.h>
-extern struct cache_detail ip_map_cache, unix_gid_cache;
+#include "netns.h"
+
+int sunrpc_net_id;
+
+static __net_init int sunrpc_init_net(struct net *net)
+{
+ int err;
+
+ err = rpc_proc_init(net);
+ if (err)
+ goto err_proc;
+
+ err = ip_map_cache_create(net);
+ if (err)
+ goto err_ipmap;
+
+ return 0;
+
+err_ipmap:
+ rpc_proc_exit(net);
+err_proc:
+ return err;
+}
+
+static __net_exit void sunrpc_exit_net(struct net *net)
+{
+ ip_map_cache_destroy(net);
+ rpc_proc_exit(net);
+}
+
+static struct pernet_operations sunrpc_net_ops = {
+ .init = sunrpc_init_net,
+ .exit = sunrpc_exit_net,
+ .id = &sunrpc_net_id,
+ .size = sizeof(struct sunrpc_net),
+};
+
+extern struct cache_detail unix_gid_cache;
extern void cleanup_rpcb_clnt(void);
@@ -38,18 +75,22 @@ init_sunrpc(void)
err = rpcauth_init_module();
if (err)
goto out3;
+
+ cache_initialize();
+
+ err = register_pernet_subsys(&sunrpc_net_ops);
+ if (err)
+ goto out4;
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
-#ifdef CONFIG_PROC_FS
- rpc_proc_init();
-#endif
- cache_initialize();
- cache_register(&ip_map_cache);
cache_register(&unix_gid_cache);
svc_init_xprt_sock(); /* svc sock transport */
init_socket_xprt(); /* clnt sock transport */
return 0;
+
+out4:
+ rpcauth_remove_module();
out3:
rpc_destroy_mempool();
out2:
@@ -67,14 +108,11 @@ cleanup_sunrpc(void)
svc_cleanup_xprt_sock();
unregister_rpc_pipefs();
rpc_destroy_mempool();
- cache_unregister(&ip_map_cache);
cache_unregister(&unix_gid_cache);
+ unregister_pernet_subsys(&sunrpc_net_ops);
#ifdef RPC_DEBUG
rpc_unregister_sysctl();
#endif
-#ifdef CONFIG_PROC_FS
- rpc_proc_exit();
-#endif
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d9017d64597e..6359c42c4941 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto err_bad;
case SVC_DENIED:
goto err_bad_auth;
+ case SVC_CLOSE:
+ if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ svc_close_xprt(rqstp->rq_xprt);
case SVC_DROP:
goto dropit;
case SVC_COMPLETE:
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cbc084939dd8..c82fe739fbdc 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -100,16 +100,14 @@ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
*/
int svc_print_xprts(char *buf, int maxlen)
{
- struct list_head *le;
+ struct svc_xprt_class *xcl;
char tmpstr[80];
int len = 0;
buf[0] = '\0';
spin_lock(&svc_xprt_class_lock);
- list_for_each(le, &svc_xprt_class_list) {
+ list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
int slen;
- struct svc_xprt_class *xcl =
- list_entry(le, struct svc_xprt_class, xcl_list);
sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
slen = strlen(tmpstr);
@@ -128,9 +126,9 @@ static void svc_xprt_free(struct kref *kref)
struct svc_xprt *xprt =
container_of(kref, struct svc_xprt, xpt_ref);
struct module *owner = xprt->xpt_class->xcl_owner;
- if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
- xprt->xpt_auth_cache != NULL)
- svcauth_unix_info_release(xprt->xpt_auth_cache);
+ if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
+ svcauth_unix_info_release(xprt);
+ put_net(xprt->xpt_net);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -156,15 +154,18 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
INIT_LIST_HEAD(&xprt->xpt_list);
INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
+ INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
spin_lock_init(&xprt->xpt_lock);
set_bit(XPT_BUSY, &xprt->xpt_flags);
rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
+ xprt->xpt_net = get_net(&init_net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
struct svc_serv *serv,
+ struct net *net,
const int family,
const unsigned short port,
int flags)
@@ -199,12 +200,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
return ERR_PTR(-EAFNOSUPPORT);
}
- return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
+ return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}
int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
- const int family, const unsigned short port,
- int flags)
+ struct net *net, const int family,
+ const unsigned short port, int flags)
{
struct svc_xprt_class *xcl;
@@ -220,7 +221,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
goto err;
spin_unlock(&svc_xprt_class_lock);
- newxprt = __svc_xpo_create(xcl, serv, family, port, flags);
+ newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
if (IS_ERR(newxprt)) {
module_put(xcl->xcl_owner);
return PTR_ERR(newxprt);
@@ -329,12 +330,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
"svc_xprt_enqueue: "
"threads and transports both waiting??\n");
- if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
- /* Don't enqueue dead transports */
- dprintk("svc: transport %p is dead, not enqueued\n", xprt);
- goto out_unlock;
- }
-
pool->sp_stats.packets++;
/* Mark transport as busy. It will remain in this state until
@@ -651,6 +646,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (signalled() || kthread_should_stop())
return -EINTR;
+ /* Normally we will wait up to 5 seconds for any required
+ * cache information to be provided.
+ */
+ rqstp->rq_chandle.thread_wait = 5*HZ;
+
spin_lock_bh(&pool->sp_lock);
xprt = svc_xprt_dequeue(pool);
if (xprt) {
@@ -658,6 +658,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+ /* As there is a shortage of threads and this request
+ * had to be queued, don't allow the thread to wait so
+ * long for cache updates.
+ */
+ rqstp->rq_chandle.thread_wait = 1*HZ;
} else {
/* No data pending. Go to sleep */
svc_thread_enqueue(pool, rqstp);
@@ -868,6 +874,19 @@ static void svc_age_temp_xprts(unsigned long closure)
mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
+static void call_xpt_users(struct svc_xprt *xprt)
+{
+ struct svc_xpt_user *u;
+
+ spin_lock(&xprt->xpt_lock);
+ while (!list_empty(&xprt->xpt_users)) {
+ u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
+ list_del(&u->list);
+ u->callback(u);
+ }
+ spin_unlock(&xprt->xpt_lock);
+}
+
/*
* Remove a dead transport
*/
@@ -878,7 +897,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
/* Only do this once */
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
- return;
+ BUG();
dprintk("svc: svc_delete_xprt(%p)\n", xprt);
xprt->xpt_ops->xpo_detach(xprt);
@@ -900,6 +919,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
while ((dr = svc_deferred_dequeue(xprt)) != NULL)
kfree(dr);
+ call_xpt_users(xprt);
svc_xprt_put(xprt);
}
@@ -910,10 +930,7 @@ void svc_close_xprt(struct svc_xprt *xprt)
/* someone else will have to effect the close */
return;
- svc_xprt_get(xprt);
svc_delete_xprt(xprt);
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 207311610988..560677d187f1 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -18,6 +18,8 @@
#include <linux/sunrpc/clnt.h>
+#include "netns.h"
+
/*
* AUTHUNIX and AUTHNULL credentials are both handled here.
* AUTHNULL is treated just like AUTHUNIX except that the uid/gid
@@ -92,7 +94,6 @@ struct ip_map {
struct unix_domain *m_client;
int m_add_change;
};
-static struct cache_head *ip_table[IP_HASHMAX];
static void ip_map_put(struct kref *kref)
{
@@ -178,8 +179,8 @@ static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}
-static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
-static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
+static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
+static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
static int ip_map_parse(struct cache_detail *cd,
char *mesg, int mlen)
@@ -219,10 +220,9 @@ static int ip_map_parse(struct cache_detail *cd,
switch (address.sa.sa_family) {
case AF_INET:
/* Form a mapped IPv4 address in sin6 */
- memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
- sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
- sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr;
+ ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
+ &sin6.sin6_addr);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
@@ -249,9 +249,9 @@ static int ip_map_parse(struct cache_detail *cd,
dom = NULL;
/* IPv6 scope IDs are ignored for now */
- ipmp = ip_map_lookup(class, &sin6.sin6_addr);
+ ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
if (ipmp) {
- err = ip_map_update(ipmp,
+ err = __ip_map_update(cd, ipmp,
container_of(dom, struct unix_domain, h),
expiry);
} else
@@ -294,29 +294,15 @@ static int ip_map_show(struct seq_file *m,
}
-struct cache_detail ip_map_cache = {
- .owner = THIS_MODULE,
- .hash_size = IP_HASHMAX,
- .hash_table = ip_table,
- .name = "auth.unix.ip",
- .cache_put = ip_map_put,
- .cache_upcall = ip_map_upcall,
- .cache_parse = ip_map_parse,
- .cache_show = ip_map_show,
- .match = ip_map_match,
- .init = ip_map_init,
- .update = update,
- .alloc = ip_map_alloc,
-};
-
-static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
+static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
+ struct in6_addr *addr)
{
struct ip_map ip;
struct cache_head *ch;
strcpy(ip.m_class, class);
ipv6_addr_copy(&ip.m_addr, addr);
- ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
+ ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(*addr));
@@ -326,7 +312,17 @@ static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
return NULL;
}
-static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
+static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
+ struct in6_addr *addr)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ return __ip_map_lookup(sn->ip_map_cache, class, addr);
+}
+
+static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
+ struct unix_domain *udom, time_t expiry)
{
struct ip_map ip;
struct cache_head *ch;
@@ -344,17 +340,25 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex
ip.m_add_change++;
}
ip.h.expiry_time = expiry;
- ch = sunrpc_cache_update(&ip_map_cache,
- &ip.h, &ipm->h,
+ ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
hash_ip6(ipm->m_addr));
if (!ch)
return -ENOMEM;
- cache_put(ch, &ip_map_cache);
+ cache_put(ch, cd);
return 0;
}
-int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
+static inline int ip_map_update(struct net *net, struct ip_map *ipm,
+ struct unix_domain *udom, time_t expiry)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
+}
+
+int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
{
struct unix_domain *udom;
struct ip_map *ipmp;
@@ -362,10 +366,10 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
- ipmp = ip_map_lookup("nfsd", addr);
+ ipmp = ip_map_lookup(net, "nfsd", addr);
if (ipmp)
- return ip_map_update(ipmp, udom, NEVER);
+ return ip_map_update(net, ipmp, udom, NEVER);
else
return -ENOMEM;
}
@@ -383,16 +387,18 @@ int auth_unix_forget_old(struct auth_domain *dom)
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);
-struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
+struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
{
struct ip_map *ipm;
struct auth_domain *rv;
+ struct sunrpc_net *sn;
- ipm = ip_map_lookup("nfsd", addr);
+ sn = net_generic(net, sunrpc_net_id);
+ ipm = ip_map_lookup(net, "nfsd", addr);
if (!ipm)
return NULL;
- if (cache_check(&ip_map_cache, &ipm->h, NULL))
+ if (cache_check(sn->ip_map_cache, &ipm->h, NULL))
return NULL;
if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
@@ -403,22 +409,29 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
rv = &ipm->m_client->h;
kref_get(&rv->ref);
}
- cache_put(&ipm->h, &ip_map_cache);
+ cache_put(&ipm->h, sn->ip_map_cache);
return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
void svcauth_unix_purge(void)
{
- cache_purge(&ip_map_cache);
+ struct net *net;
+
+ for_each_net(net) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ cache_purge(sn->ip_map_cache);
+ }
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
static inline struct ip_map *
-ip_map_cached_get(struct svc_rqst *rqstp)
+ip_map_cached_get(struct svc_xprt *xprt)
{
struct ip_map *ipm = NULL;
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct sunrpc_net *sn;
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
spin_lock(&xprt->xpt_lock);
@@ -430,9 +443,10 @@ ip_map_cached_get(struct svc_rqst *rqstp)
* remembered, e.g. by a second mount from the
* same IP address.
*/
+ sn = net_generic(xprt->xpt_net, sunrpc_net_id);
xprt->xpt_auth_cache = NULL;
spin_unlock(&xprt->xpt_lock);
- cache_put(&ipm->h, &ip_map_cache);
+ cache_put(&ipm->h, sn->ip_map_cache);
return NULL;
}
cache_get(&ipm->h);
@@ -443,10 +457,8 @@ ip_map_cached_get(struct svc_rqst *rqstp)
}
static inline void
-ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
+ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
- struct svc_xprt *xprt = rqstp->rq_xprt;
-
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
spin_lock(&xprt->xpt_lock);
if (xprt->xpt_auth_cache == NULL) {
@@ -456,15 +468,26 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
}
spin_unlock(&xprt->xpt_lock);
}
- if (ipm)
- cache_put(&ipm->h, &ip_map_cache);
+ if (ipm) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(xprt->xpt_net, sunrpc_net_id);
+ cache_put(&ipm->h, sn->ip_map_cache);
+ }
}
void
-svcauth_unix_info_release(void *info)
+svcauth_unix_info_release(struct svc_xprt *xpt)
{
- struct ip_map *ipm = info;
- cache_put(&ipm->h, &ip_map_cache);
+ struct ip_map *ipm;
+
+ ipm = xpt->xpt_auth_cache;
+ if (ipm != NULL) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(xpt->xpt_net, sunrpc_net_id);
+ cache_put(&ipm->h, sn->ip_map_cache);
+ }
}
/****************************************************************************
@@ -674,6 +697,8 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
switch (ret) {
case -ENOENT:
return ERR_PTR(-ENOENT);
+ case -ETIMEDOUT:
+ return ERR_PTR(-ESHUTDOWN);
case 0:
gi = get_group_info(ug->gi);
cache_put(&ug->h, &unix_gid_cache);
@@ -691,6 +716,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
struct ip_map *ipm;
struct group_info *gi;
struct svc_cred *cred = &rqstp->rq_cred;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct net *net = xprt->xpt_net;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
switch (rqstp->rq_addr.ss_family) {
case AF_INET:
@@ -709,26 +737,27 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
if (rqstp->rq_proc == 0)
return SVC_OK;
- ipm = ip_map_cached_get(rqstp);
+ ipm = ip_map_cached_get(xprt);
if (ipm == NULL)
- ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
+ ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
&sin6->sin6_addr);
if (ipm == NULL)
return SVC_DENIED;
- switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
+ switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
default:
BUG();
- case -EAGAIN:
case -ETIMEDOUT:
+ return SVC_CLOSE;
+ case -EAGAIN:
return SVC_DROP;
case -ENOENT:
return SVC_DENIED;
case 0:
rqstp->rq_client = &ipm->m_client->h;
kref_get(&rqstp->rq_client->ref);
- ip_map_cached_put(rqstp, ipm);
+ ip_map_cached_put(xprt, ipm);
break;
}
@@ -736,6 +765,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
switch (PTR_ERR(gi)) {
case -EAGAIN:
return SVC_DROP;
+ case -ESHUTDOWN:
+ return SVC_CLOSE;
case -ENOENT:
break;
default:
@@ -776,7 +807,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
cred->cr_gid = (gid_t) -1;
cred->cr_group_info = groups_alloc(0);
if (cred->cr_group_info == NULL)
- return SVC_DROP; /* kmalloc failure - client must retry */
+ return SVC_CLOSE; /* kmalloc failure - client must retry */
/* Put NULL verifier */
svc_putnl(resv, RPC_AUTH_NULL);
@@ -840,7 +871,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
goto badcred;
cred->cr_group_info = groups_alloc(slen);
if (cred->cr_group_info == NULL)
- return SVC_DROP;
+ return SVC_CLOSE;
for (i = 0; i < slen; i++)
GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
@@ -886,3 +917,56 @@ struct auth_ops svcauth_unix = {
.set_client = svcauth_unix_set_client,
};
+int ip_map_cache_create(struct net *net)
+{
+ int err = -ENOMEM;
+ struct cache_detail *cd;
+ struct cache_head **tbl;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
+ if (cd == NULL)
+ goto err_cd;
+
+ tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_tbl;
+
+ cd->owner = THIS_MODULE,
+ cd->hash_size = IP_HASHMAX,
+ cd->hash_table = tbl,
+ cd->name = "auth.unix.ip",
+ cd->cache_put = ip_map_put,
+ cd->cache_upcall = ip_map_upcall,
+ cd->cache_parse = ip_map_parse,
+ cd->cache_show = ip_map_show,
+ cd->match = ip_map_match,
+ cd->init = ip_map_init,
+ cd->update = update,
+ cd->alloc = ip_map_alloc,
+
+ err = cache_register_net(cd, net);
+ if (err)
+ goto err_reg;
+
+ sn->ip_map_cache = cd;
+ return 0;
+
+err_reg:
+ kfree(tbl);
+err_tbl:
+ kfree(cd);
+err_cd:
+ return err;
+}
+
+void ip_map_cache_destroy(struct net *net)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ cache_purge(sn->ip_map_cache);
+ cache_unregister_net(sn->ip_map_cache, net);
+ kfree(sn->ip_map_cache->hash_table);
+ kfree(sn->ip_map_cache);
+}
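ip_map_cache_create() and ip_map_cache_destroy() give each network namespace its own auth.unix.ip cache in place of the old static ip_table. A minimal sketch of driving them from pernet operations follows; the ops structure and callback names are made up for illustration, and the patch itself hooks these into the existing sunrpc per-net init code elsewhere.

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static __net_init int ipmap_net_init(struct net *net)
{
	return ip_map_cache_create(net);
}

static __net_exit void ipmap_net_exit(struct net *net)
{
	ip_map_cache_destroy(net);
}

static struct pernet_operations ipmap_net_ops = {
	.init = ipmap_net_init,
	.exit = ipmap_net_exit,
};

/* registered once at module load: register_pernet_subsys(&ipmap_net_ops) */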
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7e534dd09077..07919e16be3e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -64,7 +64,8 @@ static void svc_tcp_sock_detach(struct svc_xprt *);
static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
- struct sockaddr *, int, int);
+ struct net *, struct sockaddr *,
+ int, int);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];
@@ -657,10 +658,11 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
}
static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
- return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags);
+ return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
}
static struct svc_xprt_ops svc_udp_ops = {
@@ -1133,9 +1135,6 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
reclen = htonl(0x80000000|((xbufp->len ) - 4));
memcpy(xbufp->head[0].iov_base, &reclen, 4);
- if (test_bit(XPT_DEAD, &rqstp->rq_xprt->xpt_flags))
- return -ENOTCONN;
-
sent = svc_sendto(rqstp, &rqstp->rq_res);
if (sent != xbufp->len) {
printk(KERN_NOTICE
@@ -1178,10 +1177,11 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
- return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags);
+ return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
static struct svc_xprt_ops svc_tcp_ops = {
@@ -1258,19 +1258,13 @@ void svc_sock_update_bufs(struct svc_serv *serv)
* The number of server threads has changed. Update
* rcvbuf and sndbuf accordingly on all sockets
*/
- struct list_head *le;
+ struct svc_sock *svsk;
spin_lock_bh(&serv->sv_lock);
- list_for_each(le, &serv->sv_permsocks) {
- struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_xprt.xpt_list);
+ list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- }
- list_for_each(le, &serv->sv_tempsocks) {
- struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_xprt.xpt_list);
+ list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- }
spin_unlock_bh(&serv->sv_lock);
}
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
@@ -1385,6 +1379,7 @@ EXPORT_SYMBOL_GPL(svc_addsock);
*/
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
int protocol,
+ struct net *net,
struct sockaddr *sin, int len,
int flags)
{
@@ -1421,7 +1416,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
return ERR_PTR(-EINVAL);
}
- error = sock_create_kern(family, type, protocol, &sock);
+ error = __sock_create(net, family, type, protocol, &sock, 1);
if (error < 0)
return ERR_PTR(error);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 970fb00f388c..4c8f18aff7c3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -199,8 +199,6 @@ int xprt_reserve_xprt(struct rpc_task *task)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
return 1;
- if (task == NULL)
- return 0;
goto out_sleep;
}
xprt->snd_task = task;
@@ -757,13 +755,11 @@ static void xprt_connect_status(struct rpc_task *task)
*/
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
- struct list_head *pos;
+ struct rpc_rqst *entry;
- list_for_each(pos, &xprt->recv) {
- struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
+ list_for_each_entry(entry, &xprt->recv, rq_list)
if (entry->rq_xid == xid)
return entry;
- }
dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
ntohl(xid));
@@ -962,6 +958,37 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
spin_unlock(&xprt->reserve_lock);
}
+struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
+{
+ struct rpc_xprt *xprt;
+
+ xprt = kzalloc(size, GFP_KERNEL);
+ if (xprt == NULL)
+ goto out;
+
+ xprt->max_reqs = max_req;
+ xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (xprt->slot == NULL)
+ goto out_free;
+
+ xprt->xprt_net = get_net(net);
+ return xprt;
+
+out_free:
+ kfree(xprt);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xprt_alloc);
+
+void xprt_free(struct rpc_xprt *xprt)
+{
+ put_net(xprt->xprt_net);
+ kfree(xprt->slot);
+ kfree(xprt);
+}
+EXPORT_SYMBOL_GPL(xprt_free);
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index d718b8fa9525..09af4fab1a45 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
+#include <linux/workqueue.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svc_rdma.h>
@@ -74,6 +75,8 @@ atomic_t rdma_stat_sq_prod;
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;
+struct workqueue_struct *svc_rdma_wq;
+
/*
* This function implements reading and resetting an atomic_t stat
* variable through read/write to a proc file. Any write to the file
@@ -231,7 +234,7 @@ static ctl_table svcrdma_root_table[] = {
void svc_rdma_cleanup(void)
{
dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
- flush_scheduled_work();
+ destroy_workqueue(svc_rdma_wq);
if (svcrdma_table_header) {
unregister_sysctl_table(svcrdma_table_header);
svcrdma_table_header = NULL;
@@ -249,6 +252,11 @@ int svc_rdma_init(void)
dprintk("\tsq_depth : %d\n",
svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
+
+ svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
+ if (!svc_rdma_wq)
+ return -ENOMEM;
+
if (!svcrdma_table_header)
svcrdma_table_header =
register_sysctl_table(svcrdma_root_table);
@@ -283,6 +291,7 @@ int svc_rdma_init(void)
kmem_cache_destroy(svc_rdma_map_cachep);
err0:
unregister_sysctl_table(svcrdma_table_header);
+ destroy_workqueue(svc_rdma_wq);
return -ENOMEM;
}
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
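svc_rdma now allocates its own workqueue and queues transport-free work there, so module teardown can destroy_workqueue() instead of flushing the shared system queue. The same pattern in isolation, with hypothetical names, looks like this:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;		/* private queue */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* deferred cleanup runs here */
}

static int my_module_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&my_work, my_work_fn);
	return 0;
}

static void my_defer(void)
{
	queue_work(my_wq, &my_work);	/* instead of schedule_work() */
}

static void my_module_exit(void)
{
	destroy_workqueue(my_wq);	/* waits for queued work, then frees it */
}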
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 0194de814933..df67211c4baf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -263,9 +263,9 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT;
for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(rqstp->rq_arg.pages[page_no]),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ rqstp->rq_arg.pages[page_no], 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -309,17 +309,21 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
int count)
{
int i;
+ unsigned long off;
ctxt->count = count;
ctxt->direction = DMA_FROM_DEVICE;
for (i = 0; i < count; i++) {
ctxt->sge[i].length = 0; /* in case map fails */
if (!frmr) {
+ BUG_ON(0 == virt_to_page(vec[i].iov_base));
+ off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
ctxt->sge[i].addr =
- ib_dma_map_single(xprt->sc_cm_id->device,
- vec[i].iov_base,
- vec[i].iov_len,
- DMA_FROM_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ virt_to_page(vec[i].iov_base),
+ off,
+ vec[i].iov_len,
+ DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
ctxt->sge[i].addr))
return -EINVAL;
@@ -491,6 +495,7 @@ next_sge:
printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
err);
set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 0);
goto out;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index b15e1ebb2bfa..249a835b703f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -70,8 +70,8 @@
* on extra page for the RPCRMDA header.
*/
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
- struct xdr_buf *xdr,
- struct svc_rdma_req_map *vec)
+ struct xdr_buf *xdr,
+ struct svc_rdma_req_map *vec)
{
int sge_no;
u32 sge_bytes;
@@ -96,21 +96,25 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
vec->count = 2;
sge_no++;
- /* Build the FRMR */
+ /* Map the XDR head */
frmr->kva = frva;
frmr->direction = DMA_TO_DEVICE;
frmr->access_flags = 0;
frmr->map_len = PAGE_SIZE;
frmr->page_list_len = 1;
+ page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- (void *)xdr->head[0].iov_base,
- PAGE_SIZE, DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ virt_to_page(xdr->head[0].iov_base),
+ page_off,
+ PAGE_SIZE - page_off,
+ DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
atomic_inc(&xprt->sc_dma_used);
+ /* Map the XDR page list */
page_off = xdr->page_base;
page_bytes = xdr->page_len + page_off;
if (!page_bytes)
@@ -128,9 +132,9 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
page_bytes -= sge_bytes;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(page),
- PAGE_SIZE, DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ page, page_off,
+ sge_bytes, DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -166,8 +170,10 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
- DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
+ page_off,
+ PAGE_SIZE,
+ DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -245,6 +251,35 @@ static int map_xdr(struct svcxprt_rdma *xprt,
return 0;
}
+static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
+ struct xdr_buf *xdr,
+ u32 xdr_off, size_t len, int dir)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+ if (xdr_off < xdr->head[0].iov_len) {
+ /* This offset is in the head */
+ xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
+ page = virt_to_page(xdr->head[0].iov_base);
+ } else {
+ xdr_off -= xdr->head[0].iov_len;
+ if (xdr_off < xdr->page_len) {
+ /* This offset is in the page list */
+ page = xdr->pages[xdr_off >> PAGE_SHIFT];
+ xdr_off &= ~PAGE_MASK;
+ } else {
+ /* This offset is in the tail */
+ xdr_off -= xdr->page_len;
+ xdr_off += (unsigned long)
+ xdr->tail[0].iov_base & ~PAGE_MASK;
+ page = virt_to_page(xdr->tail[0].iov_base);
+ }
+ }
+ dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
+ min_t(size_t, PAGE_SIZE, len), dir);
+ return dma_addr;
+}
+
/* Assumptions:
* - We are using FRMR
* - or -
@@ -293,10 +328,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
sge[sge_no].length = sge_bytes;
if (!vec->frmr) {
sge[sge_no].addr =
- ib_dma_map_single(xprt->sc_cm_id->device,
- (void *)
- vec->sge[xdr_sge_no].iov_base + sge_off,
- sge_bytes, DMA_TO_DEVICE);
+ dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
+ sge_bytes, DMA_TO_DEVICE);
+ xdr_off += sge_bytes;
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
sge[sge_no].addr))
goto err;
@@ -333,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
goto err;
return 0;
err:
+ svc_rdma_unmap_dma(ctxt);
+ svc_rdma_put_frmr(xprt, vec->frmr);
svc_rdma_put_context(ctxt, 0);
/* Fatal error, close transport */
return -EIO;
@@ -494,7 +530,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
* In all three cases, this function prepares the RPCRDMA header in
* sge[0], the 'type' parameter indicates the type to place in the
* RPCRDMA header, and the 'byte_count' field indicates how much of
- * the XDR to include in this RDMA_SEND.
+ * the XDR to include in this RDMA_SEND. NB: The offset of the payload
+ * to send is zero in the XDR.
*/
static int send_reply(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
@@ -536,23 +573,24 @@ static int send_reply(struct svcxprt_rdma *rdma,
ctxt->sge[0].lkey = rdma->sc_dma_lkey;
ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
ctxt->sge[0].addr =
- ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
- ctxt->sge[0].length, DMA_TO_DEVICE);
+ ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
+ ctxt->sge[0].length, DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->direction = DMA_TO_DEVICE;
- /* Determine how many of our SGE are to be transmitted */
+ /* Map the payload indicated by 'byte_count' */
for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+ int xdr_off = 0;
sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
byte_count -= sge_bytes;
if (!vec->frmr) {
ctxt->sge[sge_no].addr =
- ib_dma_map_single(rdma->sc_cm_id->device,
- vec->sge[sge_no].iov_base,
- sge_bytes, DMA_TO_DEVICE);
+ dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
+ sge_bytes, DMA_TO_DEVICE);
+ xdr_off += sge_bytes;
if (ib_dma_mapping_error(rdma->sc_cm_id->device,
ctxt->sge[sge_no].addr))
goto err;
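dma_map_xdr() above turns an offset into the reply xdr_buf into a backing page plus in-page offset, so the send path can use ib_dma_map_page() for head, page-list and tail data alike. A simplified stand-alone C sketch of just that offset arithmetic is shown below; the structure is reduced to the fields involved and virt_to_page() is approximated by masking the address, so this is an illustration rather than the kernel code.

#include <stddef.h>
#include <stdint.h>

#define PG_SHIFT 12
#define PG_SIZE  (1UL << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))

struct mini_xdr_buf {			/* reduced stand-in for struct xdr_buf */
	char   *head_base;
	size_t  head_len;
	void  **pages;			/* pages backing the middle section */
	size_t  page_len;
	char   *tail_base;
};

/* Map a flat offset into the buffer to a page pointer and in-page offset. */
static void *xdr_off_to_page(const struct mini_xdr_buf *xdr, size_t off,
			     size_t *page_off)
{
	if (off < xdr->head_len) {		/* offset is in the head */
		*page_off = ((uintptr_t)xdr->head_base & ~PG_MASK) + off;
		return (void *)((uintptr_t)xdr->head_base & PG_MASK);
	}
	off -= xdr->head_len;
	if (off < xdr->page_len) {		/* offset is in the page list */
		*page_off = off & ~PG_MASK;
		return xdr->pages[off >> PG_SHIFT];
	}
	off -= xdr->page_len;			/* offset is in the tail */
	*page_off = ((uintptr_t)xdr->tail_base & ~PG_MASK) + off;
	return (void *)((uintptr_t)xdr->tail_base & PG_MASK);
}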
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index edea15a54e51..9df1eadc912a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -45,6 +45,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
@@ -52,6 +53,7 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
@@ -89,6 +91,9 @@ struct svc_xprt_class svc_rdma_class = {
/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
struct svc_rdma_op_ctxt *ctxt;
@@ -120,7 +125,7 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
*/
if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
atomic_dec(&xprt->sc_dma_used);
- ib_dma_unmap_single(xprt->sc_cm_id->device,
+ ib_dma_unmap_page(xprt->sc_cm_id->device,
ctxt->sge[i].addr,
ctxt->sge[i].length,
ctxt->direction);
@@ -502,8 +507,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
BUG_ON(sge_no >= xprt->sc_max_sge);
page = svc_rdma_get_page();
ctxt->pages[sge_no] = page;
- pa = ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(page), PAGE_SIZE,
+ pa = ib_dma_map_page(xprt->sc_cm_id->device,
+ page, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
goto err_put_ctxt;
@@ -511,9 +516,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
ctxt->sge[sge_no].addr = pa;
ctxt->sge[sge_no].length = PAGE_SIZE;
ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+ ctxt->count = sge_no + 1;
buflen += PAGE_SIZE;
}
- ctxt->count = sge_no;
recv_wr.next = NULL;
recv_wr.sg_list = &ctxt->sge[0];
recv_wr.num_sge = ctxt->count;
@@ -529,6 +534,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
return ret;
err_put_ctxt:
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
return -ENOMEM;
}
@@ -670,6 +676,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
* Create a listening RDMA service endpoint.
*/
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
@@ -798,8 +805,8 @@ static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
if (ib_dma_mapping_error(frmr->mr->device, addr))
continue;
atomic_dec(&xprt->sc_dma_used);
- ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
- frmr->direction);
+ ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
+ frmr->direction);
}
}
@@ -1184,7 +1191,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
struct svcxprt_rdma *rdma =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
INIT_WORK(&rdma->sc_work, __svc_rdma_free);
- schedule_work(&rdma->sc_work);
+ queue_work(svc_rdma_wq, &rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
@@ -1274,7 +1281,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
atomic_read(&xprt->sc_sq_count) <
xprt->sc_sq_depth);
if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
- return 0;
+ return -ENOTCONN;
continue;
}
/* Take a transport ref for each WR posted */
@@ -1306,7 +1313,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
enum rpcrdma_errcode err)
{
struct ib_send_wr err_wr;
- struct ib_sge sge;
struct page *p;
struct svc_rdma_op_ctxt *ctxt;
u32 *va;
@@ -1319,26 +1325,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
/* XDR encode error */
length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
+ ctxt = svc_rdma_get_context(xprt);
+ ctxt->direction = DMA_FROM_DEVICE;
+ ctxt->count = 1;
+ ctxt->pages[0] = p;
+
/* Prepare SGE for local address */
- sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+ ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+ p, 0, length, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
put_page(p);
return;
}
atomic_inc(&xprt->sc_dma_used);
- sge.lkey = xprt->sc_dma_lkey;
- sge.length = length;
-
- ctxt = svc_rdma_get_context(xprt);
- ctxt->count = 1;
- ctxt->pages[0] = p;
+ ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+ ctxt->sge[0].length = length;
/* Prepare SEND WR */
memset(&err_wr, 0, sizeof err_wr);
ctxt->wr_op = IB_WR_SEND;
err_wr.wr_id = (unsigned long)ctxt;
- err_wr.sg_list = &sge;
+ err_wr.sg_list = ctxt->sge;
err_wr.num_sge = 1;
err_wr.opcode = IB_WR_SEND;
err_wr.send_flags = IB_SEND_SIGNALED;
@@ -1348,9 +1355,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
if (ret) {
dprintk("svcrdma: Error %d posting send for protocol error\n",
ret);
- ib_dma_unmap_single(xprt->sc_cm_id->device,
- sge.addr, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
}
}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index a85e866a77f7..0867070bb5ca 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -237,8 +237,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
dprintk("RPC: %s: called\n", __func__);
- cancel_delayed_work(&r_xprt->rdma_connect);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&r_xprt->rdma_connect);
xprt_clear_connected(xprt);
@@ -251,9 +250,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
xprt_rdma_free_addresses(xprt);
- kfree(xprt->slot);
- xprt->slot = NULL;
- kfree(xprt);
+ xprt_free(xprt);
dprintk("RPC: %s: returning\n", __func__);
@@ -285,23 +282,14 @@ xprt_setup_rdma(struct xprt_create *args)
return ERR_PTR(-EBADF);
}
- xprt = kzalloc(sizeof(struct rpcrdma_xprt), GFP_KERNEL);
+ xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
+ xprt_rdma_slot_table_entries);
if (xprt == NULL) {
dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
__func__);
return ERR_PTR(-ENOMEM);
}
- xprt->max_reqs = xprt_rdma_slot_table_entries;
- xprt->slot = kcalloc(xprt->max_reqs,
- sizeof(struct rpc_rqst), GFP_KERNEL);
- if (xprt->slot == NULL) {
- dprintk("RPC: %s: couldn't allocate %d slots\n",
- __func__, xprt->max_reqs);
- kfree(xprt);
- return ERR_PTR(-ENOMEM);
- }
-
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout;
xprt->bind_timeout = (60U * HZ);
@@ -410,8 +398,7 @@ out3:
out2:
rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ERR_PTR(rc);
}
@@ -460,7 +447,7 @@ xprt_rdma_connect(struct rpc_task *task)
} else {
schedule_delayed_work(&r_xprt->rdma_connect, 0);
if (!RPC_IS_ASYNC(task))
- flush_scheduled_work();
+ flush_delayed_work(&r_xprt->rdma_connect);
}
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index fe9306bf10cc..dfcab5ac65af 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -774,8 +774,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
xs_close(xprt);
xs_free_peer_addresses(xprt);
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
module_put(THIS_MODULE);
}
@@ -1516,7 +1515,7 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
xs_update_peer_port(xprt);
}
-static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
+static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
unsigned short port = transport->srcport;
@@ -1525,7 +1524,7 @@ static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket
return port;
}
-static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
+static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
if (transport->srcport != 0)
transport->srcport = 0;
@@ -1535,23 +1534,18 @@ static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket
return xprt_max_resvport;
return --port;
}
-
-static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
+static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
- struct sockaddr_in myaddr = {
- .sin_family = AF_INET,
- };
- struct sockaddr_in *sa;
+ struct sockaddr_storage myaddr;
int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport, sock);
+ unsigned short port = xs_get_srcport(transport);
unsigned short last;
- sa = (struct sockaddr_in *)&transport->srcaddr;
- myaddr.sin_addr = sa->sin_addr;
+ memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
do {
- myaddr.sin_port = htons(port);
- err = kernel_bind(sock, (struct sockaddr *) &myaddr,
- sizeof(myaddr));
+ rpc_set_port((struct sockaddr *)&myaddr, port);
+ err = kernel_bind(sock, (struct sockaddr *)&myaddr,
+ transport->xprt.addrlen);
if (port == 0)
break;
if (err == 0) {
@@ -1559,48 +1553,23 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
break;
}
last = port;
- port = xs_next_srcport(transport, sock, port);
+ port = xs_next_srcport(transport, port);
if (port > last)
nloop++;
} while (err == -EADDRINUSE && nloop != 2);
- dprintk("RPC: %s %pI4:%u: %s (%d)\n",
- __func__, &myaddr.sin_addr,
- port, err ? "failed" : "ok", err);
- return err;
-}
-
-static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
-{
- struct sockaddr_in6 myaddr = {
- .sin6_family = AF_INET6,
- };
- struct sockaddr_in6 *sa;
- int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport, sock);
- unsigned short last;
- sa = (struct sockaddr_in6 *)&transport->srcaddr;
- myaddr.sin6_addr = sa->sin6_addr;
- do {
- myaddr.sin6_port = htons(port);
- err = kernel_bind(sock, (struct sockaddr *) &myaddr,
- sizeof(myaddr));
- if (port == 0)
- break;
- if (err == 0) {
- transport->srcport = port;
- break;
- }
- last = port;
- port = xs_next_srcport(transport, sock, port);
- if (port > last)
- nloop++;
- } while (err == -EADDRINUSE && nloop != 2);
- dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n",
- &myaddr.sin6_addr, port, err ? "failed" : "ok", err);
+ if (myaddr.ss_family == AF_INET)
+ dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
+ &((struct sockaddr_in *)&myaddr)->sin_addr,
+ port, err ? "failed" : "ok", err);
+ else
+ dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
+ &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
+ port, err ? "failed" : "ok", err);
return err;
}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];
@@ -1622,6 +1591,18 @@ static inline void xs_reclassify_socket6(struct socket *sock)
sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}
+
+static inline void xs_reclassify_socket(int family, struct socket *sock)
+{
+ switch (family) {
+ case AF_INET:
+ xs_reclassify_socket4(sock);
+ break;
+ case AF_INET6:
+ xs_reclassify_socket6(sock);
+ break;
+ }
+}
#else
static inline void xs_reclassify_socket4(struct socket *sock)
{
@@ -1630,8 +1611,36 @@ static inline void xs_reclassify_socket4(struct socket *sock)
static inline void xs_reclassify_socket6(struct socket *sock)
{
}
+
+static inline void xs_reclassify_socket(int family, struct socket *sock)
+{
+}
#endif
+static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ struct sock_xprt *transport, int family, int type, int protocol)
+{
+ struct socket *sock;
+ int err;
+
+ err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
+ if (err < 0) {
+ dprintk("RPC: can't create %d transport socket (%d).\n",
+ protocol, -err);
+ goto out;
+ }
+ xs_reclassify_socket(family, sock);
+
+ if (xs_bind(transport, sock)) {
+ sock_release(sock);
+ goto out;
+ }
+
+ return sock;
+out:
+ return ERR_PTR(err);
+}
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1661,82 +1670,23 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_udp_do_set_buffer_size(xprt);
}
-/**
- * xs_udp_connect_worker4 - set up a UDP socket
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_udp_connect_worker4(struct work_struct *work)
+static void xs_udp_setup_socket(struct work_struct *work)
{
struct sock_xprt *transport =
container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
struct socket *sock = transport->sock;
- int err, status = -EIO;
+ int status = -EIO;
if (xprt->shutdown)
goto out;
/* Start by resetting any existing state */
xs_reset_transport(transport);
-
- err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
+ if (IS_ERR(sock))
goto out;
- }
- xs_reclassify_socket4(sock);
-
- if (xs_bind4(transport, sock)) {
- sock_release(sock);
- goto out;
- }
-
- dprintk("RPC: worker connecting xprt %p via %s to "
- "%s (port %s)\n", xprt,
- xprt->address_strings[RPC_DISPLAY_PROTO],
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PORT]);
-
- xs_udp_finish_connecting(xprt, sock);
- status = 0;
-out:
- xprt_clear_connecting(xprt);
- xprt_wake_pending_tasks(xprt, status);
-}
-
-/**
- * xs_udp_connect_worker6 - set up a UDP socket
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_udp_connect_worker6(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
- struct socket *sock = transport->sock;
- int err, status = -EIO;
-
- if (xprt->shutdown)
- goto out;
-
- /* Start by resetting any existing state */
- xs_reset_transport(transport);
-
- err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
- goto out;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
- goto out;
- }
dprintk("RPC: worker connecting xprt %p via %s to "
"%s (port %s)\n", xprt,
@@ -1755,12 +1705,12 @@ out:
* We need to preserve the port number so the reply cache on the server can
* find our cached RPC replies when we get around to reconnecting.
*/
-static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
+static void xs_abort_connection(struct sock_xprt *transport)
{
int result;
struct sockaddr any;
- dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);
+ dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);
/*
* Disconnect the transport socket by doing a connect operation
@@ -1770,13 +1720,13 @@ static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transpo
any.sa_family = AF_UNSPEC;
result = kernel_connect(transport->sock, &any, sizeof(any), 0);
if (!result)
- xs_sock_mark_closed(xprt);
+ xs_sock_mark_closed(&transport->xprt);
else
dprintk("RPC: AF_UNSPEC connect return code %d\n",
result);
}
-static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
+static void xs_tcp_reuse_connection(struct sock_xprt *transport)
{
unsigned int state = transport->inet->sk_state;
@@ -1799,7 +1749,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
"sk_shutdown set to %d\n",
__func__, transport->inet->sk_shutdown);
}
- xs_abort_connection(xprt, transport);
+ xs_abort_connection(transport);
}
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -1852,12 +1802,12 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
*
* Invoked by a work queue tasklet.
*/
-static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
- struct sock_xprt *transport,
- struct socket *(*create_sock)(struct rpc_xprt *,
- struct sock_xprt *))
+static void xs_tcp_setup_socket(struct work_struct *work)
{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
struct socket *sock = transport->sock;
+ struct rpc_xprt *xprt = &transport->xprt;
int status = -EIO;
if (xprt->shutdown)
@@ -1865,7 +1815,8 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- sock = create_sock(xprt, transport);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
if (IS_ERR(sock)) {
status = PTR_ERR(sock);
goto out;
@@ -1876,7 +1827,7 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
&xprt->state);
/* "close" the socket, preserving the local port */
- xs_tcp_reuse_connection(xprt, transport);
+ xs_tcp_reuse_connection(transport);
if (abort_and_exit)
goto out_eagain;
@@ -1925,84 +1876,6 @@ out:
xprt_wake_pending_tasks(xprt, status);
}
-static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
-{
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket4(sock);
-
- if (xs_bind4(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
-out_err:
- return ERR_PTR(-EIO);
-}
-
-/**
- * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_tcp_connect_worker4(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
-}
-
-static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
-{
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
-out_err:
- return ERR_PTR(-EIO);
-}
-
-/**
- * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_tcp_connect_worker6(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
-}
-
/**
* xs_connect - connect a socket to a remote endpoint
* @task: address of RPC task that manages state of connect request
@@ -2262,6 +2135,31 @@ static struct rpc_xprt_ops bc_tcp_ops = {
.print_stats = xs_tcp_print_stats,
};
+static int xs_init_anyaddr(const int family, struct sockaddr *sap)
+{
+ static const struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+ static const struct sockaddr_in6 sin6 = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ };
+
+ switch (family) {
+ case AF_INET:
+ memcpy(sap, &sin, sizeof(sin));
+ break;
+ case AF_INET6:
+ memcpy(sap, &sin6, sizeof(sin6));
+ break;
+ default:
+ dprintk("RPC: %s: Bad address family\n", __func__);
+ return -EAFNOSUPPORT;
+ }
+ return 0;
+}
+
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
unsigned int slot_table_size)
{
@@ -2273,27 +2171,25 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
return ERR_PTR(-EBADF);
}
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (new == NULL) {
+ xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size);
+ if (xprt == NULL) {
dprintk("RPC: xs_setup_xprt: couldn't allocate "
"rpc_xprt\n");
return ERR_PTR(-ENOMEM);
}
- xprt = &new->xprt;
-
- xprt->max_reqs = slot_table_size;
- xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
- if (xprt->slot == NULL) {
- kfree(xprt);
- dprintk("RPC: xs_setup_xprt: couldn't allocate slot "
- "table\n");
- return ERR_PTR(-ENOMEM);
- }
+ new = container_of(xprt, struct sock_xprt, xprt);
memcpy(&xprt->addr, args->dstaddr, args->addrlen);
xprt->addrlen = args->addrlen;
if (args->srcaddr)
memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
+ else {
+ int err;
+ err = xs_init_anyaddr(args->dstaddr->sa_family,
+ (struct sockaddr *)&new->srcaddr);
+ if (err != 0)
+ return ERR_PTR(err);
+ }
return xprt;
}
@@ -2341,7 +2237,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_udp_connect_worker4);
+ xs_udp_setup_socket);
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
break;
case AF_INET6:
@@ -2349,7 +2245,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_udp_connect_worker6);
+ xs_udp_setup_socket);
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
break;
default:
@@ -2371,8 +2267,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
@@ -2416,7 +2311,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_tcp_connect_worker4);
+ xs_tcp_setup_socket);
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
break;
case AF_INET6:
@@ -2424,7 +2319,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_tcp_connect_worker6);
+ xs_tcp_setup_socket);
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
break;
default:
@@ -2447,8 +2342,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
@@ -2507,15 +2401,10 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
goto out_err;
}
- if (xprt_bound(xprt))
- dprintk("RPC: set up xprt to %s (port %s) via %s\n",
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PORT],
- xprt->address_strings[RPC_DISPLAY_PROTO]);
- else
- dprintk("RPC: set up xprt to %s (autobind) via %s\n",
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PROTO]);
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
/*
* Since we don't want connections for the backchannel, we set
@@ -2528,8 +2417,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
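The IPv4 and IPv6 bind helpers collapse into one xs_bind() by holding the source address in a struct sockaddr_storage and rewriting only the port on each attempt. A rough user-space analogue of that family-agnostic bind-and-retry loop follows; bind() and a local set_port() stand in for kernel_bind() and rpc_set_port(), and the retry policy is simplified.

#include <errno.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

static void set_port(struct sockaddr *sa, unsigned short port)
{
	if (sa->sa_family == AF_INET)
		((struct sockaddr_in *)sa)->sin_port = htons(port);
	else if (sa->sa_family == AF_INET6)
		((struct sockaddr_in6 *)sa)->sin6_port = htons(port);
}

/* Bind fd to src (either family), trying further ports on EADDRINUSE. */
static int bind_either_family(int fd, const struct sockaddr_storage *src,
			      socklen_t srclen, unsigned short port,
			      unsigned short (*next_port)(unsigned short))
{
	struct sockaddr_storage myaddr;
	int err;

	memcpy(&myaddr, src, srclen);
	do {
		set_port((struct sockaddr *)&myaddr, port);
		err = bind(fd, (struct sockaddr *)&myaddr, srclen);
		if (err == 0 || port == 0)
			break;
		port = next_port(port);
	} while (errno == EADDRINUSE);

	return err;
}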
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0ebc777a6660..3c95304a0817 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -117,7 +117,7 @@
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static atomic_long_t unix_nr_socks;
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
@@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk)
if (u->addr)
unix_release_addr(u->addr);
- atomic_dec(&unix_nr_socks);
+ atomic_long_dec(&unix_nr_socks);
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
- atomic_read(&unix_nr_socks));
+ printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ atomic_long_read(&unix_nr_socks));
#endif
}
@@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
struct sock *sk = NULL;
struct unix_sock *u;
- atomic_inc(&unix_nr_socks);
- if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
+ atomic_long_inc(&unix_nr_socks);
+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
@@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
unix_insert_socket(unix_sockets_unbound, sk);
out:
if (sk == NULL)
- atomic_dec(&unix_nr_socks);
+ atomic_long_dec(&unix_nr_socks);
else {
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d14bbf960c18..4b9f8912526c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1167,7 +1167,7 @@ static int ignore_request(struct wiphy *wiphy,
return 0;
return -EALREADY;
}
- return REG_INTERSECT;
+ return 0;
case NL80211_REGDOM_SET_BY_DRIVER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
if (regdom_changes(pending_request->alpha2))
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2039acdf5122..90b54d4697fd 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2,7 +2,7 @@
# (c) 2001, Dave Jones. (the file handling bit)
# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
-# (c) 2008,2009, Andy Whitcroft <apw@canonical.com>
+# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
# Licensed under the terms of the GNU GPL License version 2
use strict;
@@ -10,7 +10,7 @@ use strict;
my $P = $0;
$P =~ s@.*/@@g;
-my $V = '0.30';
+my $V = '0.31';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -103,6 +103,8 @@ for my $key (keys %debug) {
die "$@" if ($@);
}
+my $rpt_cleaners = 0;
+
if ($terse) {
$emacs = 1;
$quiet++;
@@ -150,6 +152,20 @@ our $Sparse = qr{
# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
our $Attribute = qr{
const|
+ __percpu|
+ __nocast|
+ __safe|
+ __bitwise__|
+ __packed__|
+ __packed2__|
+ __naked|
+ __maybe_unused|
+ __always_unused|
+ __noreturn|
+ __used|
+ __cold|
+ __noclone|
+ __deprecated|
__read_mostly|
__kprobes|
__(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
@@ -675,15 +691,15 @@ sub ctx_block_get {
$blk .= $rawlines[$line];
# Handle nested #if/#else.
- if ($rawlines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
+ if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
push(@stack, $level);
- } elsif ($rawlines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
+ } elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
$level = $stack[$#stack - 1];
- } elsif ($rawlines[$line] =~ /^.\s*#\s*endif\b/) {
+ } elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
$level = pop(@stack);
}
- foreach my $c (split(//, $rawlines[$line])) {
+ foreach my $c (split(//, $lines[$line])) {
##print "C<$c>L<$level><$open$close>O<$off>\n";
if ($off > 0) {
$off--;
@@ -843,7 +859,12 @@ sub annotate_values {
$av_preprocessor = 0;
}
- } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\()/) {
+ } elsif ($cur =~ /^(\(\s*$Type\s*)\)/) {
+ print "CAST($1)\n" if ($dbg_values > 1);
+ push(@av_paren_type, $type);
+ $type = 'C';
+
+ } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
print "DECLARE($1)\n" if ($dbg_values > 1);
$type = 'T';
@@ -1308,7 +1329,11 @@ sub process {
$here = "#$realline: " if ($file);
# extract the filename as it passes
- if ($line=~/^\+\+\+\s+(\S+)/) {
+ if ($line =~ /^diff --git.*?(\S+)$/) {
+ $realfile = $1;
+ $realfile =~ s@^([^/]*)/@@;
+
+ } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@;
@@ -1332,6 +1357,14 @@ sub process {
$cnt_lines++ if ($realcnt != 0);
+# Check for incorrect file permissions
+ if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
+ my $permhere = $here . "FILE: $realfile\n";
+ if ($realfile =~ /(Makefile|Kconfig|\.c|\.h|\.S|\.tmpl)$/) {
+ ERROR("do not set execute permissions for source files\n" . $permhere);
+ }
+ }
+
#check the patch for a signoff:
if ($line =~ /^\s*signed-off-by:/i) {
# This is a signoff, if ugly, so do not double report.
@@ -1389,21 +1422,38 @@ sub process {
} elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
ERROR("trailing whitespace\n" . $herevet);
+ $rpt_cleaners = 1;
}
# check for Kconfig help text having a real description
+# Only applies when adding the entry originally, after that we do not have
+# sufficient context to determine whether it is indeed long enough.
if ($realfile =~ /Kconfig/ &&
- $line =~ /\+?\s*(---)?help(---)?$/) {
+ $line =~ /\+\s*(?:---)?help(?:---)?$/) {
my $length = 0;
- for (my $l = $linenr; defined($lines[$l]); $l++) {
- my $f = $lines[$l];
+ my $cnt = $realcnt;
+ my $ln = $linenr + 1;
+ my $f;
+ my $is_end = 0;
+ while ($cnt > 0 && defined $lines[$ln - 1]) {
+ $f = $lines[$ln - 1];
+ $cnt-- if ($lines[$ln - 1] !~ /^-/);
+ $is_end = $lines[$ln - 1] =~ /^\+/;
+ $ln++;
+
+ next if ($f =~ /^-/);
+ $f =~ s/^.//;
$f =~ s/#.*//;
$f =~ s/^\s+//;
next if ($f =~ /^$/);
- last if ($f =~ /^\s*config\s/);
+ if ($f =~ /^\s*config\s/) {
+ $is_end = 1;
+ last;
+ }
$length++;
}
- WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($length < 4);
+ WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
+ #print "is_end<$is_end> length<$length>\n";
}
# check we are in a valid source file if not then ignore this hunk
@@ -1450,6 +1500,7 @@ sub process {
$rawline =~ /^\+\s* \s*/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
ERROR("code indent should use tabs where possible\n" . $herevet);
+ $rpt_cleaners = 1;
}
# check for space before tabs.
@@ -1459,10 +1510,13 @@ sub process {
}
# check for spaces at the beginning of a line.
- if ($rawline =~ /^\+ / && $rawline !~ /\+ +\*/) {
+# Exceptions:
+# 1) within comments
+# 2) indented preprocessor commands
+# 3) hanging labels
+ if ($rawline =~ /^\+ / && $line !~ /\+ *(?:$;|#|$Ident:)/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- WARN("please, no space for starting a line, \
- excluding comments\n" . $herevet);
+ WARN("please, no spaces at the start of a line\n" . $herevet);
}
# check we are in a valid C source file if not then ignore this hunk
@@ -1598,7 +1652,7 @@ sub process {
if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
ERROR("that open brace { should be on the previous line\n" .
- "$here\n$ctx\n$lines[$ctx_ln - 1]\n");
+ "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
}
if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
$ctx =~ /\)\s*\;\s*$/ &&
@@ -1607,7 +1661,7 @@ sub process {
my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
if ($nindent > $indent) {
WARN("trailing semicolon indicates no statements, indent implies otherwise\n" .
- "$here\n$ctx\n$lines[$ctx_ln - 1]\n");
+ "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
}
}
}
@@ -1768,8 +1822,17 @@ sub process {
!defined $suppress_export{$realline_next} &&
($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
$lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+ # Handle definitions which produce identifiers with
+ # a prefix:
+ # XXX(foo);
+ # EXPORT_SYMBOL(something_foo);
my $name = $1;
- if ($stat !~ /(?:
+ if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
+ $name =~ /^${Ident}_$2/) {
+#print "FOO C name<$name>\n";
+ $suppress_export{$realline_next} = 1;
+
+ } elsif ($stat !~ /(?:
\n.}\s*$|
^.DEFINE_$Ident\(\Q$name\E\)|
^.DECLARE_$Ident\(\Q$name\E\)|
@@ -1806,6 +1869,23 @@ sub process {
$herecurr);
}
+# check for static const char * arrays.
+ if ($line =~ /\bstatic\s+const\s+char\s*\*\s*(\w+)\s*\[\s*\]\s*=\s*/) {
+ WARN("static const char * array should probably be static const char * const\n" .
+ $herecurr);
+ }
+
+# check for static char foo[] = "bar" declarations.
+ if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) {
+ WARN("static char array declaration should probably be static const char\n" .
+ $herecurr);
+ }
+
+# check for declarations of struct pci_device_id
+ if ($line =~ /\bstruct\s+pci_device_id\s+\w+\s*\[\s*\]\s*\=\s*\{/) {
+ WARN("Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\n" . $herecurr);
+ }
+
# check for new typedefs, only function parameters and sparse annotations
# make sense.
if ($line =~ /\btypedef\s/ &&
@@ -1899,6 +1979,11 @@ sub process {
ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
}
+# missing space after union, struct or enum definition
+ if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
+ WARN("missing space after $1 definition\n" . $herecurr);
+ }
+
# check for spacing round square brackets; allowed:
# 1. with a type on the left -- int [] a;
# 2. at the beginning of a line for slice initialisers -- [0...10] = 5,
@@ -2176,21 +2261,29 @@ sub process {
my $value = $2;
# Flatten any parentheses
- $value =~ s/\)\(/\) \(/g;
+ $value =~ s/\(/ \(/g;
+ $value =~ s/\)/\) /g;
while ($value =~ s/\[[^\{\}]*\]/1/ ||
$value !~ /(?:$Ident|-?$Constant)\s*
$Compare\s*
(?:$Ident|-?$Constant)/x &&
$value =~ s/\([^\(\)]*\)/1/) {
}
-
- if ($value =~ /^(?:$Ident|-?$Constant)$/) {
+#print "value<$value>\n";
+ if ($value =~ /^\s*(?:$Ident|-?$Constant)\s*$/) {
ERROR("return is not a function, parentheses are not required\n" . $herecurr);
} elsif ($spacing !~ /\s+/) {
ERROR("space required before the open parenthesis '('\n" . $herecurr);
}
}
+# Return of what appears to be an errno should normally be -'ve
+ if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
+ my $name = $1;
+ if ($name ne 'EOF' && $name ne 'ERROR') {
+ WARN("return of an errno should typically be -ve (return -$1)\n" . $herecurr);
+ }
+ }
# Need a space before open parenthesis after if, while etc
if ($line=~/\b(if|while|for|switch)\(/) {
@@ -2409,8 +2502,8 @@ sub process {
\.$Ident\s*=\s*|
^\"|\"$
}x;
- #print "REST<$rest> dstat<$dstat>\n";
- if ($rest ne '') {
+ #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+ if ($rest ne '' && $rest ne ',') {
if ($rest !~ /while\s*\(/ &&
$dstat !~ /$exceptions/)
{
@@ -2839,6 +2932,15 @@ sub process {
print "\n" if ($quiet == 0);
}
+ if ($quiet == 0) {
+ # If there were whitespace errors which cleanpatch can fix
+ # then suggest that.
+ if ($rpt_cleaners) {
+ print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n";
+ print " scripts/cleanfile\n\n";
+ }
+ }
+
if ($clean == 1 && $quiet == 0) {
print "$vname has no obvious style problems and is ready for submission.\n"
}
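The checkpatch.pl hunks above add several new source checks. A small illustrative C fragment follows (not taken from the tree; the identifiers mode_names, banner, example_ids and example_check are made up) showing the constructs each new check flags, together with the form the warning message suggests:

	#include <linux/errno.h>
	#include <linux/pci.h>

	/* "static const char * array should probably be static const char * const" */
	static const char *mode_names[] = { "off", "auto", "manual" };
	static const char * const mode_names_ok[] = { "off", "auto", "manual" };

	/* "static char array declaration should probably be static const char" */
	static char banner[] = "example driver v1.0";
	static const char banner_ok[] = "example driver v1.0";

	/* "Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id" */
	static struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },
		{ }
	};
	static DEFINE_PCI_DEVICE_TABLE(example_ids_ok) = {
		{ PCI_DEVICE(0x1234, 0x5678) },
		{ }
	};

	/* "missing space after struct definition" */
	struct example_cfg{ int rate; };
	struct example_cfg_ok { int rate; };

	/* "return of an errno should typically be -ve (return -EINVAL)" */
	static int example_check(int arg)
	{
		if (arg < 0)
			return EINVAL;		/* flagged by the new check */
		return 0;
	}
	static int example_check_ok(int arg)
	{
		if (arg < 0)
			return -EINVAL;
		return 0;
	}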
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index b2281982f52f..d21ec3a89603 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
use strict;
my $P = $0;
-my $V = '0.24';
+my $V = '0.26-beta6';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -24,15 +24,19 @@ my $email_maintainer = 1;
my $email_list = 1;
my $email_subscriber_list = 0;
my $email_git_penguin_chiefs = 0;
-my $email_git = 1;
+my $email_git = 0;
my $email_git_all_signature_types = 0;
my $email_git_blame = 0;
+my $email_git_blame_signatures = 1;
+my $email_git_fallback = 1;
my $email_git_min_signatures = 1;
my $email_git_max_maintainers = 5;
my $email_git_min_percent = 5;
my $email_git_since = "1-year-ago";
my $email_hg_since = "-365";
+my $interactive = 0;
my $email_remove_duplicates = 1;
+my $email_use_mailmap = 1;
my $output_multiline = 1;
my $output_separator = ", ";
my $output_roles = 0;
@@ -49,8 +53,13 @@ my $pattern_depth = 0;
my $version = 0;
my $help = 0;
+my $vcs_used = 0;
+
my $exit = 0;
+my %commit_author_hash;
+my %commit_signer_hash;
+
my @penguin_chief = ();
push(@penguin_chief, "Linus Torvalds:torvalds\@linux-foundation.org");
#Andrew wants in on most everything - 2009/01/14
@@ -73,7 +82,6 @@ my @signature_tags = ();
push(@signature_tags, "Signed-off-by:");
push(@signature_tags, "Reviewed-by:");
push(@signature_tags, "Acked-by:");
-my $signaturePattern = "\(" . join("|", @signature_tags) . "\)";
# rfc822 email address - preloaded methods go here.
my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
@@ -86,31 +94,70 @@ my %VCS_cmds;
my %VCS_cmds_git = (
"execute_cmd" => \&git_execute_cmd,
"available" => '(which("git") ne "") && (-d ".git")',
- "find_signers_cmd" => "git log --no-color --since=\$email_git_since -- \$file",
- "find_commit_signers_cmd" => "git log --no-color -1 \$commit",
+ "find_signers_cmd" =>
+ "git log --no-color --since=\$email_git_since " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n' .
+ '%b%n"' .
+ " -- \$file",
+ "find_commit_signers_cmd" =>
+ "git log --no-color " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n' .
+ '%b%n"' .
+ " -1 \$commit",
+ "find_commit_author_cmd" =>
+ "git log --no-color " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n"' .
+ " -1 \$commit",
"blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
"blame_file_cmd" => "git blame -l \$file",
- "commit_pattern" => "^commit [0-9a-f]{40,40}",
- "blame_commit_pattern" => "^([0-9a-f]+) "
+ "commit_pattern" => "^GitCommit: ([0-9a-f]{40,40})",
+ "blame_commit_pattern" => "^([0-9a-f]+) ",
+ "author_pattern" => "^GitAuthor: (.*)",
+ "subject_pattern" => "^GitSubject: (.*)",
);
my %VCS_cmds_hg = (
"execute_cmd" => \&hg_execute_cmd,
"available" => '(which("hg") ne "") && (-d ".hg")',
"find_signers_cmd" =>
- "hg log --date=\$email_hg_since" .
- " --template='commit {node}\\n{desc}\\n' -- \$file",
- "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit",
+ "hg log --date=\$email_hg_since " .
+ "--template='HgCommit: {node}\\n" .
+ "HgAuthor: {author}\\n" .
+ "HgSubject: {desc}\\n'" .
+ " -- \$file",
+ "find_commit_signers_cmd" =>
+ "hg log " .
+ "--template='HgSubject: {desc}\\n'" .
+ " -r \$commit",
+ "find_commit_author_cmd" =>
+ "hg log " .
+ "--template='HgCommit: {node}\\n" .
+ "HgAuthor: {author}\\n" .
+ "HgSubject: {desc|firstline}\\n'" .
+ " -r \$commit",
"blame_range_cmd" => "", # not supported
- "blame_file_cmd" => "hg blame -c \$file",
- "commit_pattern" => "^commit [0-9a-f]{40,40}",
- "blame_commit_pattern" => "^([0-9a-f]+):"
+ "blame_file_cmd" => "hg blame -n \$file",
+ "commit_pattern" => "^HgCommit: ([0-9a-f]{40,40})",
+ "blame_commit_pattern" => "^([ 0-9a-f]+):",
+ "author_pattern" => "^HgAuthor: (.*)",
+ "subject_pattern" => "^HgSubject: (.*)",
);
-if (-f "${lk_path}.get_maintainer.conf") {
+my $conf = which_conf(".get_maintainer.conf");
+if (-f $conf) {
my @conf_args;
- open(my $conffile, '<', "${lk_path}.get_maintainer.conf")
- or warn "$P: Can't open .get_maintainer.conf: $!\n";
+ open(my $conffile, '<', "$conf")
+ or warn "$P: Can't find a readable .get_maintainer.conf file $!\n";
+
while (<$conffile>) {
my $line = $_;
@@ -136,13 +183,17 @@ if (!GetOptions(
'git!' => \$email_git,
'git-all-signature-types!' => \$email_git_all_signature_types,
'git-blame!' => \$email_git_blame,
+ 'git-blame-signatures!' => \$email_git_blame_signatures,
+ 'git-fallback!' => \$email_git_fallback,
'git-chief-penguins!' => \$email_git_penguin_chiefs,
'git-min-signatures=i' => \$email_git_min_signatures,
'git-max-maintainers=i' => \$email_git_max_maintainers,
'git-min-percent=i' => \$email_git_min_percent,
'git-since=s' => \$email_git_since,
'hg-since=s' => \$email_hg_since,
+ 'i|interactive!' => \$interactive,
'remove-duplicates!' => \$email_remove_duplicates,
+ 'mailmap!' => \$email_use_mailmap,
'm!' => \$email_maintainer,
'n!' => \$email_usename,
'l!' => \$email_list,
@@ -181,13 +232,9 @@ if (-t STDIN && !@ARGV) {
die "$P: missing patchfile or -f file - use --help if necessary\n";
}
-if ($output_separator ne ", ") {
- $output_multiline = 0;
-}
-
-if ($output_rolestats) {
- $output_roles = 1;
-}
+$output_multiline = 0 if ($output_separator ne ", ");
+$output_rolestats = 1 if ($interactive);
+$output_roles = 1 if ($output_rolestats);
if ($sections) {
$email = 0;
@@ -197,6 +244,7 @@ if ($sections) {
$subsystem = 0;
$web = 0;
$keywords = 0;
+ $interactive = 0;
} else {
my $selections = $email + $scm + $status + $subsystem + $web;
if ($selections == 0) {
@@ -215,10 +263,6 @@ if (!top_of_kernel_tree($lk_path)) {
. "a linux kernel source tree.\n";
}
-if ($email_git_all_signature_types) {
- $signaturePattern = "(.+?)[Bb][Yy]:";
-}
-
## Read MAINTAINERS for type/value pairs
my @typevalue = ();
@@ -253,31 +297,82 @@ while (<$maint>) {
}
close($maint);
-my %mailmap;
-if ($email_remove_duplicates) {
- open(my $mailmap, '<', "${lk_path}.mailmap")
- or warn "$P: Can't open .mailmap: $!\n";
- while (<$mailmap>) {
- my $line = $_;
+#
+# Read mail address map
+#
- next if ($line =~ m/^\s*#/);
- next if ($line =~ m/^\s*$/);
+my $mailmap;
- my ($name, $address) = parse_email($line);
- $line = format_email($name, $address, $email_usename);
+read_mailmap();
- next if ($line =~ m/^\s*$/);
+sub read_mailmap {
+ $mailmap = {
+ names => {},
+ addresses => {}
+ };
- if (exists($mailmap{$name})) {
- my $obj = $mailmap{$name};
- push(@$obj, $address);
- } else {
- my @arr = ($address);
- $mailmap{$name} = \@arr;
+ return if (!$email_use_mailmap || !(-f "${lk_path}.mailmap"));
+
+ open(my $mailmap_file, '<', "${lk_path}.mailmap")
+ or warn "$P: Can't open .mailmap: $!\n";
+
+ while (<$mailmap_file>) {
+ s/#.*$//; #strip comments
+ s/^\s+|\s+$//g; #trim
+
+ next if (/^\s*$/); #skip empty lines
+ #entries have one of the following formats:
+ # name1 <mail1>
+ # <mail1> <mail2>
+ # name1 <mail1> <mail2>
+ # name1 <mail1> name2 <mail2>
+ # (see man git-shortlog)
+ if (/^(.+)<(.+)>$/) {
+ my $real_name = $1;
+ my $address = $2;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $address) = parse_email("$real_name <$address>");
+ $mailmap->{names}->{$address} = $real_name;
+
+ } elsif (/^<([^\s]+)>\s*<([^\s]+)>$/) {
+ my $real_address = $1;
+ my $wrong_address = $2;
+
+ $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+ } elsif (/^(.+)<([^\s]+)>\s*<([^\s]+)>$/) {
+ my $real_name = $1;
+ my $real_address = $2;
+ my $wrong_address = $3;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $real_address) =
+ parse_email("$real_name <$real_address>");
+ $mailmap->{names}->{$wrong_address} = $real_name;
+ $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+ } elsif (/^(.+)<([^\s]+)>\s*([^\s].*)<([^\s]+)>$/) {
+ my $real_name = $1;
+ my $real_address = $2;
+ my $wrong_name = $3;
+ my $wrong_address = $4;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $real_address) =
+ parse_email("$real_name <$real_address>");
+
+ $wrong_name =~ s/\s+$//;
+ ($wrong_name, $wrong_address) =
+ parse_email("$wrong_name <$wrong_address>");
+
+ my $wrong_email = format_email($wrong_name, $wrong_address, 1);
+ $mailmap->{names}->{$wrong_email} = $real_name;
+ $mailmap->{addresses}->{$wrong_email} = $real_address;
}
}
- close($mailmap);
+ close($mailmap_file);
}
## use the filenames on the command line or find the filenames in the patchfiles
@@ -302,7 +397,7 @@ foreach my $file (@ARGV) {
}
if ($from_filename) {
push(@files, $file);
- if (-f $file && ($keywords || $file_emails)) {
+ if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
open(my $f, '<', $file)
or die "$P: Can't open $file: $!\n";
my $text = do { local($/) ; <$f> };
@@ -357,67 +452,127 @@ foreach my $file (@ARGV) {
@file_emails = uniq(@file_emails);
+my %email_hash_name;
+my %email_hash_address;
my @email_to = ();
+my %hash_list_to;
my @list_to = ();
my @scm = ();
my @web = ();
my @subsystem = ();
my @status = ();
+my %deduplicate_name_hash = ();
+my %deduplicate_address_hash = ();
+my $signature_pattern;
-# Find responsible parties
+my @maintainers = get_maintainers();
-foreach my $file (@files) {
+if (@maintainers) {
+ @maintainers = merge_email(@maintainers);
+ output(@maintainers);
+}
- my %hash;
- my $tvi = find_first_section();
- while ($tvi < @typevalue) {
- my $start = find_starting_index($tvi);
- my $end = find_ending_index($tvi);
- my $exclude = 0;
- my $i;
-
- #Do not match excluded file patterns
-
- for ($i = $start; $i < $end; $i++) {
- my $line = $typevalue[$i];
- if ($line =~ m/^(\C):\s*(.*)/) {
- my $type = $1;
- my $value = $2;
- if ($type eq 'X') {
- if (file_match_pattern($file, $value)) {
- $exclude = 1;
- last;
- }
- }
- }
- }
+if ($scm) {
+ @scm = uniq(@scm);
+ output(@scm);
+}
+
+if ($status) {
+ @status = uniq(@status);
+ output(@status);
+}
+
+if ($subsystem) {
+ @subsystem = uniq(@subsystem);
+ output(@subsystem);
+}
+
+if ($web) {
+ @web = uniq(@web);
+ output(@web);
+}
+
+exit($exit);
+
+sub get_maintainers {
+ %email_hash_name = ();
+ %email_hash_address = ();
+ %commit_author_hash = ();
+ %commit_signer_hash = ();
+ @email_to = ();
+ %hash_list_to = ();
+ @list_to = ();
+ @scm = ();
+ @web = ();
+ @subsystem = ();
+ @status = ();
+ %deduplicate_name_hash = ();
+ %deduplicate_address_hash = ();
+ if ($email_git_all_signature_types) {
+ $signature_pattern = "(.+?)[Bb][Yy]:";
+ } else {
+ $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+ }
+
+ # Find responsible parties
+
+ my %exact_pattern_match_hash = ();
+
+ foreach my $file (@files) {
+
+ my %hash;
+ my $tvi = find_first_section();
+ while ($tvi < @typevalue) {
+ my $start = find_starting_index($tvi);
+ my $end = find_ending_index($tvi);
+ my $exclude = 0;
+ my $i;
+
+ #Do not match excluded file patterns
- if (!$exclude) {
for ($i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
- if ($type eq 'F') {
+ if ($type eq 'X') {
if (file_match_pattern($file, $value)) {
- my $value_pd = ($value =~ tr@/@@);
- my $file_pd = ($file =~ tr@/@@);
- $value_pd++ if (substr($value,-1,1) ne "/");
- if ($pattern_depth == 0 ||
- (($file_pd - $value_pd) < $pattern_depth)) {
- $hash{$tvi} = $value_pd;
+ $exclude = 1;
+ last;
+ }
+ }
+ }
+ }
+
+ if (!$exclude) {
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'F') {
+ if (file_match_pattern($file, $value)) {
+ my $value_pd = ($value =~ tr@/@@);
+ my $file_pd = ($file =~ tr@/@@);
+ $value_pd++ if (substr($value,-1,1) ne "/");
+ $value_pd = -1 if ($value =~ /^\.\*/);
+ if ($value_pd >= $file_pd) {
+ $exact_pattern_match_hash{$file} = 1;
+ }
+ if ($pattern_depth == 0 ||
+ (($file_pd - $value_pd) < $pattern_depth)) {
+ $hash{$tvi} = $value_pd;
+ }
}
}
}
}
}
+ $tvi = $end + 1;
}
- $tvi = $end + 1;
- }
-
- foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
- add_categories($line);
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ add_categories($line);
if ($sections) {
my $i;
my $start = find_starting_index($line);
@@ -435,80 +590,71 @@ foreach my $file (@files) {
}
print("\n");
}
+ }
}
- if ($email && $email_git) {
- vcs_file_signoffs($file);
+ if ($keywords) {
+ @keyword_tvi = sort_and_uniq(@keyword_tvi);
+ foreach my $line (@keyword_tvi) {
+ add_categories($line);
+ }
}
- if ($email && $email_git_blame) {
- vcs_file_blame($file);
+ foreach my $email (@email_to, @list_to) {
+ $email->[0] = deduplicate_email($email->[0]);
}
-}
-if ($keywords) {
- @keyword_tvi = sort_and_uniq(@keyword_tvi);
- foreach my $line (@keyword_tvi) {
- add_categories($line);
+ foreach my $file (@files) {
+ if ($email &&
+ ($email_git || ($email_git_fallback &&
+ !$exact_pattern_match_hash{$file}))) {
+ vcs_file_signoffs($file);
+ }
+ if ($email && $email_git_blame) {
+ vcs_file_blame($file);
+ }
}
-}
-if ($email) {
- foreach my $chief (@penguin_chief) {
- if ($chief =~ m/^(.*):(.*)/) {
- my $email_address;
+ if ($email) {
+ foreach my $chief (@penguin_chief) {
+ if ($chief =~ m/^(.*):(.*)/) {
+ my $email_address;
- $email_address = format_email($1, $2, $email_usename);
- if ($email_git_penguin_chiefs) {
- push(@email_to, [$email_address, 'chief penguin']);
- } else {
- @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
+ $email_address = format_email($1, $2, $email_usename);
+ if ($email_git_penguin_chiefs) {
+ push(@email_to, [$email_address, 'chief penguin']);
+ } else {
+ @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
+ }
}
}
- }
- foreach my $email (@file_emails) {
- my ($name, $address) = parse_email($email);
+ foreach my $email (@file_emails) {
+ my ($name, $address) = parse_email($email);
- my $tmp_email = format_email($name, $address, $email_usename);
- push_email_address($tmp_email, '');
- add_role($tmp_email, 'in file');
+ my $tmp_email = format_email($name, $address, $email_usename);
+ push_email_address($tmp_email, '');
+ add_role($tmp_email, 'in file');
+ }
}
-}
-if ($email || $email_list) {
my @to = ();
- if ($email) {
- @to = (@to, @email_to);
- }
- if ($email_list) {
- @to = (@to, @list_to);
+ if ($email || $email_list) {
+ if ($email) {
+ @to = (@to, @email_to);
+ }
+ if ($email_list) {
+ @to = (@to, @list_to);
+ }
}
- output(merge_email(@to));
-}
-
-if ($scm) {
- @scm = uniq(@scm);
- output(@scm);
-}
-
-if ($status) {
- @status = uniq(@status);
- output(@status);
-}
-if ($subsystem) {
- @subsystem = uniq(@subsystem);
- output(@subsystem);
-}
+ if ($interactive) {
+ @to = interactive_get_maintainers(\@to);
+ }
-if ($web) {
- @web = uniq(@web);
- output(@web);
+ return @to;
}
-exit($exit);
-
sub file_match_pattern {
my ($file, $pattern) = @_;
if (substr($pattern, -1) eq "/") {
@@ -537,7 +683,8 @@ MAINTAINER field selection options:
--email => print email address(es) if any
--git => include recent git \*-by: signers
--git-all-signature-types => include signers regardless of signature type
- or use only ${signaturePattern} signers (default: $email_git_all_signature_types)
+ or use only ${signature_pattern} signers (default: $email_git_all_signature_types)
+ --git-fallback => use git when no exact MAINTAINERS pattern (default: $email_git_fallback)
--git-chief-penguins => include ${penguin_chiefs}
--git-min-signatures => number of signatures required (default: $email_git_min_signatures)
--git-max-maintainers => maximum maintainers to add (default: $email_git_max_maintainers)
@@ -545,6 +692,7 @@ MAINTAINER field selection options:
--git-blame => use git blame to find modified commits for patch or file
--git-since => git history to use (default: $email_git_since)
--hg-since => hg history to use (default: $email_hg_since)
+ --interactive => display a menu (mostly useful if used with the --git option)
--m => include maintainer(s) if any
--n => include name 'Full Name <addr\@domain.tld>'
--l => include list(s) if any
@@ -565,8 +713,9 @@ Output type options:
Other options:
--pattern-depth => Number of pattern directory traversals (default: 0 (all))
- --keywords => scan patch for keywords (default: 1 (on))
- --sections => print the entire subsystem sections with pattern matches
+ --keywords => scan patch for keywords (default: $keywords)
+ --sections => print all of the subsystem sections with pattern matches
+ --mailmap => use .mailmap file (default: $email_use_mailmap)
--version => show version
--help => show this help information
@@ -606,30 +755,30 @@ EOT
}
sub top_of_kernel_tree {
- my ($lk_path) = @_;
+ my ($lk_path) = @_;
- if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
- $lk_path .= "/";
- }
- if ( (-f "${lk_path}COPYING")
- && (-f "${lk_path}CREDITS")
- && (-f "${lk_path}Kbuild")
- && (-f "${lk_path}MAINTAINERS")
- && (-f "${lk_path}Makefile")
- && (-f "${lk_path}README")
- && (-d "${lk_path}Documentation")
- && (-d "${lk_path}arch")
- && (-d "${lk_path}include")
- && (-d "${lk_path}drivers")
- && (-d "${lk_path}fs")
- && (-d "${lk_path}init")
- && (-d "${lk_path}ipc")
- && (-d "${lk_path}kernel")
- && (-d "${lk_path}lib")
- && (-d "${lk_path}scripts")) {
- return 1;
- }
- return 0;
+ if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
+ $lk_path .= "/";
+ }
+ if ( (-f "${lk_path}COPYING")
+ && (-f "${lk_path}CREDITS")
+ && (-f "${lk_path}Kbuild")
+ && (-f "${lk_path}MAINTAINERS")
+ && (-f "${lk_path}Makefile")
+ && (-f "${lk_path}README")
+ && (-d "${lk_path}Documentation")
+ && (-d "${lk_path}arch")
+ && (-d "${lk_path}include")
+ && (-d "${lk_path}drivers")
+ && (-d "${lk_path}fs")
+ && (-d "${lk_path}init")
+ && (-d "${lk_path}ipc")
+ && (-d "${lk_path}kernel")
+ && (-d "${lk_path}lib")
+ && (-d "${lk_path}scripts")) {
+ return 1;
+ }
+ return 0;
}
sub parse_email {
@@ -821,11 +970,19 @@ sub add_categories {
}
if ($list_additional =~ m/subscribers-only/) {
if ($email_subscriber_list) {
- push(@list_to, [$list_address, "subscriber list${list_role}"]);
+ if (!$hash_list_to{lc($list_address)}) {
+ $hash_list_to{lc($list_address)} = 1;
+ push(@list_to, [$list_address,
+ "subscriber list${list_role}"]);
+ }
}
} else {
if ($email_list) {
- push(@list_to, [$list_address, "open list${list_role}"]);
+ if (!$hash_list_to{lc($list_address)}) {
+ $hash_list_to{lc($list_address)} = 1;
+ push(@list_to, [$list_address,
+ "open list${list_role}"]);
+ }
}
}
} elsif ($ptype eq "M") {
@@ -856,15 +1013,12 @@ sub add_categories {
}
}
-my %email_hash_name;
-my %email_hash_address;
-
sub email_inuse {
my ($name, $address) = @_;
return 1 if (($name eq "") && ($address eq ""));
- return 1 if (($name ne "") && exists($email_hash_name{$name}));
- return 1 if (($address ne "") && exists($email_hash_address{$address}));
+ return 1 if (($name ne "") && exists($email_hash_name{lc($name)}));
+ return 1 if (($address ne "") && exists($email_hash_address{lc($address)}));
return 0;
}
@@ -882,8 +1036,8 @@ sub push_email_address {
push(@email_to, [format_email($name, $address, $email_usename), $role]);
} elsif (!email_inuse($name, $address)) {
push(@email_to, [format_email($name, $address, $email_usename), $role]);
- $email_hash_name{$name}++;
- $email_hash_address{$address}++;
+ $email_hash_name{lc($name)}++ if ($name ne "");
+ $email_hash_address{lc($address)}++;
}
return 1;
@@ -952,30 +1106,69 @@ sub which {
return "";
}
-sub mailmap {
- my (@lines) = @_;
- my %hash;
+sub which_conf {
+ my ($conf) = @_;
- foreach my $line (@lines) {
- my ($name, $address) = parse_email($line);
- if (!exists($hash{$name})) {
- $hash{$name} = $address;
- } elsif ($address ne $hash{$name}) {
- $address = $hash{$name};
- $line = format_email($name, $address, $email_usename);
+ foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
+ if (-e "$path/$conf") {
+ return "$path/$conf";
}
- if (exists($mailmap{$name})) {
- my $obj = $mailmap{$name};
- foreach my $map_address (@$obj) {
- if (($map_address eq $address) &&
- ($map_address ne $hash{$name})) {
- $line = format_email($name, $hash{$name}, $email_usename);
- }
- }
+ }
+
+ return "";
+}
+
+sub mailmap_email {
+ my ($line) = @_;
+
+ my ($name, $address) = parse_email($line);
+ my $email = format_email($name, $address, 1);
+ my $real_name = $name;
+ my $real_address = $address;
+
+ if (exists $mailmap->{names}->{$email} ||
+ exists $mailmap->{addresses}->{$email}) {
+ if (exists $mailmap->{names}->{$email}) {
+ $real_name = $mailmap->{names}->{$email};
+ }
+ if (exists $mailmap->{addresses}->{$email}) {
+ $real_address = $mailmap->{addresses}->{$email};
+ }
+ } else {
+ if (exists $mailmap->{names}->{$address}) {
+ $real_name = $mailmap->{names}->{$address};
+ }
+ if (exists $mailmap->{addresses}->{$address}) {
+ $real_address = $mailmap->{addresses}->{$address};
}
}
+ return format_email($real_name, $real_address, 1);
+}
- return @lines;
+sub mailmap {
+ my (@addresses) = @_;
+
+ my @mapped_emails = ();
+ foreach my $line (@addresses) {
+ push(@mapped_emails, mailmap_email($line));
+ }
+ merge_by_realname(@mapped_emails) if ($email_use_mailmap);
+ return @mapped_emails;
+}
+
+sub merge_by_realname {
+ my %address_map;
+ my (@emails) = @_;
+
+ foreach my $email (@emails) {
+ my ($name, $address) = parse_email($email);
+ if (exists $address_map{$name}) {
+ $address = $address_map{$name};
+ $email = format_email($name, $address, 1);
+ } else {
+ $address_map{$name} = $address;
+ }
+ }
}
sub git_execute_cmd {
@@ -999,10 +1192,30 @@ sub hg_execute_cmd {
return @lines;
}
+sub extract_formatted_signatures {
+ my (@signature_lines) = @_;
+
+ my @type = @signature_lines;
+
+ s/\s*(.*):.*/$1/ for (@type);
+
+ # cut -f2- -d":"
+ s/\s*.*:\s*(.+)\s*/$1/ for (@signature_lines);
+
+## Reformat email addresses (with names) to avoid badly written signatures
+
+ foreach my $signer (@signature_lines) {
+ $signer = deduplicate_email($signer);
+ }
+
+ return (\@type, \@signature_lines);
+}
+
sub vcs_find_signers {
my ($cmd) = @_;
- my @lines = ();
my $commits;
+ my @lines = ();
+ my @signatures = ();
@lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
@@ -1010,21 +1223,48 @@ sub vcs_find_signers {
$commits = grep(/$pattern/, @lines); # of commits
- @lines = grep(/^[ \t]*${signaturePattern}.*\@.*$/, @lines);
+ @signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines);
+
+ return (0, @signatures) if !@signatures;
+
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ if (!$email_git_penguin_chiefs) {
+ @signatures = grep(!/${penguin_chiefs}/i, @signatures);
+ }
+
+ my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+
+ return ($commits, @$signers_ref);
+}
+
+sub vcs_find_author {
+ my ($cmd) = @_;
+ my @lines = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
if (!$email_git_penguin_chiefs) {
@lines = grep(!/${penguin_chiefs}/i, @lines);
}
- # cut -f2- -d":"
- s/.*:\s*(.+)\s*/$1/ for (@lines);
-## Reformat email addresses (with names) to avoid badly written signatures
+ return @lines if !@lines;
+ my @authors = ();
foreach my $line (@lines) {
- my ($name, $address) = parse_email($line);
- $line = format_email($name, $address, 1);
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ my ($name, $address) = parse_email($author);
+ $author = format_email($name, $address, 1);
+ push(@authors, $author);
+ }
}
- return ($commits, @lines);
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ return @authors;
}
sub vcs_save_commits {
@@ -1084,6 +1324,10 @@ sub vcs_blame {
@commits = vcs_save_commits($cmd);
}
+ foreach my $commit (@commits) {
+ $commit =~ s/^\^//g;
+ }
+
return @commits;
}
@@ -1092,7 +1336,7 @@ sub vcs_exists {
%VCS_cmds = %VCS_cmds_git;
return 1 if eval $VCS_cmds{"available"};
%VCS_cmds = %VCS_cmds_hg;
- return 1 if eval $VCS_cmds{"available"};
+ return 2 if eval $VCS_cmds{"available"};
%VCS_cmds = ();
if (!$printed_novcs) {
warn("$P: No supported VCS found. Add --nogit to options?\n");
@@ -1104,6 +1348,405 @@ sub vcs_exists {
return 0;
}
+sub vcs_is_git {
+ vcs_exists();
+ return $vcs_used == 1;
+}
+
+sub vcs_is_hg {
+ return $vcs_used == 2;
+}
+
+sub interactive_get_maintainers {
+ my ($list_ref) = @_;
+ my @list = @$list_ref;
+
+ vcs_exists();
+
+ my %selected;
+ my %authored;
+ my %signed;
+ my $count = 0;
+ my $maintained = 0;
+ foreach my $entry (@list) {
+ $maintained = 1 if ($entry->[1] =~ /^(maintainer|supporter)/i);
+ $selected{$count} = 1;
+ $authored{$count} = 0;
+ $signed{$count} = 0;
+ $count++;
+ }
+
+ #menu loop
+ my $done = 0;
+ my $print_options = 0;
+ my $redraw = 1;
+ while (!$done) {
+ $count = 0;
+ if ($redraw) {
+ printf STDERR "\n%1s %2s %-65s",
+ "*", "#", "email/list and role:stats";
+ if ($email_git ||
+ ($email_git_fallback && !$maintained) ||
+ $email_git_blame) {
+ print STDERR "auth sign";
+ }
+ print STDERR "\n";
+ foreach my $entry (@list) {
+ my $email = $entry->[0];
+ my $role = $entry->[1];
+ my $sel = "";
+ $sel = "*" if ($selected{$count});
+ my $commit_author = $commit_author_hash{$email};
+ my $commit_signer = $commit_signer_hash{$email};
+ my $authored = 0;
+ my $signed = 0;
+ $authored++ for (@{$commit_author});
+ $signed++ for (@{$commit_signer});
+ printf STDERR "%1s %2d %-65s", $sel, $count + 1, $email;
+ printf STDERR "%4d %4d", $authored, $signed
+ if ($authored > 0 || $signed > 0);
+ printf STDERR "\n %s\n", $role;
+ if ($authored{$count}) {
+ my $commit_author = $commit_author_hash{$email};
+ foreach my $ref (@{$commit_author}) {
+ print STDERR " Author: @{$ref}[1]\n";
+ }
+ }
+ if ($signed{$count}) {
+ my $commit_signer = $commit_signer_hash{$email};
+ foreach my $ref (@{$commit_signer}) {
+ print STDERR " @{$ref}[2]: @{$ref}[1]\n";
+ }
+ }
+
+ $count++;
+ }
+ }
+ my $date_ref = \$email_git_since;
+ $date_ref = \$email_hg_since if (vcs_is_hg());
+ if ($print_options) {
+ $print_options = 0;
+ if (vcs_exists()) {
+ print STDERR <<EOT
+
+Version Control options:
+g use git history [$email_git]
+gf use git-fallback [$email_git_fallback]
+b use git blame [$email_git_blame]
+bs use blame signatures [$email_git_blame_signatures]
+c# minimum commits [$email_git_min_signatures]
+%# min percent [$email_git_min_percent]
+d# history to use [$$date_ref]
+x# max maintainers [$email_git_max_maintainers]
+t all signature types [$email_git_all_signature_types]
+m use .mailmap [$email_use_mailmap]
+EOT
+ }
+ print STDERR <<EOT
+
+Additional options:
+0 toggle all
+tm toggle maintainers
+tg toggle git entries
+tl toggle open list entries
+ts toggle subscriber list entries
+f emails in file [$file_emails]
+k keywords in file [$keywords]
+r remove duplicates [$email_remove_duplicates]
+p# pattern match depth [$pattern_depth]
+EOT
+ }
+ print STDERR
+"\n#(toggle), A#(author), S#(signed) *(all), ^(none), O(options), Y(approve): ";
+
+ my $input = <STDIN>;
+ chomp($input);
+
+ $redraw = 1;
+ my $rerun = 0;
+ my @wish = split(/[, ]+/, $input);
+ foreach my $nr (@wish) {
+ $nr = lc($nr);
+ my $sel = substr($nr, 0, 1);
+ my $str = substr($nr, 1);
+ my $val = 0;
+ $val = $1 if $str =~ /^(\d+)$/;
+
+ if ($sel eq "y") {
+ $interactive = 0;
+ $done = 1;
+ $output_rolestats = 0;
+ $output_roles = 0;
+ last;
+ } elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
+ $selected{$nr - 1} = !$selected{$nr - 1};
+ } elsif ($sel eq "*" || $sel eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($sel eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = $toggle;
+ }
+ } elsif ($sel eq "0") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i};
+ }
+ } elsif ($sel eq "t") {
+ if (lc($str) eq "m") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(maintainer|supporter)/i);
+ }
+ } elsif (lc($str) eq "g") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(author|commit|signer)/i);
+ }
+ } elsif (lc($str) eq "l") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(open list)/i);
+ }
+ } elsif (lc($str) eq "s") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(subscriber list)/i);
+ }
+ }
+ } elsif ($sel eq "a") {
+ if ($val > 0 && $val <= $count) {
+ $authored{$val - 1} = !$authored{$val - 1};
+ } elsif ($str eq '*' || $str eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($str eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $authored{$i} = $toggle;
+ }
+ }
+ } elsif ($sel eq "s") {
+ if ($val > 0 && $val <= $count) {
+ $signed{$val - 1} = !$signed{$val - 1};
+ } elsif ($str eq '*' || $str eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($str eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $signed{$i} = $toggle;
+ }
+ }
+ } elsif ($sel eq "o") {
+ $print_options = 1;
+ $redraw = 1;
+ } elsif ($sel eq "g") {
+ if ($str eq "f") {
+ bool_invert(\$email_git_fallback);
+ } else {
+ bool_invert(\$email_git);
+ }
+ $rerun = 1;
+ } elsif ($sel eq "b") {
+ if ($str eq "s") {
+ bool_invert(\$email_git_blame_signatures);
+ } else {
+ bool_invert(\$email_git_blame);
+ }
+ $rerun = 1;
+ } elsif ($sel eq "c") {
+ if ($val > 0) {
+ $email_git_min_signatures = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "x") {
+ if ($val > 0) {
+ $email_git_max_maintainers = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "%") {
+ if ($str ne "" && $val >= 0) {
+ $email_git_min_percent = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "d") {
+ if (vcs_is_git()) {
+ $email_git_since = $str;
+ } elsif (vcs_is_hg()) {
+ $email_hg_since = $str;
+ }
+ $rerun = 1;
+ } elsif ($sel eq "t") {
+ bool_invert(\$email_git_all_signature_types);
+ $rerun = 1;
+ } elsif ($sel eq "f") {
+ bool_invert(\$file_emails);
+ $rerun = 1;
+ } elsif ($sel eq "r") {
+ bool_invert(\$email_remove_duplicates);
+ $rerun = 1;
+ } elsif ($sel eq "m") {
+ bool_invert(\$email_use_mailmap);
+ read_mailmap();
+ $rerun = 1;
+ } elsif ($sel eq "k") {
+ bool_invert(\$keywords);
+ $rerun = 1;
+ } elsif ($sel eq "p") {
+ if ($str ne "" && $val >= 0) {
+ $pattern_depth = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "h" || $sel eq "?") {
+ print STDERR <<EOT
+
+Interactive mode allows you to select the various maintainers, submitters,
+commit signers and mailing lists that could be CC'd on a patch.
+
+Any *'d entry is selected.
+
+If you have git or hg installed, you can choose to summarize the commit
+history of files in the patch. Also, each line of the current file can
+be matched to its commit author and that commits signers with blame.
+
+Various knobs exist to control the length of time for active commit
+tracking, the maximum number of commit authors and signers to add,
+and such.
+
+Enter selections at the prompt until you are satisfied that the selected
+maintainers are appropriate. You may enter multiple selections separated
+by either commas or spaces.
+
+EOT
+ } else {
+ print STDERR "invalid option: '$nr'\n";
+ $redraw = 0;
+ }
+ }
+ if ($rerun) {
+ print STDERR "git-blame can be very slow, please have patience..."
+ if ($email_git_blame);
+ goto &get_maintainers;
+ }
+ }
+
+ #drop not selected entries
+ $count = 0;
+ my @new_emailto = ();
+ foreach my $entry (@list) {
+ if ($selected{$count}) {
+ push(@new_emailto, $list[$count]);
+ }
+ $count++;
+ }
+ return @new_emailto;
+}
+
+sub bool_invert {
+ my ($bool_ref) = @_;
+
+ if ($$bool_ref) {
+ $$bool_ref = 0;
+ } else {
+ $$bool_ref = 1;
+ }
+}
+
+sub deduplicate_email {
+ my ($email) = @_;
+
+ my $matched = 0;
+ my ($name, $address) = parse_email($email);
+ $email = format_email($name, $address, 1);
+ $email = mailmap_email($email);
+
+ return $email if (!$email_remove_duplicates);
+
+ ($name, $address) = parse_email($email);
+
+ if ($name ne "" && $deduplicate_name_hash{lc($name)}) {
+ $name = $deduplicate_name_hash{lc($name)}->[0];
+ $address = $deduplicate_name_hash{lc($name)}->[1];
+ $matched = 1;
+ } elsif ($deduplicate_address_hash{lc($address)}) {
+ $name = $deduplicate_address_hash{lc($address)}->[0];
+ $address = $deduplicate_address_hash{lc($address)}->[1];
+ $matched = 1;
+ }
+ if (!$matched) {
+ $deduplicate_name_hash{lc($name)} = [ $name, $address ];
+ $deduplicate_address_hash{lc($address)} = [ $name, $address ];
+ }
+ $email = format_email($name, $address, 1);
+ $email = mailmap_email($email);
+ return $email;
+}
+
+sub save_commits_by_author {
+ my (@lines) = @_;
+
+ my @authors = ();
+ my @commits = ();
+ my @subjects = ();
+
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ $author = deduplicate_email($author);
+ push(@authors, $author);
+ }
+ push(@commits, $1) if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+ push(@subjects, $1) if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+ }
+
+ for (my $i = 0; $i < @authors; $i++) {
+ my $exists = 0;
+ foreach my $ref(@{$commit_author_hash{$authors[$i]}}) {
+ if (@{$ref}[0] eq $commits[$i] &&
+ @{$ref}[1] eq $subjects[$i]) {
+ $exists = 1;
+ last;
+ }
+ }
+ if (!$exists) {
+ push(@{$commit_author_hash{$authors[$i]}},
+ [ ($commits[$i], $subjects[$i]) ]);
+ }
+ }
+}
+
+sub save_commits_by_signer {
+ my (@lines) = @_;
+
+ my $commit = "";
+ my $subject = "";
+
+ foreach my $line (@lines) {
+ $commit = $1 if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+ $subject = $1 if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+ if ($line =~ /^[ \t]*${signature_pattern}.*\@.*$/) {
+ my @signatures = ($line);
+ my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+ my @types = @$types_ref;
+ my @signers = @$signers_ref;
+
+ my $type = $types[0];
+ my $signer = $signers[0];
+
+ $signer = deduplicate_email($signer);
+
+ my $exists = 0;
+ foreach my $ref(@{$commit_signer_hash{$signer}}) {
+ if (@{$ref}[0] eq $commit &&
+ @{$ref}[1] eq $subject &&
+ @{$ref}[2] eq $type) {
+ $exists = 1;
+ last;
+ }
+ }
+ if (!$exists) {
+ push(@{$commit_signer_hash{$signer}},
+ [ ($commit, $subject, $type) ]);
+ }
+ }
+ }
+}
+
sub vcs_assign {
my ($role, $divisor, @lines) = @_;
@@ -1117,9 +1760,9 @@ sub vcs_assign {
$divisor = 1;
}
- if ($email_remove_duplicates) {
- @lines = mailmap(@lines);
- }
+ @lines = mailmap(@lines);
+
+ return if (@lines <= 0);
@lines = sort(@lines);
@@ -1152,12 +1795,18 @@ sub vcs_file_signoffs {
my @signers = ();
my $commits;
- return if (!vcs_exists());
+ $vcs_used = vcs_exists();
+ return if (!$vcs_used);
my $cmd = $VCS_cmds{"find_signers_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
($commits, @signers) = vcs_find_signers($cmd);
+
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
+
vcs_assign("commit_signer", $commits, @signers);
}
@@ -1165,29 +1814,114 @@ sub vcs_file_blame {
my ($file) = @_;
my @signers = ();
+ my @all_commits = ();
my @commits = ();
my $total_commits;
+ my $total_lines;
- return if (!vcs_exists());
+ $vcs_used = vcs_exists();
+ return if (!$vcs_used);
- @commits = vcs_blame($file);
- @commits = uniq(@commits);
+ @all_commits = vcs_blame($file);
+ @commits = uniq(@all_commits);
$total_commits = @commits;
+ $total_lines = @all_commits;
- foreach my $commit (@commits) {
- my $commit_count;
- my @commit_signers = ();
+ if ($email_git_blame_signatures) {
+ if (vcs_is_hg()) {
+ my $commit_count;
+ my @commit_signers = ();
+ my $commit = join(" -r ", @commits);
+ my $cmd;
+
+ $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
+
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+ push(@signers, @commit_signers);
+ } else {
+ foreach my $commit (@commits) {
+ my $commit_count;
+ my @commit_signers = ();
+ my $cmd;
- my $cmd = $VCS_cmds{"find_commit_signers_cmd"};
- $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
- ($commit_count, @commit_signers) = vcs_find_signers($cmd);
- push(@signers, @commit_signers);
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+ push(@signers, @commit_signers);
+ }
+ }
}
if ($from_filename) {
+ if ($output_rolestats) {
+ my @blame_signers;
+ if (vcs_is_hg()) {{ # Double brace for last exit
+ my $commit_count;
+ my @commit_signers = ();
+ @commits = uniq(@commits);
+ @commits = sort(@commits);
+ my $commit = join(" -r ", @commits);
+ my $cmd;
+
+ $cmd = $VCS_cmds{"find_commit_author_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
+
+ my @lines = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ if (!$email_git_penguin_chiefs) {
+ @lines = grep(!/${penguin_chiefs}/i, @lines);
+ }
+
+ last if !@lines;
+
+ my @authors = ();
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ $author = deduplicate_email($author);
+ push(@authors, $author);
+ }
+ }
+
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ push(@signers, @authors);
+ }}
+ else {
+ foreach my $commit (@commits) {
+ my $i;
+ my $cmd = $VCS_cmds{"find_commit_author_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ my @author = vcs_find_author($cmd);
+ next if !@author;
+
+ my $formatted_author = deduplicate_email($author[0]);
+
+ my $count = grep(/$commit/, @all_commits);
+ for ($i = 0; $i < $count ; $i++) {
+ push(@blame_signers, $formatted_author);
+ }
+ }
+ }
+ if (@blame_signers) {
+ vcs_assign("authored lines", $total_lines, @blame_signers);
+ }
+ }
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
vcs_assign("commits", $total_commits, @signers);
} else {
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
vcs_assign("modified commits", $total_commits, @signers);
}
}
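The read_mailmap() routine added above accepts the same four entry forms as git-shortlog's .mailmap. A hypothetical excerpt (all names and addresses invented) with one line of each form:

	# name1 <mail1>
	Jane Developer <jane@example.org>
	# <mail1> <mail2>
	<jane@example.org> <jd@oldhost.example.net>
	# name1 <mail1> <mail2>
	Jane Developer <jane@example.org> <jd@oldhost.example.net>
	# name1 <mail1> name2 <mail2>
	Jane Developer <jane@example.org> jd <jd@oldhost.example.net>

The first form fixes the display name for an address, the second redirects a wrong address to the canonical one, and the last two do both, keyed on the old address or on the old name/address pair.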
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 82396050f186..36cc0cc39e78 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -72,10 +72,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
path_get(&root);
}
- spin_lock(&dcache_lock);
tmp = root;
res = __d_path(path, &tmp, buf, buflen);
- spin_unlock(&dcache_lock);
*name = res;
/* handle error conditions - and still allow a partial path to
diff --git a/security/inode.c b/security/inode.c
index 88839866cbcd..cb8f47c66a58 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -61,6 +61,7 @@ static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
struct inode *inode = new_inode(sb);
if (inode) {
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3fbcd1dda0ef..ac79032bdf23 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -70,6 +70,7 @@ int ima_init(void);
void ima_cleanup(void);
int ima_fs_init(void);
void ima_fs_cleanup(void);
+int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode);
int ima_calc_hash(struct file *file, char *digest);
@@ -96,19 +97,16 @@ static inline unsigned long ima_hash_key(u8 *digest)
}
/* iint cache flags */
-#define IMA_MEASURED 1
+#define IMA_MEASURED 0x01
/* integrity data associated with an inode */
struct ima_iint_cache {
+ struct rb_node rb_node; /* rooted in ima_iint_tree */
+ struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
- unsigned long flags;
+ unsigned char flags;
u8 digest[IMA_DIGEST_SIZE];
struct mutex mutex; /* protects: version, flags, digest */
- long readcount; /* measured files readcount */
- long writecount; /* measured files writecount */
- long opencount; /* opens reference count */
- struct kref refcount; /* ima_iint_cache reference count */
- struct rcu_head rcu;
};
/* LIM API function definitions */
@@ -122,13 +120,11 @@ int ima_store_template(struct ima_template_entry *entry, int violation,
void ima_template_show(struct seq_file *m, void *e,
enum ima_show_type show);
-/* radix tree calls to lookup, insert, delete
+/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
struct ima_iint_cache *ima_iint_insert(struct inode *inode);
-struct ima_iint_cache *ima_iint_find_get(struct inode *inode);
-void iint_free(struct kref *kref);
-void iint_rcu_free(struct rcu_head *rcu);
+struct ima_iint_cache *ima_iint_find(struct inode *inode);
/* IMA policy related functions */
enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 52015d098fdf..d3963de6003d 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -116,7 +116,7 @@ int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
{
int must_measure;
- if (iint->flags & IMA_MEASURED)
+ if (iint && iint->flags & IMA_MEASURED)
return 1;
must_measure = ima_match_policy(inode, function, mask);
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index afba4aef812f..c442e47b6785 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -12,98 +12,119 @@
* File: ima_iint.c
* - implements the IMA hooks: ima_inode_alloc, ima_inode_free
* - cache integrity information associated with an inode
- * using a radix tree.
+ * using a rbtree tree.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include "ima.h"
-RADIX_TREE(ima_iint_store, GFP_ATOMIC);
-DEFINE_SPINLOCK(ima_iint_lock);
+static struct rb_root ima_iint_tree = RB_ROOT;
+static DEFINE_SPINLOCK(ima_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
int iint_initialized = 0;
-/* ima_iint_find_get - return the iint associated with an inode
- *
- * ima_iint_find_get gets a reference to the iint. Caller must
- * remember to put the iint reference.
+/*
+ * __ima_iint_find - return the iint associated with an inode
*/
-struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
+static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
{
struct ima_iint_cache *iint;
+ struct rb_node *n = ima_iint_tree.rb_node;
+
+ assert_spin_locked(&ima_iint_lock);
+
+ while (n) {
+ iint = rb_entry(n, struct ima_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
- rcu_read_lock();
- iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
- if (!iint)
- goto out;
- kref_get(&iint->refcount);
-out:
- rcu_read_unlock();
return iint;
}
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
+/*
+ * ima_iint_find - return the iint associated with an inode
*/
-int ima_inode_alloc(struct inode *inode)
+struct ima_iint_cache *ima_iint_find(struct inode *inode)
{
- struct ima_iint_cache *iint = NULL;
- int rc = 0;
-
- iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!iint)
- return -ENOMEM;
+ struct ima_iint_cache *iint;
- rc = radix_tree_preload(GFP_NOFS);
- if (rc < 0)
- goto out;
+ if (!IS_IMA(inode))
+ return NULL;
spin_lock(&ima_iint_lock);
- rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
+ iint = __ima_iint_find(inode);
spin_unlock(&ima_iint_lock);
- radix_tree_preload_end();
-out:
- if (rc < 0)
- kmem_cache_free(iint_cache, iint);
- return rc;
+ return iint;
}
-/* iint_free - called when the iint refcount goes to zero */
-void iint_free(struct kref *kref)
+static void iint_free(struct ima_iint_cache *iint)
{
- struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
- refcount);
iint->version = 0;
iint->flags = 0UL;
- if (iint->readcount != 0) {
- printk(KERN_INFO "%s: readcount: %ld\n", __func__,
- iint->readcount);
- iint->readcount = 0;
- }
- if (iint->writecount != 0) {
- printk(KERN_INFO "%s: writecount: %ld\n", __func__,
- iint->writecount);
- iint->writecount = 0;
- }
- if (iint->opencount != 0) {
- printk(KERN_INFO "%s: opencount: %ld\n", __func__,
- iint->opencount);
- iint->opencount = 0;
- }
- kref_init(&iint->refcount);
kmem_cache_free(iint_cache, iint);
}
-void iint_rcu_free(struct rcu_head *rcu_head)
+/**
+ * ima_inode_alloc - allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ */
+int ima_inode_alloc(struct inode *inode)
{
- struct ima_iint_cache *iint = container_of(rcu_head,
- struct ima_iint_cache, rcu);
- kref_put(&iint->refcount, iint_free);
+ struct rb_node **p;
+ struct rb_node *new_node, *parent = NULL;
+ struct ima_iint_cache *new_iint, *test_iint;
+ int rc;
+
+ new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!new_iint)
+ return -ENOMEM;
+
+ new_iint->inode = inode;
+ new_node = &new_iint->rb_node;
+
+ mutex_lock(&inode->i_mutex); /* i_flags */
+ spin_lock(&ima_iint_lock);
+
+ p = &ima_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct ima_iint_cache, rb_node);
+
+ rc = -EEXIST;
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else if (inode > test_iint->inode)
+ p = &(*p)->rb_right;
+ else
+ goto out_err;
+ }
+
+ inode->i_flags |= S_IMA;
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &ima_iint_tree);
+
+ spin_unlock(&ima_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+
+ return 0;
+out_err:
+ spin_unlock(&ima_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+ iint_free(new_iint);
+
+ return rc;
}
/**
@@ -116,11 +137,20 @@ void ima_inode_free(struct inode *inode)
{
struct ima_iint_cache *iint;
+ if (inode->i_readcount)
+ printk(KERN_INFO "%s: readcount: %u\n", __func__, inode->i_readcount);
+
+ inode->i_readcount = 0;
+
+ if (!IS_IMA(inode))
+ return;
+
spin_lock(&ima_iint_lock);
- iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
+ iint = __ima_iint_find(inode);
+ rb_erase(&iint->rb_node, &ima_iint_tree);
spin_unlock(&ima_iint_lock);
- if (iint)
- call_rcu(&iint->rcu, iint_rcu_free);
+
+ iint_free(iint);
}
static void init_once(void *foo)
@@ -131,10 +161,6 @@ static void init_once(void *foo)
iint->version = 0;
iint->flags = 0UL;
mutex_init(&iint->mutex);
- iint->readcount = 0;
- iint->writecount = 0;
- iint->opencount = 0;
- kref_init(&iint->refcount);
}
static int __init ima_iintcache_init(void)
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index e662b89d4079..203de979d305 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -85,50 +85,6 @@ out:
return found;
}
-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
- struct ima_iint_cache *iint,
- struct inode *inode,
- const unsigned char *filename)
-{
- switch (error) {
- case TOMTOU:
- if (iint->readcount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "ToMToU");
- break;
- case OPEN_WRITERS:
- if (iint->writecount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "open_writers");
- break;
- }
-}
-
-/*
- * Update the counts given an fmode_t
- */
-static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
-{
- BUG_ON(!mutex_is_locked(&iint->mutex));
-
- iint->opencount++;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount++;
- if (mode & FMODE_WRITE)
- iint->writecount++;
-}
-
/*
* ima_counts_get - increment file counts
*
@@ -145,62 +101,101 @@ void ima_counts_get(struct file *file)
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
fmode_t mode = file->f_mode;
- struct ima_iint_cache *iint;
int rc;
+ bool send_tomtou = false, send_writers = false;
- if (!iint_initialized || !S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return;
- mutex_lock(&iint->mutex);
+
+ spin_lock(&inode->i_lock);
+
if (!ima_initialized)
goto out;
- rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
- if (rc < 0)
- goto out;
if (mode & FMODE_WRITE) {
- ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
+ if (inode->i_readcount && IS_IMA(inode))
+ send_tomtou = true;
goto out;
}
- ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
+
+ rc = ima_must_measure(NULL, inode, MAY_READ, FILE_CHECK);
+ if (rc < 0)
+ goto out;
+
+ if (atomic_read(&inode->i_writecount) > 0)
+ send_writers = true;
out:
- ima_inc_counts(iint, file->f_mode);
- mutex_unlock(&iint->mutex);
+ /* remember the vfs deals with i_writecount */
+ if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ inode->i_readcount++;
- kref_put(&iint->refcount, iint_free);
+ spin_unlock(&inode->i_lock);
+
+ if (send_tomtou)
+ ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
+ "ToMToU");
+ if (send_writers)
+ ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
+ "open_writers");
}
/*
* Decrement ima counts
*/
-static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
- struct file *file)
+static void ima_dec_counts(struct inode *inode, struct file *file)
{
mode_t mode = file->f_mode;
- BUG_ON(!mutex_is_locked(&iint->mutex));
- iint->opencount--;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount--;
- if (mode & FMODE_WRITE) {
- iint->writecount--;
- if (iint->writecount == 0) {
- if (iint->version != inode->i_version)
- iint->flags &= ~IMA_MEASURED;
+ assert_spin_locked(&inode->i_lock);
+
+ if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
+ if (unlikely(inode->i_readcount == 0)) {
+ if (!ima_limit_imbalance(file)) {
+ printk(KERN_INFO "%s: open/free imbalance (r:%u)\n",
+ __func__, inode->i_readcount);
+ dump_stack();
+ }
+ return;
}
+ inode->i_readcount--;
}
+}
- if (((iint->opencount < 0) ||
- (iint->readcount < 0) ||
- (iint->writecount < 0)) &&
- !ima_limit_imbalance(file)) {
- printk(KERN_INFO "%s: open/free imbalance (r:%ld w:%ld o:%ld)\n",
- __func__, iint->readcount, iint->writecount,
- iint->opencount);
- dump_stack();
- }
+static void ima_check_last_writer(struct ima_iint_cache *iint,
+ struct inode *inode,
+ struct file *file)
+{
+ mode_t mode = file->f_mode;
+
+ BUG_ON(!mutex_is_locked(&iint->mutex));
+ assert_spin_locked(&inode->i_lock);
+
+ if (mode & FMODE_WRITE &&
+ atomic_read(&inode->i_writecount) == 1 &&
+ iint->version != inode->i_version)
+ iint->flags &= ~IMA_MEASURED;
+}
+
+static void ima_file_free_iint(struct ima_iint_cache *iint, struct inode *inode,
+ struct file *file)
+{
+ mutex_lock(&iint->mutex);
+ spin_lock(&inode->i_lock);
+
+ ima_dec_counts(inode, file);
+ ima_check_last_writer(iint, inode, file);
+
+ spin_unlock(&inode->i_lock);
+ mutex_unlock(&iint->mutex);
+}
+
+static void ima_file_free_noiint(struct inode *inode, struct file *file)
+{
+ spin_lock(&inode->i_lock);
+
+ ima_dec_counts(inode, file);
+
+ spin_unlock(&inode->i_lock);
}
/**
@@ -208,7 +203,7 @@ static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
* @file: pointer to file structure being freed
*
* Flag files that changed, based on i_version;
- * and decrement the iint readcount/writecount.
+ * and decrement the i_readcount.
*/
void ima_file_free(struct file *file)
{
@@ -217,14 +212,14 @@ void ima_file_free(struct file *file)
if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return;
- mutex_lock(&iint->mutex);
- ima_dec_counts(iint, inode, file);
- mutex_unlock(&iint->mutex);
- kref_put(&iint->refcount, iint_free);
+ iint = ima_iint_find(inode);
+
+ if (iint)
+ ima_file_free_iint(iint, inode, file);
+ else
+ ima_file_free_noiint(inode, file);
+
}
static int process_measurement(struct file *file, const unsigned char *filename,
@@ -236,11 +231,21 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return -ENOMEM;
+
+ rc = ima_must_measure(NULL, inode, mask, function);
+ if (rc != 0)
+ return rc;
+retry:
+ iint = ima_iint_find(inode);
+ if (!iint) {
+ rc = ima_inode_alloc(inode);
+ if (!rc || rc == -EEXIST)
+ goto retry;
+ return rc;
+ }
mutex_lock(&iint->mutex);
+
rc = ima_must_measure(iint, inode, mask, function);
if (rc != 0)
goto out;
@@ -250,7 +255,6 @@ static int process_measurement(struct file *file, const unsigned char *filename,
ima_store_measurement(iint, file, filename);
out:
mutex_unlock(&iint->mutex);
- kref_put(&iint->refcount, iint_free);
return rc;
}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index f8e7251ae2c8..504bdd2452bd 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -207,7 +207,7 @@ static int install_process_keyring(void)
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ?: 0;
+ return ret != -EEXIST ? ret : 0;
}
return commit_creds(new);
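One note on the security/keys/process_keys.c hunk above: the removed statement relies on the GNU "?:" shorthand, so "ret != -EEXIST ?: 0" evaluates to the result of the comparison (1) whenever ret holds any error other than -EEXIST, and the real errno is lost. A minimal sketch of the difference, assuming ret holds -ENOMEM:

	int ret = -ENOMEM;
	int before = (ret != -EEXIST) ?: 0;	/* yields 1, the comparison result */
	int after  = (ret != -EEXIST) ? ret : 0;	/* yields -ENOMEM, as intended */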
diff --git a/security/security.c b/security/security.c
index b50f472061a4..3ef5e2a7a741 100644
--- a/security/security.c
+++ b/security/security.c
@@ -325,16 +325,8 @@ EXPORT_SYMBOL(security_sb_parse_opts_str);
int security_inode_alloc(struct inode *inode)
{
- int ret;
-
inode->i_security = NULL;
- ret = security_ops->inode_alloc_security(inode);
- if (ret)
- return ret;
- ret = ima_inode_alloc(inode);
- if (ret)
- security_inode_free(inode);
- return ret;
+ return security_ops->inode_alloc_security(inode);
}
void security_inode_free(struct inode *inode)
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 87e0556bae70..55a755c1a1bd 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -978,6 +978,7 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode)
struct inode *ret = new_inode(sb);
if (ret) {
+ ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
}
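The one-line additions to get_inode() and sel_make_inode() above follow from the change elsewhere in this series that stops new_inode() from assigning a default i_ino: pseudo filesystems are now expected to pick their own inode numbers, typically via get_next_ino(). A minimal sketch of the resulting pattern, assuming a helper much like the two shown:

	struct inode *inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();	/* no longer set by new_inode() */
		inode->i_mode = mode;
	}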
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index ed8ccd680102..1d0bf8fa1922 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -127,10 +127,8 @@ char *tomoyo_realpath_from_path(struct path *path)
/* If we don't have a vfsmount, we can't calculate. */
if (!path->mnt)
break;
- spin_lock(&dcache_lock);
/* go to whatever namespace root we are under */
pos = __d_path(path, &ns_root, buf, buf_len);
- spin_unlock(&dcache_lock);
/* Prepend "/proc" prefix if using internal proc vfs mount. */
if (!IS_ERR(pos) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
(path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
index 51a3d381a59e..9890cf2066ff 100644
--- a/sound/oss/sb_ess.c
+++ b/sound/oss/sb_ess.c
@@ -1722,7 +1722,6 @@ printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask);
right = (value & 0x0000ff00) >> 8;
} else { /* Turn it off (3) */
left = 0;
- left = 0;
right = 0;
}
sb_common_mixer_set(devc, i + ES_REC_MIXER_RECDIFF, left, right);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 82ebeb9544fe..93fa59cc60ef 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5326,6 +5326,82 @@ again:
return 0;
}
+static int stac92hd83xxx_set_system_btl_amp(struct hda_codec *codec)
+{
+ if (codec->vendor_id != 0x111d7605 &&
+ codec->vendor_id != 0x111d76d1)
+ return 0;
+
+ switch (codec->subsystem_id) {
+ case 0x103c1618:
+ case 0x103c1619:
+ case 0x103c161a:
+ case 0x103c161b:
+ case 0x103c161c:
+ case 0x103c161d:
+ case 0x103c161e:
+ case 0x103c161f:
+ case 0x103c1620:
+ case 0x103c1621:
+ case 0x103c1622:
+ case 0x103c1623:
+
+ case 0x103c162a:
+ case 0x103c162b:
+
+ case 0x103c1630:
+ case 0x103c1631:
+
+ case 0x103c1633:
+
+ case 0x103c1635:
+
+ case 0x103c164f:
+
+ case 0x103c1676:
+ case 0x103c1677:
+ case 0x103c1678:
+ case 0x103c1679:
+ case 0x103c167a:
+ case 0x103c167b:
+ case 0x103c167c:
+ case 0x103c167d:
+ case 0x103c167e:
+ case 0x103c167f:
+ case 0x103c1680:
+ case 0x103c1681:
+ case 0x103c1682:
+ case 0x103c1683:
+ case 0x103c1684:
+ case 0x103c1685:
+ case 0x103c1686:
+ case 0x103c1687:
+ case 0x103c1688:
+ case 0x103c1689:
+ case 0x103c168a:
+ case 0x103c168b:
+ case 0x103c168c:
+ case 0x103c168d:
+ case 0x103c168e:
+ case 0x103c168f:
+ case 0x103c1690:
+ case 0x103c1691:
+ case 0x103c1692:
+
+ case 0x103c3587:
+ case 0x103c3588:
+ case 0x103c3589:
+ case 0x103c358a:
+
+ case 0x103c3667:
+ case 0x103c3668:
+ /* set BTL amp level to 13.43dB for louder speaker output */
+ return snd_hda_codec_write_cache(codec, codec->afg, 0,
+ 0x7F4, 0x14);
+ }
+ return 0;
+}
+
static int patch_stac92hd83xxx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
@@ -5452,6 +5528,8 @@ again:
AC_VERB_SET_CONNECT_SEL, num_dacs);
}
+ stac92hd83xxx_set_system_btl_amp(codec);
+
codec->proc_widget_hook = stac92hd_proc_hook;
return 0;
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index c53955fe17b6..de799cd1ba72 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -47,7 +47,7 @@ static int ad73311_probe(struct platform_device *pdev)
&soc_codec_dev_ad73311, &ad73311_dai, 1);
}
-static int ad73311_remove(struct platform_device *pdev)
+static int __devexit ad73311_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index e7a40d16df90..bc22ee93a75d 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -2051,7 +2051,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int max98088_i2c_remove(struct i2c_client *client)
+static int __devexit max98088_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
kfree(i2c_get_clientdata(client));
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 7a1825418ee4..99c046ba46bb 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -665,7 +665,7 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int wm9090_i2c_remove(struct i2c_client *i2c)
+static int __devexit wm9090_i2c_remove(struct i2c_client *i2c)
{
struct wm9090_priv *wm9090 = i2c_get_clientdata(i2c);
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index fe15bb26e484..25f27ec1dd6e 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -24,7 +24,6 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
-#include <sound/soc-of-simple.h>
#include "mpc5200_dma.h"
#include "mpc5200_psc_ac97.h"
@@ -49,7 +48,7 @@ static struct snd_soc_dai_link pcm030_fabric_dai[] = {
.codec_dai_name = "wm9712-aux",
.cpu_dai_name = "mpc5200-psc-ac97.1",
.platform_name = "mpc5200-pcm-audio",
- ..codec_name = "wm9712-codec",
+ .codec_name = "wm9712-codec",
},
};
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 1febf2f23754..ae4251d5abf7 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -62,12 +62,14 @@ struct snd_usb_substream {
unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */
unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */
+ int freqshift; /* how much to shift the feedback value to get Q16.16 */
unsigned int freqmax; /* maximum sampling rate, used for buffer management */
unsigned int phase; /* phase accumulator */
unsigned int maxpacksize; /* max packet size in bytes */
unsigned int maxframesize; /* max packet size in frames */
unsigned int curpacksize; /* current packet size in bytes (for capture) */
unsigned int curframesize; /* current packet size in frames (for capture) */
+ unsigned int syncmaxsize; /* sync endpoint packet size */
unsigned int fill_max: 1; /* fill max packet size always */
unsigned int txfr_quirk:1; /* allow sub-frame alignment */
unsigned int fmt_type; /* USB audio format type (1-3) */
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index f49756c1b837..cff3a3c465d7 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -237,6 +237,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
subs->datainterval = fmt->datainterval;
subs->syncpipe = subs->syncinterval = 0;
subs->maxpacksize = fmt->maxpacksize;
+ subs->syncmaxsize = 0;
subs->fill_max = 0;
/* we need a sync pipe in async OUT or adaptive IN mode */
@@ -283,6 +284,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
else
subs->syncinterval = 3;
+ subs->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
}
/* always fill max packet size */
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 3c650ab3c91d..961c9a250686 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -132,6 +132,11 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
? get_full_speed_hz(subs->freqm)
: get_high_speed_hz(subs->freqm),
subs->freqm >> 16, subs->freqm & 0xffff);
+ if (subs->freqshift != INT_MIN)
+ snd_iprintf(buffer, " Feedback Format = %d.%d\n",
+ (subs->syncmaxsize > 3 ? 32 : 24)
+ - (16 - subs->freqshift),
+ 16 - subs->freqshift);
} else {
snd_iprintf(buffer, " Status: Stop\n");
}
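
Editorial note: the new "Feedback Format" line reports how many integer and fractional bits the device's feedback values carry. A 3-byte sync packet holds at most 24 bits, a 4-byte packet 32 bits, and 16 - freqshift is the number of fractional bits once the value has been normalized to Q16.16 (freqshift is set by the urb.c change further down). A minimal sketch of that arithmetic; show_feedback_format() and the sample values are illustrative only:

	#include <stdio.h>

	static void show_feedback_format(unsigned int syncmaxsize, int freqshift)
	{
		int total_bits = syncmaxsize > 3 ? 32 : 24;
		int frac_bits = 16 - freqshift;

		printf("Feedback Format = %d.%d\n", total_bits - frac_bits, frac_bits);
	}

	int main(void)
	{
		show_feedback_format(3, 2);	/* full-speed 10.14 feedback -> "10.14" */
		show_feedback_format(4, 0);	/* spec-conforming high-speed device -> "16.16" */
		return 0;
	}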
diff --git a/sound/usb/urb.c b/sound/usb/urb.c
index 8deeaad10f10..e184349aee83 100644
--- a/sound/usb/urb.c
+++ b/sound/usb/urb.c
@@ -225,6 +225,7 @@ int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
else
subs->freqn = get_usb_high_speed_rate(rate);
subs->freqm = subs->freqn;
+ subs->freqshift = INT_MIN;
/* calculate max. frequency */
if (subs->maxpacksize) {
/* whatever fits into a max. size packet */
@@ -513,11 +514,10 @@ static int retire_paused_capture_urb(struct snd_usb_substream *subs,
/*
- * prepare urb for full speed playback sync pipe
+ * prepare urb for playback sync pipe
*
* set up the offset and length to receive the current frequency.
*/
-
static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
struct snd_pcm_runtime *runtime,
struct urb *urb)
@@ -525,103 +525,78 @@ static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
struct snd_urb_ctx *ctx = urb->context;
urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 3;
+ urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
urb->iso_frame_desc[0].offset = 0;
return 0;
}
/*
- * prepare urb for high speed playback sync pipe
+ * process after playback sync complete
*
- * set up the offset and length to receive the current frequency.
- */
-
-static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- struct snd_urb_ctx *ctx = urb->context;
-
- urb->dev = ctx->subs->dev; /* we need to set this at each time */
- urb->iso_frame_desc[0].length = 4;
- urb->iso_frame_desc[0].offset = 0;
- return 0;
-}
-
-/*
- * process after full speed playback sync complete
- *
- * retrieve the current 10.14 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
+ * Full speed devices report feedback values in 10.14 format as samples per
+ * frame, while high speed devices use 16.16 format as samples per microframe.
+ * Because the Audio Class 1 spec was written before USB 2.0, many high speed
+ * devices use a wrong interpretation, and some others use an entirely
+ * different format. Therefore, we cannot predict what format any particular
+ * device uses and must detect it automatically.
*/
static int retire_playback_sync_urb(struct snd_usb_substream *subs,
struct snd_pcm_runtime *runtime,
struct urb *urb)
{
unsigned int f;
+ int shift;
unsigned long flags;
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 3) {
- f = combine_triple((u8*)urb->transfer_buffer) << 2;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
- }
- }
-
- return 0;
-}
+ if (urb->iso_frame_desc[0].status != 0 ||
+ urb->iso_frame_desc[0].actual_length < 3)
+ return 0;
-/*
- * process after high speed playback sync complete
- *
- * retrieve the current 12.13 frequency from pipe, and set it.
- * the value is referred in prepare_playback_urb().
- */
-static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int f;
- unsigned long flags;
+ f = le32_to_cpup(urb->transfer_buffer);
+ if (urb->iso_frame_desc[0].actual_length == 3)
+ f &= 0x00ffffff;
+ else
+ f &= 0x0fffffff;
+ if (f == 0)
+ return 0;
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 4) {
- f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
+ if (unlikely(subs->freqshift == INT_MIN)) {
+ /*
+ * The first time we see a feedback value, determine its format
+ * by shifting it left or right until it matches the nominal
+ * frequency value. This assumes that the feedback does not
+ * differ from the nominal value more than +50% or -25%.
+ */
+ shift = 0;
+ while (f < subs->freqn - subs->freqn / 4) {
+ f <<= 1;
+ shift++;
+ }
+ while (f > subs->freqn + subs->freqn / 2) {
+ f >>= 1;
+ shift--;
}
+ subs->freqshift = shift;
}
+ else if (subs->freqshift >= 0)
+ f <<= subs->freqshift;
+ else
+ f >>= -subs->freqshift;
- return 0;
-}
-
-/*
- * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete
- *
- * These devices return the number of samples per packet instead of the number
- * of samples per microframe.
- */
-static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs,
- struct snd_pcm_runtime *runtime,
- struct urb *urb)
-{
- unsigned int f;
- unsigned long flags;
-
- if (urb->iso_frame_desc[0].status == 0 &&
- urb->iso_frame_desc[0].actual_length == 4) {
- f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff;
- f >>= subs->datainterval;
- if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) {
- spin_lock_irqsave(&subs->lock, flags);
- subs->freqm = f;
- spin_unlock_irqrestore(&subs->lock, flags);
- }
+ if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
+ /*
+ * If the frequency looks valid, set it.
+ * This value is referred to in prepare_playback_urb().
+ */
+ spin_lock_irqsave(&subs->lock, flags);
+ subs->freqm = f;
+ spin_unlock_irqrestore(&subs->lock, flags);
+ } else {
+ /*
+ * Out of range; maybe the shift value is wrong.
+ * Reset it so that we autodetect again the next time.
+ */
+ subs->freqshift = INT_MIN;
}
return 0;
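
Editorial note: the autodetection above boils down to a small pure function: shift the first feedback value left or right until it lands within -25%/+50% of the nominal Q16.16 rate, and remember that shift for all later packets. A standalone sketch under those assumptions; detect_feedback_shift() is a hypothetical helper, not a function in this driver:

	static int detect_feedback_shift(unsigned int f, unsigned int freqn)
	{
		int shift = 0;

		/* assume the true rate deviates less than +50%/-25% from nominal */
		while (f < freqn - freqn / 4) {
			f <<= 1;	/* too small: fewer fractional bits than Q16.16 */
			shift++;
		}
		while (f > freqn + freqn / 2) {
			f >>= 1;	/* too large: more fractional bits than Q16.16 */
			shift--;
		}
		return shift;
	}

For example, a 48 kHz full-speed device reporting 48 << 14 (10.14 format) against a nominal freqn of 48 << 16 shifts twice and yields 2, which proc.c then displays as "10.14".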
@@ -878,21 +853,6 @@ static struct snd_urb_ops audio_urb_ops[2] = {
},
};
-static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
- {
- .prepare = prepare_nodata_playback_urb,
- .retire = retire_playback_urb,
- .prepare_sync = prepare_playback_sync_urb_hs,
- .retire_sync = retire_playback_sync_urb_hs,
- },
- {
- .prepare = prepare_capture_urb,
- .retire = retire_capture_urb,
- .prepare_sync = prepare_capture_sync_urb_hs,
- .retire_sync = retire_capture_sync_urb,
- },
-};
-
/*
* initialize the substream instance.
*/
@@ -909,23 +869,9 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
subs->direction = stream;
subs->dev = as->chip->dev;
subs->txfr_quirk = as->chip->txfr_quirk;
- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
- subs->ops = audio_urb_ops[stream];
- } else {
- subs->ops = audio_urb_ops_high_speed[stream];
- switch (as->chip->usb_id) {
- case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
- case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
- case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
- subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
- break;
- case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra 8 */
- case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
- subs->ops.prepare_sync = prepare_playback_sync_urb;
- subs->ops.retire_sync = retire_playback_sync_urb;
- break;
- }
- }
+ subs->ops = audio_urb_ops[stream];
+ if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
snd_usb_set_pcm_ops(as->pcm, stream);
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 43e3dd284b90..399751befeed 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -15,6 +15,23 @@ DESCRIPTION
This command displays the symbolic event types which can be selected in the
various perf commands with the -e option.
+EVENT MODIFIERS
+---------------
+
+Events can optionally have a modifier by appending a colon and one or
+more modifiers. Modifiers allow the user to restrict when events are
+counted: 'u' for user-space, 'k' for kernel, 'h' for hypervisor.
+
+The 'p' modifier can be used for specifying how precise the instruction
+address should be. The 'p' modifier is currently only implemented for
+Intel PEBS and can be specified multiple times:
+ 0 - SAMPLE_IP can have arbitrary skid
+ 1 - SAMPLE_IP must have constant skid
+ 2 - SAMPLE_IP requested to have 0 skid
+ 3 - SAMPLE_IP must have 0 skid
+
+The current PEBS implementation supports precision levels up to 2.
+
RAW HARDWARE EVENT DESCRIPTOR
-----------------------------
Even when an event is not available in a symbolic form within perf right now,
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 27d52dae5a43..62de1b7f4e76 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -16,7 +16,9 @@ or
or
'perf probe' --list
or
-'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+or
+'perf probe' [options] --vars='PROBEPOINT'
DESCRIPTION
-----------
@@ -31,6 +33,11 @@ OPTIONS
--vmlinux=PATH::
Specify vmlinux path which has debuginfo (Dwarf binary).
+-m::
+--module=MODNAME::
+	Specify the module name in which perf-probe searches for probe
+	points or lines.
+
-s::
--source=PATH::
Specify path to kernel source.
@@ -57,6 +64,15 @@ OPTIONS
Show source code lines which can be probed. This needs an argument
which specifies a range of the source code. (see LINE SYNTAX for detail)
+-V::
+--vars=::
+	Show available local variables at the given probe point. The argument
+	syntax is the same as PROBE SYNTAX, but takes NO ARGs.
+
+--externs::
+	(Only for --vars) Show externally defined variables in addition to
+	local variables.
+
-f::
--force::
Forcibly add events with existing name.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 3ee27dccfde9..a91f9f9e6e5c 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -83,6 +83,10 @@ OPTIONS
--call-graph::
Do call-graph (stack chain/backtrace) recording.
+-q::
+--quiet::
+	Don't print any messages; useful for scripting.
+
-v::
--verbose::
Be more verbose (show counter open errors, etc).
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 199d5e19554f..2e000c068cc5 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -50,14 +50,17 @@ static struct {
bool list_events;
bool force_add;
bool show_lines;
+ bool show_vars;
+ bool show_ext_vars;
+ bool mod_events;
int nevents;
struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
struct line_range line_range;
+ const char *target_module;
int max_probe_points;
} params;
-
/* Parse an event definition. Note that any error must die. */
static int parse_probe_event(const char *str)
{
@@ -92,6 +95,7 @@ static int parse_probe_event_argv(int argc, const char **argv)
len = 0;
for (i = 0; i < argc; i++)
len += sprintf(&buf[len], "%s ", argv[i]);
+ params.mod_events = true;
ret = parse_probe_event(buf);
free(buf);
return ret;
@@ -100,9 +104,10 @@ static int parse_probe_event_argv(int argc, const char **argv)
static int opt_add_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
- if (str)
+ if (str) {
+ params.mod_events = true;
return parse_probe_event(str);
- else
+ } else
return 0;
}
@@ -110,6 +115,7 @@ static int opt_del_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
if (str) {
+ params.mod_events = true;
if (!params.dellist)
params.dellist = strlist__new(true, NULL);
strlist__add(params.dellist, str);
@@ -130,6 +136,25 @@ static int opt_show_lines(const struct option *opt __used,
return ret;
}
+
+static int opt_show_vars(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ struct perf_probe_event *pev = &params.events[params.nevents];
+ int ret;
+
+ if (!str)
+ return 0;
+
+ ret = parse_probe_event(str);
+ if (!ret && pev->nargs != 0) {
+ pr_err(" Error: '--vars' doesn't accept arguments.\n");
+ return -EINVAL;
+ }
+ params.show_vars = true;
+
+ return ret;
+}
#endif
static const char * const probe_usage[] = {
@@ -138,7 +163,8 @@ static const char * const probe_usage[] = {
"perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
#ifdef DWARF_SUPPORT
- "perf probe --line 'LINEDESC'",
+ "perf probe [<options>] --line 'LINEDESC'",
+ "perf probe [<options>] --vars 'PROBEPOINT'",
#endif
NULL
};
@@ -180,10 +206,17 @@ static const struct option options[] = {
OPT_CALLBACK('L', "line", NULL,
"FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
"Show source code lines.", opt_show_lines),
+ OPT_CALLBACK('V', "vars", NULL,
+ "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
+ "Show accessible variables on PROBEDEF", opt_show_vars),
+ OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
+ "Show external variables too (with --vars only)"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING('s', "source", &symbol_conf.source_prefix,
"directory", "path to kernel source"),
+ OPT_STRING('m', "module", &params.target_module,
+ "modname", "target module name"),
#endif
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
@@ -217,7 +250,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
usage_with_options(probe_usage, options);
if (params.list_events) {
- if (params.nevents != 0 || params.dellist) {
+ if (params.mod_events) {
pr_err(" Error: Don't use --list with --add/--del.\n");
usage_with_options(probe_usage, options);
}
@@ -225,6 +258,10 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
pr_err(" Error: Don't use --list with --line.\n");
usage_with_options(probe_usage, options);
}
+ if (params.show_vars) {
+ pr_err(" Error: Don't use --list with --vars.\n");
+ usage_with_options(probe_usage, options);
+ }
ret = show_perf_probe_events();
if (ret < 0)
pr_err(" Error: Failed to show event list. (%d)\n",
@@ -234,17 +271,35 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
#ifdef DWARF_SUPPORT
if (params.show_lines) {
- if (params.nevents != 0 || params.dellist) {
- pr_warning(" Error: Don't use --line with"
- " --add/--del.\n");
+ if (params.mod_events) {
+ pr_err(" Error: Don't use --line with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+ if (params.show_vars) {
+ pr_err(" Error: Don't use --line with --vars.\n");
usage_with_options(probe_usage, options);
}
- ret = show_line_range(&params.line_range);
+ ret = show_line_range(&params.line_range, params.target_module);
if (ret < 0)
pr_err(" Error: Failed to show lines. (%d)\n", ret);
return ret;
}
+ if (params.show_vars) {
+ if (params.mod_events) {
+ pr_err(" Error: Don't use --vars with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+ ret = show_available_vars(params.events, params.nevents,
+ params.max_probe_points,
+ params.target_module,
+ params.show_ext_vars);
+ if (ret < 0)
+ pr_err(" Error: Failed to show vars. (%d)\n", ret);
+ return ret;
+ }
#endif
if (params.dellist) {
@@ -258,8 +313,9 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
if (params.nevents) {
ret = add_perf_probe_events(params.events, params.nevents,
- params.force_add,
- params.max_probe_points);
+ params.max_probe_points,
+ params.target_module,
+ params.force_add);
if (ret < 0) {
pr_err(" Error: Failed to add events. (%d)\n", ret);
return ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ff77b805de71..4e75583ddd6d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -353,7 +353,7 @@ try_again:
}
if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
- perror("Unable to read perf file descriptor\n");
+ perror("Unable to read perf file descriptor");
exit(-1);
}
@@ -626,7 +626,7 @@ static int __cmd_record(int argc, const char **argv)
nr_cpus = read_cpu_map(cpu_list);
if (nr_cpus < 1) {
- perror("failed to collect number of CPUs\n");
+ perror("failed to collect number of CPUs");
return -1;
}
@@ -761,6 +761,9 @@ static int __cmd_record(int argc, const char **argv)
}
}
+ if (quiet)
+ return 0;
+
fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
/*
@@ -820,6 +823,7 @@ static const struct option options[] = {
"do call-graph (stack chain/backtrace) recording"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
OPT_BOOLEAN('s', "stat", &inherit_stat,
"per thread counts"),
OPT_BOOLEAN('d', "data", &sample_address,
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 40a6a2992d15..2f8df45c4dcb 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -46,9 +46,6 @@ static struct scripting_ops *scripting_ops;
static void setup_scripting(void)
{
- /* make sure PERF_EXEC_PATH is set for scripts */
- perf_set_argv_exec_path(perf_exec_path());
-
setup_perl_scripting();
setup_python_scripting();
@@ -285,7 +282,7 @@ static int parse_scriptname(const struct option *opt __used,
script++;
} else {
script = str;
- ext = strchr(script, '.');
+ ext = strrchr(script, '.');
if (!ext) {
fprintf(stderr, "invalid script extension");
return -1;
@@ -593,6 +590,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
suffix = REPORT_SUFFIX;
}
+ /* make sure PERF_EXEC_PATH is set for scripts */
+ perf_set_argv_exec_path(perf_exec_path());
+
if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
char *record_script_path, *report_script_path;
int live_pipe[2];
@@ -625,12 +625,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
dup2(live_pipe[1], 1);
close(live_pipe[0]);
- __argv = malloc(5 * sizeof(const char *));
+ __argv = malloc(6 * sizeof(const char *));
__argv[0] = "/bin/sh";
__argv[1] = record_script_path;
- __argv[2] = "-o";
- __argv[3] = "-";
- __argv[4] = NULL;
+ __argv[2] = "-q";
+ __argv[3] = "-o";
+ __argv[4] = "-";
+ __argv[5] = NULL;
execvp("/bin/sh", (char **)__argv);
exit(-1);
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
index e3a5e55d54ff..4028d92dc4ae 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-report
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
shift
fi
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/failed-syscalls.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report
index d83070b7eeb5..ba25f4d41fb0 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-report
+++ b/tools/perf/scripts/perl/bin/rw-by-file-report
@@ -7,7 +7,7 @@ if [ $# -lt 1 ] ; then
fi
comm=$1
shift
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-file.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report
index 7ef46983f62f..641a3f5d085c 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-report
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: system-wide r/w activity
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-pid.pl
diff --git a/tools/perf/scripts/perl/bin/rwtop-report b/tools/perf/scripts/perl/bin/rwtop-report
index 93e698cd3f38..4918dba77021 100644
--- a/tools/perf/scripts/perl/bin/rwtop-report
+++ b/tools/perf/scripts/perl/bin/rwtop-report
@@ -17,7 +17,7 @@ if [ "$n_args" -gt 0 ] ; then
interval=$1
shift
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rwtop.pl $interval
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report
index a0d898f9ca1d..49052ebcb632 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-report
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: system-wide min/max/avg wakeup latency
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/wakeup-latency.pl
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
index 35081132ef97..df0c65f4ca93 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-report
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-report
@@ -1,6 +1,6 @@
#!/bin/bash
# description: workqueue stats (ins/exe/create/destroy)
-perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index 9689bc0acd9f..13cc02b5893a 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -6,6 +6,14 @@
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
NSECS_PER_SEC = 1000000000
def avg(total, n):
@@ -24,5 +32,55 @@ def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
+def add_stats(dict, key, value):
+ if not dict.has_key(key):
+ dict[key] = (value, value, value, 1)
+ else:
+ min, max, avg, count = dict[key]
+ if value < min:
+ min = value
+ if value > max:
+ max = value
+ avg = (avg + value) / 2
+ dict[key] = (min, max, avg, count + 1)
+
def clear_term():
print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+ import audit
+ machine_to_id = {
+ 'x86_64': audit.MACH_86_64,
+ 'alpha' : audit.MACH_ALPHA,
+ 'ia64' : audit.MACH_IA64,
+ 'ppc' : audit.MACH_PPC,
+ 'ppc64' : audit.MACH_PPC64,
+ 's390' : audit.MACH_S390,
+ 's390x' : audit.MACH_S390X,
+ 'i386' : audit.MACH_X86,
+ 'i586' : audit.MACH_X86,
+ 'i686' : audit.MACH_X86,
+ }
+ try:
+ machine_to_id['armeb'] = audit.MACH_ARMEB
+ except:
+ pass
+ machine_id = machine_to_id[os.uname()[4]]
+except:
+ if not audit_package_warned:
+ audit_package_warned = True
+ print "Install the audit-libs-python package to get syscall names"
+
+def syscall_name(id):
+ try:
+ return audit.audit_syscall_to_name(id, machine_id)
+ except:
+ return str(id)
+
+def strerror(nr):
+ try:
+ return errno.errorcode[abs(nr)]
+ except:
+ return "Unknown %d errno" % nr
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
index 30293545fcc2..03587021463d 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
shift
fi
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644
index 000000000000..5ecbb433caf4
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644
index 000000000000..c8268138fb7e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
index c3d0a638123d..4ad361b31249 100644
--- a/tools/perf/scripts/python/bin/netdev-times-report
+++ b/tools/perf/scripts/python/bin/netdev-times-report
@@ -2,4 +2,4 @@
# description: display a process of packet and processing time
# args: [tx] [rx] [dev=] [debug]
-perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
+perf trace -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report
index 61d05f72e443..df1791f07c24 100644
--- a/tools/perf/scripts/python/bin/sched-migration-report
+++ b/tools/perf/scripts/python/bin/sched-migration-report
@@ -1,3 +1,3 @@
#!/bin/bash
# description: sched migration overview
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sched-migration.py
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report
index b01c842ae7b4..36b409c05e50 100644
--- a/tools/perf/scripts/python/bin/sctop-report
+++ b/tools/perf/scripts/python/bin/sctop-report
@@ -21,4 +21,4 @@ elif [ "$n_args" -gt 0 ] ; then
interval=$1
shift
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
index 9e9d8ddd72ce..4eb88c9fc83c 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
shift
fi
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
index dc076b618796..cb2f9c5cf17e 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-report
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then
shift
fi
fi
-perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 0ca02278fe69..acd7848717b3 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -13,21 +13,26 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
+from Util import *
-usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+usage = "perf trace -s failed-syscalls-by-pid.py [comm|pid]\n";
for_comm = None
+for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
- for_comm = sys.argv[1]
+ try:
+ for_pid = int(sys.argv[1])
+ except:
+ for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
- pass
+ print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
@@ -35,9 +40,9 @@ def trace_end():
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
- if for_comm is not None:
- if common_comm != for_comm:
- return
+ if (for_comm and common_comm != for_comm) or \
+ (for_pid and common_pid != for_pid ):
+ return
if ret < 0:
try:
@@ -62,7 +67,7 @@ def print_error_totals():
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
- print " syscall: %-16d\n" % (id),
+ print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
- print " err = %-20d %10d\n" % (ret, val),
+ print " err = %-20s %10d\n" % (strerror(ret), val),
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644
index 000000000000..11e70a388d41
--- /dev/null
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -0,0 +1,50 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+import os, sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {}
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+process_names = {} # long-lived pid-to-execname mapping
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+ nr, uaddr, op, val, utime, uaddr2, val3):
+ cmd = op & FUTEX_CMD_MASK
+ if cmd != FUTEX_WAIT:
+ return # we don't care about originators of WAKE events
+
+ process_names[tid] = comm
+ thread_thislock[tid] = uaddr
+ thread_blocktime[tid] = nsecs(s, ns)
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+ nr, ret):
+ if thread_blocktime.has_key(tid):
+ elapsed = nsecs(s, ns) - thread_blocktime[tid]
+ add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+ del thread_blocktime[tid]
+ del thread_thislock[tid]
+
+def trace_begin():
+ print "Press control+C to stop and show the summary"
+
+def trace_end():
+ for (tid, lock) in lock_waits:
+ min, max, avg, count = lock_waits[tid, lock]
+ print "%s[%d] lock %x contended %d times, %d avg ns" % \
+ (process_names[tid], tid, lock, count, avg)
+
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 6cafad40c296..7a6ec2c7d8ab 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -8,10 +8,7 @@
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
-import thread
-import time
-import os
-import sys
+import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -20,7 +17,7 @@ from perf_trace_context import *
from Core import *
from Util import *
-usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
+usage = "perf trace -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
@@ -71,7 +68,7 @@ def print_syscall_totals(interval):
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
- print "%-40d %10d\n" % (id, val),
+ print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index af722d6a4b3f..d1ee3ec10cf2 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -5,29 +5,33 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
-import os
-import sys
+import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
+from Util import syscall_name
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
+for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
- for_comm = sys.argv[1]
+ try:
+ for_pid = int(sys.argv[1])
+ except:
+ for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
- pass
+ print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
@@ -35,9 +39,10 @@ def trace_end():
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
- if for_comm is not None:
- if common_comm != for_comm:
- return
+
+ if (for_comm and common_comm != for_comm) or \
+ (for_pid and common_pid != for_pid ):
+ return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
@@ -61,4 +66,4 @@ def print_syscall_totals():
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
- print " %-38d %10d\n" % (id, val),
+ print " %-38s %10d\n" % (syscall_name(id), val),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index f977e85ff049..ea183dc82d29 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -13,6 +13,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
+from Util import syscall_name
usage = "perf trace -s syscall-counts.py [comm]\n";
@@ -27,7 +28,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- pass
+ print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
@@ -55,4 +56,4 @@ def print_syscall_totals():
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
- print "%-40d %10d\n" % (id, val),
+ print "%-40s %10d\n" % (syscall_name(id), val),
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index f9c7e3ad1aa7..c8d81b00089d 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -12,8 +12,8 @@
#include "debug.h"
#include "util.h"
-int verbose = 0;
-bool dump_trace = false;
+int verbose;
+bool dump_trace = false, quiet = false;
int eprintf(int level, const char *fmt, ...)
{
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 7a17ee061bcb..7b514082bbaf 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -6,7 +6,7 @@
#include "event.h"
extern int verbose;
-extern bool dump_trace;
+extern bool quiet, dump_trace;
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(event_t *event);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 78575796d5f3..b397c0383728 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -215,6 +215,16 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *self,
return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
}
+static inline
+struct symbol *machine__find_kernel_function_by_name(struct machine *self,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_function_by_name(&self->kmaps, name, mapp,
+ filter);
+}
+
int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
int verbose, FILE *fp);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index fcc16e4349df..3b6a5297bf16 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -74,10 +74,9 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
static struct machine machine;
-/* Initialize symbol maps and path of vmlinux */
+/* Initialize symbol maps and path of vmlinux/modules */
static int init_vmlinux(void)
{
- struct dso *kernel;
int ret;
symbol_conf.sort_by_name = true;
@@ -91,33 +90,61 @@ static int init_vmlinux(void)
goto out;
}
- ret = machine__init(&machine, "/", 0);
+ ret = machine__init(&machine, "", HOST_KERNEL_ID);
if (ret < 0)
goto out;
- kernel = dso__new_kernel(symbol_conf.vmlinux_name);
- if (kernel == NULL)
- die("Failed to create kernel dso.");
-
- ret = __machine__create_kernel_maps(&machine, kernel);
- if (ret < 0)
- pr_debug("Failed to create kernel maps.\n");
-
+ if (machine__create_kernel_maps(&machine) < 0) {
+ pr_debug("machine__create_kernel_maps ");
+ goto out;
+ }
out:
if (ret < 0)
pr_warning("Failed to init vmlinux path.\n");
return ret;
}
+static struct symbol *__find_kernel_function_by_name(const char *name,
+ struct map **mapp)
+{
+ return machine__find_kernel_function_by_name(&machine, name, mapp,
+ NULL);
+}
+
+const char *kernel_get_module_path(const char *module)
+{
+ struct dso *dso;
+
+ if (module) {
+ list_for_each_entry(dso, &machine.kernel_dsos, node) {
+ if (strncmp(dso->short_name + 1, module,
+ dso->short_name_len - 2) == 0)
+ goto found;
+ }
+ pr_debug("Failed to find module %s.\n", module);
+ return NULL;
+ } else {
+ dso = machine.vmlinux_maps[MAP__FUNCTION]->dso;
+ if (dso__load_vmlinux_path(dso,
+ machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+ pr_debug("Failed to load kernel map.\n");
+ return NULL;
+ }
+ }
+found:
+ return dso->long_name;
+}
+
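
Editorial note: in the module branch of kernel_get_module_path(), the comparison skips one leading character and stops two short of the short-name length because perf stores kernel-module DSOs under bracketed short names such as "[ext4]"; stripping the brackets leaves the bare module name to compare against. A tiny illustrative helper, assuming that naming convention (the function name is hypothetical):

	#include <stdbool.h>
	#include <string.h>

	/* true if a bracketed DSO short name like "[ext4]" refers to "module" */
	static bool dso_short_name_matches(const char *short_name,
					   size_t short_name_len, const char *module)
	{
		return strncmp(short_name + 1, module, short_name_len - 2) == 0;
	}

With this, dso_short_name_matches("[ext4]", 6, "ext4") is true, mirroring the list_for_each_entry() lookup above.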
#ifdef DWARF_SUPPORT
-static int open_vmlinux(void)
+static int open_vmlinux(const char *module)
{
- if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
- pr_debug("Failed to load kernel map.\n");
- return -EINVAL;
+ const char *path = kernel_get_module_path(module);
+ if (!path) {
+ pr_err("Failed to find path of %s module", module ?: "kernel");
+ return -ENOENT;
}
- pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
- return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+ pr_debug("Try to open %s\n", path);
+ return open(path, O_RDONLY);
}
/*
@@ -125,20 +152,19 @@ static int open_vmlinux(void)
* Currently only handles kprobes.
*/
static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+ struct perf_probe_point *pp)
{
struct symbol *sym;
- int fd, ret = -ENOENT;
+ struct map *map;
+ u64 addr;
+ int ret = -ENOENT;
- sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
- tp->symbol, NULL);
+ sym = __find_kernel_function_by_name(tp->symbol, &map);
if (sym) {
- fd = open_vmlinux();
- if (fd >= 0) {
- ret = find_perf_probe_point(fd,
- sym->start + tp->offset, pp);
- close(fd);
- }
+ addr = map->unmap_ip(map, sym->start + tp->offset);
+ pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+ tp->offset, addr);
+ ret = find_perf_probe_point((unsigned long)addr, pp);
}
if (ret <= 0) {
pr_debug("Failed to find corresponding probes from "
@@ -156,12 +182,12 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
- int max_tevs)
+ int max_tevs, const char *module)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
int fd, ntevs;
- fd = open_vmlinux();
+ fd = open_vmlinux(module);
if (fd < 0) {
if (need_dwarf) {
pr_warning("Failed to open debuginfo file.\n");
@@ -300,7 +326,7 @@ error:
* Show line-range always requires debuginfo to find source file and
* line number.
*/
-int show_line_range(struct line_range *lr)
+int show_line_range(struct line_range *lr, const char *module)
{
int l = 1;
struct line_node *ln;
@@ -313,7 +339,7 @@ int show_line_range(struct line_range *lr)
if (ret < 0)
return ret;
- fd = open_vmlinux();
+ fd = open_vmlinux(module);
if (fd < 0) {
pr_warning("Failed to open debuginfo file.\n");
return fd;
@@ -378,11 +404,84 @@ end:
return ret;
}
+static int show_available_vars_at(int fd, struct perf_probe_event *pev,
+ int max_vls, bool externs)
+{
+ char *buf;
+ int ret, i;
+ struct str_node *node;
+ struct variable_list *vls = NULL, *vl;
+
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return -EINVAL;
+ pr_debug("Searching variables at %s\n", buf);
+
+ ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
+ if (ret > 0) {
+ /* Some variables were found */
+ fprintf(stdout, "Available variables at %s\n", buf);
+ for (i = 0; i < ret; i++) {
+ vl = &vls[i];
+ /*
+ * A probe point might be converted to
+ * several trace points.
+ */
+ fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+ vl->point.offset);
+ free(vl->point.symbol);
+ if (vl->vars) {
+ strlist__for_each(node, vl->vars)
+ fprintf(stdout, "\t\t%s\n", node->s);
+ strlist__delete(vl->vars);
+ } else
+ fprintf(stdout, "(No variables)\n");
+ }
+ free(vls);
+ } else
+ pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+
+ free(buf);
+ return ret;
+}
+
+/* Show available variables on given probe point */
+int show_available_vars(struct perf_probe_event *pevs, int npevs,
+ int max_vls, const char *module, bool externs)
+{
+ int i, fd, ret = 0;
+
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ fd = open_vmlinux(module);
+ if (fd < 0) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return fd;
+ }
+
+ setup_pager();
+
+ for (i = 0; i < npevs && ret >= 0; i++)
+ ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+
+ close(fd);
+ return ret;
+}
+
#else /* !DWARF_SUPPORT */
static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+ struct perf_probe_point *pp)
{
+ struct symbol *sym;
+
+ sym = __find_kernel_function_by_name(tp->symbol, NULL);
+ if (!sym) {
+ pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
+ return -ENOENT;
+ }
pp->function = strdup(tp->symbol);
if (pp->function == NULL)
return -ENOMEM;
@@ -394,7 +493,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs __unused,
- int max_tevs __unused)
+ int max_tevs __unused, const char *mod __unused)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
@@ -403,12 +502,19 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
return 0;
}
-int show_line_range(struct line_range *lr __unused)
+int show_line_range(struct line_range *lr __unused, const char *module __unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
+int show_available_vars(struct perf_probe_event *pevs __unused,
+ int npevs __unused, int max_vls __unused,
+ const char *module __unused, bool externs __unused)
+{
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+}
#endif
int parse_line_range_desc(const char *arg, struct line_range *lr)
@@ -1087,7 +1193,7 @@ error:
}
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
- struct perf_probe_event *pev)
+ struct perf_probe_event *pev)
{
char buf[64] = "";
int i, ret;
@@ -1516,14 +1622,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
static int convert_to_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
- int max_tevs)
+ int max_tevs, const char *module)
{
struct symbol *sym;
int ret = 0, i;
struct probe_trace_event *tev;
/* Convert perf_probe_event with debuginfo */
- ret = try_to_find_probe_trace_events(pev, tevs, max_tevs);
+ ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module);
if (ret != 0)
return ret;
@@ -1572,8 +1678,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
}
/* Currently just checking function name from symbol map */
- sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
- tev->point.symbol, NULL);
+ sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
if (!sym) {
pr_warning("Kernel symbol \'%s\' not found.\n",
tev->point.symbol);
@@ -1596,7 +1701,7 @@ struct __event_package {
};
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
- bool force_add, int max_tevs)
+ int max_tevs, const char *module, bool force_add)
{
int i, j, ret;
struct __event_package *pkgs;
@@ -1617,7 +1722,9 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
pkgs[i].pev = &pevs[i];
/* Convert with or without debuginfo */
ret = convert_to_probe_trace_events(pkgs[i].pev,
- &pkgs[i].tevs, max_tevs);
+ &pkgs[i].tevs,
+ max_tevs,
+ module);
if (ret < 0)
goto end;
pkgs[i].ntevs = ret;
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 5af39243a25b..5accbedfea37 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -90,6 +90,12 @@ struct line_range {
struct list_head line_list; /* Visible lines */
};
+/* List of variables */
+struct variable_list {
+ struct probe_trace_point point; /* Actual probepoint */
+ struct strlist *vars; /* Available variables */
+};
+
/* Command string to events */
extern int parse_perf_probe_command(const char *cmd,
struct perf_probe_event *pev);
@@ -109,12 +115,18 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev);
/* Command string to line-range */
extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+/* Internal use: Return kernel/module path */
+extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
- bool force_add, int max_probe_points);
+ int max_probe_points, const char *module,
+ bool force_add);
extern int del_perf_probe_events(struct strlist *dellist);
extern int show_perf_probe_events(void);
-extern int show_line_range(struct line_range *lr);
+extern int show_line_range(struct line_range *lr, const char *module);
+extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
+ int max_probe_points, const char *module,
+ bool externs);
/* Maximum index number of event-name postfix */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 32b81f707ff5..3991d73d1cff 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -116,6 +116,101 @@ static void line_list__free(struct list_head *head)
}
}
+/* Dwarf FL wrappers */
+
+static int __linux_kernel_find_elf(Dwfl_Module *mod,
+ void **userdata,
+ const char *module_name,
+ Dwarf_Addr base,
+ char **file_name, Elf **elfp)
+{
+ int fd;
+ const char *path = kernel_get_module_path(module_name);
+
+ if (path) {
+ fd = open(path, O_RDONLY);
+ if (fd >= 0) {
+ *file_name = strdup(path);
+ return fd;
+ }
+ }
+ /* If failed, try to call standard method */
+ return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
+ file_name, elfp);
+}
+
+static char *debuginfo_path; /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+
+ .section_address = dwfl_offline_section_address,
+
+ /* We use this table for core files too. */
+ .find_elf = dwfl_build_id_find_elf,
+};
+
+static const Dwfl_Callbacks kernel_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+
+ .find_elf = __linux_kernel_find_elf,
+ .section_address = dwfl_linux_kernel_module_section_address,
+};
+
+/* Get a Dwarf from offline image */
+static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
+{
+ Dwfl_Module *mod;
+ Dwarf *dbg = NULL;
+
+ if (!dwflp)
+ return NULL;
+
+ *dwflp = dwfl_begin(&offline_callbacks);
+ if (!*dwflp)
+ return NULL;
+
+ mod = dwfl_report_offline(*dwflp, "", "", fd);
+ if (!mod)
+ goto error;
+
+ dbg = dwfl_module_getdwarf(mod, bias);
+ if (!dbg) {
+error:
+ dwfl_end(*dwflp);
+ *dwflp = NULL;
+ }
+ return dbg;
+}
+
+/* Get a Dwarf from live kernel image */
+static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp,
+ Dwarf_Addr *bias)
+{
+ Dwarf *dbg;
+
+ if (!dwflp)
+ return NULL;
+
+ *dwflp = dwfl_begin(&kernel_callbacks);
+ if (!*dwflp)
+ return NULL;
+
+	/* Load the kernel dwarves: we don't care about the result here */
+ dwfl_linux_kernel_report_kernel(*dwflp);
+ dwfl_linux_kernel_report_modules(*dwflp);
+
+ dbg = dwfl_addrdwarf(*dwflp, addr, bias);
+ /* Here, check whether we could get a real dwarf */
+ if (!dbg) {
+ dwfl_end(*dwflp);
+ *dwflp = NULL;
+ }
+ return dbg;
+}
+
/* Dwarf wrappers */
/* Find the realpath of the target file. */
@@ -160,26 +255,44 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
return name ? (strcmp(tname, name) == 0) : false;
}
-/* Get type die, but skip qualifiers and typedef */
-static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+/* Get type die */
+static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
Dwarf_Attribute attr;
+
+ if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+ dwarf_formref_die(&attr, die_mem))
+ return die_mem;
+ else
+ return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
int tag;
do {
- if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
- dwarf_formref_die(&attr, die_mem) == NULL)
- return NULL;
-
- tag = dwarf_tag(die_mem);
- vr_die = die_mem;
+ vr_die = die_get_type(vr_die, die_mem);
+ if (!vr_die)
+ break;
+ tag = dwarf_tag(vr_die);
} while (tag == DW_TAG_const_type ||
tag == DW_TAG_restrict_type ||
tag == DW_TAG_volatile_type ||
- tag == DW_TAG_shared_type ||
- tag == DW_TAG_typedef);
+ tag == DW_TAG_shared_type);
+
+ return vr_die;
+}
- return die_mem;
+/* Get a type die, but skip qualifiers and typedef */
+static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ do {
+ vr_die = __die_get_real_type(vr_die, die_mem);
+ } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+ return vr_die;
}
static bool die_is_signed_type(Dwarf_Die *tp_die)
@@ -320,25 +433,35 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
}
+struct __find_variable_param {
+ const char *name;
+ Dwarf_Addr addr;
+};
+
static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
{
- const char *name = data;
+ struct __find_variable_param *fvp = data;
int tag;
tag = dwarf_tag(die_mem);
if ((tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) &&
- die_compare_name(die_mem, name))
+ die_compare_name(die_mem, fvp->name))
return DIE_FIND_CB_FOUND;
- return DIE_FIND_CB_CONTINUE;
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
}
-/* Find a variable called 'name' */
-static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
- Dwarf_Die *die_mem)
+/* Find a variable called 'name' at given address */
+static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem)
{
- return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
+ struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
die_mem);
}
@@ -361,6 +484,60 @@ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
die_mem);
}
+/* Get the type name of the given variable DIE */
+static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+ Dwarf_Die type;
+ int tag, ret, ret2;
+ const char *tmp = "";
+
+ if (__die_get_real_type(vr_die, &type) == NULL)
+ return -ENOENT;
+
+ tag = dwarf_tag(&type);
+ if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+ tmp = "*";
+ else if (tag == DW_TAG_subroutine_type) {
+ /* Function pointer */
+ ret = snprintf(buf, len, "(function_type)");
+ return (ret >= len) ? -E2BIG : ret;
+ } else {
+ if (!dwarf_diename(&type))
+ return -ENOENT;
+ if (tag == DW_TAG_union_type)
+ tmp = "union ";
+ else if (tag == DW_TAG_structure_type)
+ tmp = "struct ";
+ /* Write a base name */
+ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+ return (ret >= len) ? -E2BIG : ret;
+ }
+ ret = die_get_typename(&type, buf, len);
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
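
To trace the recursion on one concrete case (a sketch, assuming `vr_die` describes a variable of type `struct task_struct *`): the first __die_get_real_type() yields the DW_TAG_pointer_type DIE, so `tmp` becomes "*" and die_get_typename() recurses on the pointer DIE, which resolves the pointed-to DW_TAG_structure_type and writes "struct task_struct"; the outer call then appends the "*":

	char buf[64];

	/* For "struct task_struct *p" this leaves "struct task_struct*"
	 * in buf. */
	if (die_get_typename(&vr_die, buf, sizeof(buf)) > 0)
		pr_debug("type: %s\n", buf);
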
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+ int ret, ret2;
+
+ ret = die_get_typename(vr_die, buf, len);
+ if (ret < 0) {
+ pr_debug("Failed to get type, make it unknown.\n");
+ ret = snprintf(buf, len, "(unknown_type)");
+ }
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "\t%s",
+ dwarf_diename(vr_die));
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
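
Continuing the same sketch, die_get_varname() prefixes the variable name with that type string, separated by a tab, so one string entry can later carry both:

	char buf[64];

	/* For "struct task_struct *p" buf now reads
	 * "struct task_struct*\tp". */
	if (die_get_varname(&vr_die, buf, sizeof(buf)) > 0)
		pr_debug("var: %s\n", buf);
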
+
/*
* Probe finder related functions
*/
@@ -374,8 +551,13 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
return ref;
}
-/* Show a location */
-static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
+/*
+ * Convert a location into trace_arg.
+ * If tvar == NULL, this just checks variable can be converted.
+ */
+static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+ Dwarf_Op *fb_ops,
+ struct probe_trace_arg *tvar)
{
Dwarf_Attribute attr;
Dwarf_Op *op;
@@ -384,20 +566,23 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
Dwarf_Word offs = 0;
bool ref = false;
const char *regs;
- struct probe_trace_arg *tvar = pf->tvar;
int ret;
+ if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
+ goto static_var;
+
/* TODO: handle more than 1 exprs */
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
- dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 ||
+ dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
nops == 0) {
/* TODO: Support const_value */
- pr_err("Failed to find the location of %s at this address.\n"
- " Perhaps, it has been optimized out.\n", pf->pvar->var);
return -ENOENT;
}
if (op->atom == DW_OP_addr) {
+static_var:
+ if (!tvar)
+ return 0;
/* Static variables on memory (not stack), make @varname */
ret = strlen(dwarf_diename(vr_die));
tvar->value = zalloc(ret + 2);
@@ -412,14 +597,11 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
/* If this is based on frame buffer, set the offset */
if (op->atom == DW_OP_fbreg) {
- if (pf->fb_ops == NULL) {
- pr_warning("The attribute of frame base is not "
- "supported.\n");
+ if (fb_ops == NULL)
return -ENOTSUP;
- }
ref = true;
offs = op->number;
- op = &pf->fb_ops[0];
+ op = &fb_ops[0];
}
if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
@@ -435,13 +617,18 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
} else if (op->atom == DW_OP_regx) {
regn = op->number;
} else {
- pr_warning("DW_OP %x is not supported.\n", op->atom);
+ pr_debug("DW_OP %x is not supported.\n", op->atom);
return -ENOTSUP;
}
+ if (!tvar)
+ return 0;
+
regs = get_arch_regstr(regn);
if (!regs) {
- pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn);
+ /* This should be a bug in DWARF or this tool */
+ pr_warning("Mapping for DWARF register number %u "
+ "missing on this architecture.", regn);
return -ERANGE;
}
@@ -666,8 +853,14 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
pr_debug("Converting variable %s into trace event.\n",
dwarf_diename(vr_die));
- ret = convert_variable_location(vr_die, pf);
- if (ret == 0 && pf->pvar->field) {
+ ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
+ pf->tvar);
+ if (ret == -ENOENT)
+ pr_err("Failed to find the location of %s at this address.\n"
+ " Perhaps, it has been optimized out.\n", pf->pvar->var);
+ else if (ret == -ENOTSUP)
+ pr_err("Sorry, we don't support this variable location yet.\n");
+ else if (pf->pvar->field) {
ret = convert_variable_fields(vr_die, pf->pvar->var,
pf->pvar->field, &pf->tvar->ref,
&die_mem);
@@ -722,56 +915,39 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
pr_debug("Searching '%s' variable in context.\n",
pf->pvar->var);
/* Search child die for local variables and parameters. */
- if (die_find_variable(sp_die, pf->pvar->var, &vr_die))
+ if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die))
ret = convert_variable(&vr_die, pf);
else {
/* Search upper class */
nscopes = dwarf_getscopes_die(sp_die, &scopes);
- if (nscopes > 0) {
- ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var,
- 0, NULL, 0, 0, &vr_die);
- if (ret >= 0)
+ while (nscopes-- > 1) {
+ pr_debug("Searching variables in %s\n",
+ dwarf_diename(&scopes[nscopes]));
+ /* We should check this scope, so give dummy address */
+ if (die_find_variable_at(&scopes[nscopes],
+ pf->pvar->var, 0,
+ &vr_die)) {
ret = convert_variable(&vr_die, pf);
- else
- ret = -ENOENT;
+ goto found;
+ }
+ }
+ if (scopes)
free(scopes);
- } else
- ret = -ENOENT;
+ ret = -ENOENT;
}
+found:
if (ret < 0)
pr_warning("Failed to find '%s' in this function.\n",
pf->pvar->var);
return ret;
}
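
In isolation, the enclosing-scope fallback follows this pattern (a sketch with error handling trimmed; "myvar" stands in for pf->pvar->var):

	Dwarf_Die *scopes = NULL, vr_die;
	int nscopes = dwarf_getscopes_die(sp_die, &scopes);

	/* scopes[0] is the subprogram itself, which was already searched,
	 * so it is skipped. */
	while (nscopes-- > 1) {
		if (die_find_variable_at(&scopes[nscopes], "myvar", 0, &vr_die))
			break;	/* found in an enclosing scope */
	}
	free(scopes);
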
-/* Show a probe point to output buffer */
-static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
+/* Convert subprogram DIE to trace point */
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
+ bool retprobe, struct probe_trace_point *tp)
{
- struct probe_trace_event *tev;
Dwarf_Addr eaddr;
- Dwarf_Die die_mem;
const char *name;
- int ret, i;
- Dwarf_Attribute fb_attr;
- size_t nops;
-
- if (pf->ntevs == pf->max_tevs) {
- pr_warning("Too many( > %d) probe point found.\n",
- pf->max_tevs);
- return -ERANGE;
- }
- tev = &pf->tevs[pf->ntevs++];
-
- /* If no real subprogram, find a real one */
- if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
- sp_die = die_find_real_subprogram(&pf->cu_die,
- pf->addr, &die_mem);
- if (!sp_die) {
- pr_warning("Failed to find probe point in any "
- "functions.\n");
- return -ENOENT;
- }
- }
/* Copy the name of probe point */
name = dwarf_diename(sp_die);
@@ -781,26 +957,45 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
dwarf_diename(sp_die));
return -ENOENT;
}
- tev->point.symbol = strdup(name);
- if (tev->point.symbol == NULL)
+ tp->symbol = strdup(name);
+ if (tp->symbol == NULL)
return -ENOMEM;
- tev->point.offset = (unsigned long)(pf->addr - eaddr);
+ tp->offset = (unsigned long)(paddr - eaddr);
} else
/* This function has no name. */
- tev->point.offset = (unsigned long)pf->addr;
+ tp->offset = (unsigned long)paddr;
/* Return probe must be on the head of a subprogram */
- if (pf->pev->point.retprobe) {
- if (tev->point.offset != 0) {
+ if (retprobe) {
+ if (eaddr != paddr) {
pr_warning("Return probe must be on the head of"
" a real function\n");
return -EINVAL;
}
- tev->point.retprobe = true;
+ tp->retprobe = true;
}
- pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
- tev->point.offset);
+ return 0;
+}
+
+/* Call probe_finder callback with real subprogram DIE */
+static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ Dwarf_Die die_mem;
+ Dwarf_Attribute fb_attr;
+ size_t nops;
+ int ret;
+
+ /* If no real subprogram, find a real one */
+ if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
+ sp_die = die_find_real_subprogram(&pf->cu_die,
+ pf->addr, &die_mem);
+ if (!sp_die) {
+ pr_warning("Failed to find probe point in any "
+ "functions.\n");
+ return -ENOENT;
+ }
+ }
/* Get the frame base attribute/ops */
dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
@@ -820,22 +1015,13 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
#endif
}
- /* Find each argument */
- tev->nargs = pf->pev->nargs;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
- if (tev->args == NULL)
- return -ENOMEM;
- for (i = 0; i < pf->pev->nargs; i++) {
- pf->pvar = &pf->pev->args[i];
- pf->tvar = &tev->args[i];
- ret = find_variable(sp_die, pf);
- if (ret != 0)
- return ret;
- }
+ /* Call finder's callback handler */
+ ret = pf->callback(sp_die, pf);
/* *pf->fb_ops will be cached in libdw. Don't free it. */
pf->fb_ops = NULL;
- return 0;
+
+ return ret;
}
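
With the argument conversion moved out, a finder only supplies a callback of the right shape; a minimal sketch (the callback name and body are hypothetical):

	static int count_probe_points(Dwarf_Die *sp_die, struct probe_finder *pf)
	{
		pr_debug("probe point in %s at 0x%jx\n",
			 dwarf_diename(sp_die), (uintmax_t)pf->addr);
		return 0;	/* a negative return aborts the search */
	}

	struct probe_finder pf = { .pev = pev, .callback = count_probe_points };
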
/* Find probe point from its line number */
@@ -871,7 +1057,7 @@ static int find_probe_point_by_line(struct probe_finder *pf)
(int)i, lineno, (uintmax_t)addr);
pf->addr = addr;
- ret = convert_probe_point(NULL, pf);
+ ret = call_probe_finder(NULL, pf);
/* Continuing, because target line might be inlined. */
}
return ret;
@@ -984,7 +1170,7 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
(int)i, lineno, (unsigned long long)addr);
pf->addr = addr;
- ret = convert_probe_point(sp_die, pf);
+ ret = call_probe_finder(sp_die, pf);
/* Continuing, because target line might be inlined. */
}
/* TODO: deallocate lines, but how? */
@@ -1019,7 +1205,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
pr_debug("found inline addr: 0x%jx\n",
(uintmax_t)pf->addr);
- param->retval = convert_probe_point(in_die, pf);
+ param->retval = call_probe_finder(in_die, pf);
if (param->retval < 0)
return DWARF_CB_ABORT;
}
@@ -1057,7 +1243,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
}
pf->addr += pp->offset;
/* TODO: Check the address in this function */
- param->retval = convert_probe_point(sp_die, pf);
+ param->retval = call_probe_finder(sp_die, pf);
}
} else {
struct dwarf_callback_param _param = {.data = (void *)pf,
@@ -1079,90 +1265,276 @@ static int find_probe_point_by_func(struct probe_finder *pf)
return _param.retval;
}
-/* Find probe_trace_events specified by perf_probe_event from debuginfo */
-int find_probe_trace_events(int fd, struct perf_probe_event *pev,
- struct probe_trace_event **tevs, int max_tevs)
+/* Find probe points from debuginfo */
+static int find_probes(int fd, struct probe_finder *pf)
{
- struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
- struct perf_probe_point *pp = &pev->point;
+ struct perf_probe_point *pp = &pf->pev->point;
Dwarf_Off off, noff;
size_t cuhl;
Dwarf_Die *diep;
- Dwarf *dbg;
+ Dwarf *dbg = NULL;
+ Dwfl *dwfl;
+ Dwarf_Addr bias; /* Currently ignored */
int ret = 0;
- pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
- if (pf.tevs == NULL)
- return -ENOMEM;
- *tevs = pf.tevs;
- pf.ntevs = 0;
-
- dbg = dwarf_begin(fd, DWARF_C_READ);
+ dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
if (!dbg) {
pr_warning("No dwarf info found in the vmlinux - "
"please rebuild with CONFIG_DEBUG_INFO=y.\n");
- free(pf.tevs);
- *tevs = NULL;
return -EBADF;
}
#if _ELFUTILS_PREREQ(0, 142)
/* Get the call frame information from this dwarf */
- pf.cfi = dwarf_getcfi(dbg);
+ pf->cfi = dwarf_getcfi(dbg);
#endif
off = 0;
- line_list__init(&pf.lcache);
+ line_list__init(&pf->lcache);
/* Loop on CUs (Compilation Unit) */
while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
ret >= 0) {
/* Get the DIE(Debugging Information Entry) of this CU */
- diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
+ diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
if (!diep)
continue;
/* Check if target file is included. */
if (pp->file)
- pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
+ pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
else
- pf.fname = NULL;
+ pf->fname = NULL;
- if (!pp->file || pf.fname) {
+ if (!pp->file || pf->fname) {
if (pp->function)
- ret = find_probe_point_by_func(&pf);
+ ret = find_probe_point_by_func(pf);
else if (pp->lazy_line)
- ret = find_probe_point_lazy(NULL, &pf);
+ ret = find_probe_point_lazy(NULL, pf);
else {
- pf.lno = pp->line;
- ret = find_probe_point_by_line(&pf);
+ pf->lno = pp->line;
+ ret = find_probe_point_by_line(pf);
}
}
off = noff;
}
- line_list__free(&pf.lcache);
- dwarf_end(dbg);
+ line_list__free(&pf->lcache);
+ if (dwfl)
+ dwfl_end(dwfl);
- return (ret < 0) ? ret : pf.ntevs;
+ return ret;
+}
+
+/* Add a found probe point into trace event list */
+static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ struct trace_event_finder *tf =
+ container_of(pf, struct trace_event_finder, pf);
+ struct probe_trace_event *tev;
+ int ret, i;
+
+ /* Check number of tevs */
+ if (tf->ntevs == tf->max_tevs) {
+ pr_warning("Too many( > %d) probe point found.\n",
+ tf->max_tevs);
+ return -ERANGE;
+ }
+ tev = &tf->tevs[tf->ntevs++];
+
+ ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+ &tev->point);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+ tev->point.offset);
+
+ /* Find each argument */
+ tev->nargs = pf->pev->nargs;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < pf->pev->nargs; i++) {
+ pf->pvar = &pf->pev->args[i];
+ pf->tvar = &tev->args[i];
+ ret = find_variable(sp_die, pf);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+int find_probe_trace_events(int fd, struct perf_probe_event *pev,
+ struct probe_trace_event **tevs, int max_tevs)
+{
+ struct trace_event_finder tf = {
+ .pf = {.pev = pev, .callback = add_probe_trace_event},
+ .max_tevs = max_tevs};
+ int ret;
+
+ /* Allocate result tevs array */
+ *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+ if (*tevs == NULL)
+ return -ENOMEM;
+
+ tf.tevs = *tevs;
+ tf.ntevs = 0;
+
+ ret = find_probes(fd, &tf.pf);
+ if (ret < 0) {
+ free(*tevs);
+ *tevs = NULL;
+ return ret;
+ }
+
+ return (ret < 0) ? ret : tf.ntevs;
+}
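
The external calling convention is unchanged; a hedged usage sketch (fd is an already-open debuginfo file descriptor, pev a parsed perf_probe_event):

	struct probe_trace_event *tevs;
	int ntevs = find_probe_trace_events(fd, pev, &tevs, 128);

	if (ntevs < 0)
		pr_err("failed to convert the probe: %d\n", ntevs);
	else
		pr_debug("converted into %d trace event(s)\n", ntevs);
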
+
+#define MAX_VAR_LEN 64
+
+/* Collect available variables in this scope */
+static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct available_var_finder *af = data;
+ struct variable_list *vl;
+ char buf[MAX_VAR_LEN];
+ int tag, ret;
+
+ vl = &af->vls[af->nvls - 1];
+
+ tag = dwarf_tag(die_mem);
+ if (tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) {
+ ret = convert_variable_location(die_mem, af->pf.addr,
+ af->pf.fb_ops, NULL);
+ if (ret == 0) {
+ ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
+ pr_debug2("Add new var: %s\n", buf);
+ if (ret > 0)
+ strlist__add(vl->vars, buf);
+ }
+ }
+
+ if (af->child && dwarf_haspc(die_mem, af->pf.addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+/* Add found variables into the available variables list */
+static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ struct available_var_finder *af =
+ container_of(pf, struct available_var_finder, pf);
+ struct variable_list *vl;
+ Dwarf_Die die_mem, *scopes = NULL;
+ int ret, nscopes;
+
+ /* Check number of vls */
+ if (af->nvls == af->max_vls) {
+ pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
+ return -ERANGE;
+ }
+ vl = &af->vls[af->nvls++];
+
+ ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
+ &vl->point);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Probe point found: %s+%lu\n", vl->point.symbol,
+ vl->point.offset);
+
+ /* Find local variables */
+ vl->vars = strlist__new(true, NULL);
+ if (vl->vars == NULL)
+ return -ENOMEM;
+ af->child = true;
+ die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem);
+
+ /* Find external variables */
+ if (!af->externs)
+ goto out;
+ /* Don't need to search child DIE for externs. */
+ af->child = false;
+ nscopes = dwarf_getscopes_die(sp_die, &scopes);
+ while (nscopes-- > 1)
+ die_find_child(&scopes[nscopes], collect_variables_cb,
+ (void *)af, &die_mem);
+ if (scopes)
+ free(scopes);
+
+out:
+ if (strlist__empty(vl->vars)) {
+ strlist__delete(vl->vars);
+ vl->vars = NULL;
+ }
+
+ return ret;
+}
+
+/* Find available variables at given probe point */
+int find_available_vars_at(int fd, struct perf_probe_event *pev,
+ struct variable_list **vls, int max_vls,
+ bool externs)
+{
+ struct available_var_finder af = {
+ .pf = {.pev = pev, .callback = add_available_vars},
+ .max_vls = max_vls, .externs = externs};
+ int ret;
+
+ /* Allocate result vls array */
+ *vls = zalloc(sizeof(struct variable_list) * max_vls);
+ if (*vls == NULL)
+ return -ENOMEM;
+
+ af.vls = *vls;
+ af.nvls = 0;
+
+ ret = find_probes(fd, &af.pf);
+ if (ret < 0) {
+ /* Free vlist for error */
+ while (af.nvls--) {
+ if (af.vls[af.nvls].point.symbol)
+ free(af.vls[af.nvls].point.symbol);
+ if (af.vls[af.nvls].vars)
+ strlist__delete(af.vls[af.nvls].vars);
+ }
+ free(af.vls);
+ *vls = NULL;
+ return ret;
+ }
+
+ return (ret < 0) ? ret : af.nvls;
}
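
The variable-listing entry point mirrors that convention; a sketch with illustrative values:

	struct variable_list *vls;
	int i, nvls = find_available_vars_at(fd, pev, &vls, 32, false);

	for (i = 0; i < nvls; i++) {
		/* vls[i].point names the probe point; vls[i].vars, when
		 * non-NULL, is a strlist of "type\tname" entries. */
		pr_debug("vars at %s+%lu\n", vls[i].point.symbol,
			 vls[i].point.offset);
	}
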
/* Reverse search */
-int find_perf_probe_point(int fd, unsigned long addr,
- struct perf_probe_point *ppt)
+int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
{
Dwarf_Die cudie, spdie, indie;
- Dwarf *dbg;
+ Dwarf *dbg = NULL;
+ Dwfl *dwfl = NULL;
Dwarf_Line *line;
- Dwarf_Addr laddr, eaddr;
+ Dwarf_Addr laddr, eaddr, bias = 0;
const char *tmp;
int lineno, ret = 0;
bool found = false;
- dbg = dwarf_begin(fd, DWARF_C_READ);
- if (!dbg)
- return -EBADF;
+ /* Open the live linux kernel */
+ dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias);
+ if (!dbg) {
+ pr_warning("No dwarf info found in the vmlinux - "
+ "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* Adjust address with bias */
+ addr += bias;
/* Find cu die */
- if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
+ if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) {
+ pr_warning("No CU DIE is found at %lx\n", addr);
ret = -EINVAL;
goto end;
}
@@ -1225,7 +1597,8 @@ found:
}
end:
- dwarf_end(dbg);
+ if (dwfl)
+ dwfl_end(dwfl);
if (ret >= 0)
ret = found ? 1 : 0;
return ret;
@@ -1358,6 +1731,9 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
struct line_finder *lf = param->data;
struct line_range *lr = lf->lr;
+ pr_debug("find (%llx) %s\n",
+ (unsigned long long)dwarf_dieoffset(sp_die),
+ dwarf_diename(sp_die));
if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
die_compare_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
@@ -1401,10 +1777,12 @@ int find_line_range(int fd, struct line_range *lr)
Dwarf_Off off = 0, noff;
size_t cuhl;
Dwarf_Die *diep;
- Dwarf *dbg;
+ Dwarf *dbg = NULL;
+ Dwfl *dwfl;
+ Dwarf_Addr bias; /* Currently ignored */
const char *comp_dir;
- dbg = dwarf_begin(fd, DWARF_C_READ);
+ dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
if (!dbg) {
pr_warning("No dwarf info found in the vmlinux - "
"please rebuild with CONFIG_DEBUG_INFO=y.\n");
@@ -1450,8 +1828,7 @@ int find_line_range(int fd, struct line_range *lr)
}
pr_debug("path: %s\n", lr->path);
- dwarf_end(dbg);
-
+ dwfl_end(dwfl);
return (ret < 0) ? ret : lf.found;
}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 4507d519f183..bba69d455699 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -22,20 +22,27 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev,
int max_tevs);
/* Find a perf_probe_point from debuginfo */
-extern int find_perf_probe_point(int fd, unsigned long addr,
+extern int find_perf_probe_point(unsigned long addr,
struct perf_probe_point *ppt);
+/* Find a line range */
extern int find_line_range(int fd, struct line_range *lr);
+/* Find available variables */
+extern int find_available_vars_at(int fd, struct perf_probe_event *pev,
+ struct variable_list **vls, int max_points,
+ bool externs);
+
#include <dwarf.h>
#include <libdw.h>
+#include <libdwfl.h>
#include <version.h>
struct probe_finder {
struct perf_probe_event *pev; /* Target probe event */
- struct probe_trace_event *tevs; /* Result trace events */
- int ntevs; /* Number of trace events */
- int max_tevs; /* Max number of trace events */
+
+ /* Callback when a probe point is found */
+ int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf);
/* For function searching */
int lno; /* Line number */
@@ -53,6 +60,22 @@ struct probe_finder {
struct probe_trace_arg *tvar; /* Current result variable */
};
+struct trace_event_finder {
+ struct probe_finder pf;
+ struct probe_trace_event *tevs; /* Found trace events */
+ int ntevs; /* Number of trace events */
+ int max_tevs; /* Max number of trace events */
+};
+
+struct available_var_finder {
+ struct probe_finder pf;
+ struct variable_list *vls; /* Found variable lists */
+ int nvls; /* Number of variable lists */
+ int max_vls; /* Max no. of variable lists */
+ bool externs; /* Find external vars too */
+ bool child; /* Search child scopes */
+};
+
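
Both finder types embed a struct probe_finder and hand its address to find_probes(); inside a callback, container_of() maps that embedded pointer back to the enclosing object (a sketch with a hypothetical callback):

	static int my_callback(Dwarf_Die *sp_die, struct probe_finder *pf)
	{
		struct available_var_finder *af =
			container_of(pf, struct available_var_finder, pf);

		/* af->vls, af->nvls, af->max_vls are now reachable */
		return 0;
	}
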
struct line_finder {
struct line_range *lr; /* Target line range */
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
index 6d0df809a2ed..8bc010edca25 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/util/ui/browser.c
@@ -1,4 +1,3 @@
-#include <slang.h>
#include "libslang.h"
#include <linux/compiler.h>
#include <linux/list.h>